Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig | 12
-rw-r--r--  arch/arm/Kconfig.debug | 11
-rw-r--r--  arch/arm/boot/compressed/Makefile | 3
-rw-r--r--  arch/arm/boot/compressed/debug.S | 12
-rw-r--r--  arch/arm/boot/compressed/misc.c | 8
-rw-r--r--  arch/arm/common/Makefile | 3
-rw-r--r--  arch/arm/common/mcpm_entry.c | 263
-rw-r--r--  arch/arm/common/mcpm_head.S | 219
-rw-r--r--  arch/arm/common/mcpm_platsmp.c | 92
-rw-r--r--  arch/arm/common/vlock.S | 108
-rw-r--r--  arch/arm/common/vlock.h | 29
-rw-r--r--  arch/arm/include/asm/atomic.h | 24
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 75
-rw-r--r--  arch/arm/include/asm/cp15.h | 16
-rw-r--r--  arch/arm/include/asm/cputype.h | 61
-rw-r--r--  arch/arm/include/asm/glue-df.h | 20
-rw-r--r--  arch/arm/include/asm/kvm_arm.h | 4
-rw-r--r--  arch/arm/include/asm/kvm_asm.h | 2
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h | 107
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 42
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 67
-rw-r--r--  arch/arm/include/asm/kvm_vgic.h | 1
-rw-r--r--  arch/arm/include/asm/mach/pci.h | 11
-rw-r--r--  arch/arm/include/asm/mcpm.h | 209
-rw-r--r--  arch/arm/include/asm/thread_info.h | 1
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 2
-rw-r--r--  arch/arm/include/debug/uncompress.h | 7
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h | 12
-rw-r--r--  arch/arm/kernel/asm-offsets.c | 12
-rw-r--r--  arch/arm/kernel/bios32.c | 6
-rw-r--r--  arch/arm/kernel/entry-armv.S | 59
-rw-r--r--  arch/arm/kernel/entry-common.S | 8
-rw-r--r--  arch/arm/kernel/entry-header.S | 66
-rw-r--r--  arch/arm/kernel/head-common.S | 9
-rw-r--r--  arch/arm/kernel/head-nommu.S | 8
-rw-r--r--  arch/arm/kernel/process.c | 13
-rw-r--r--  arch/arm/kernel/return_address.c | 5
-rw-r--r--  arch/arm/kernel/setup.c | 4
-rw-r--r--  arch/arm/kernel/smp.c | 42
-rw-r--r--  arch/arm/kernel/smp_scu.c | 2
-rw-r--r--  arch/arm/kernel/smp_tlb.c | 9
-rw-r--r--  arch/arm/kvm/Makefile | 2
-rw-r--r--  arch/arm/kvm/arm.c | 194
-rw-r--r--  arch/arm/kvm/coproc.c | 28
-rw-r--r--  arch/arm/kvm/coproc.h | 4
-rw-r--r--  arch/arm/kvm/emulate.c | 75
-rw-r--r--  arch/arm/kvm/guest.c | 17
-rw-r--r--  arch/arm/kvm/handle_exit.c | 164
-rw-r--r--  arch/arm/kvm/interrupts.S | 13
-rw-r--r--  arch/arm/kvm/mmio.c | 46
-rw-r--r--  arch/arm/kvm/mmu.c | 184
-rw-r--r--  arch/arm/kvm/vgic.c | 2
-rw-r--r--  arch/arm/mach-exynos/hotplug.c | 1
-rw-r--r--  arch/arm/mach-exynos/mach-nuri.c | 2
-rw-r--r--  arch/arm/mach-highbank/hotplug.c | 1
-rw-r--r--  arch/arm/mach-imx/devices/devices.c | 2
-rw-r--r--  arch/arm/mach-imx/hotplug.c | 2
-rw-r--r--  arch/arm/mach-integrator/integrator_ap.c | 6
-rw-r--r--  arch/arm/mach-integrator/integrator_cp.c | 7
-rw-r--r--  arch/arm/mach-msm/hotplug.c | 4
-rw-r--r--  arch/arm/mach-omap2/board-omap3beagle.c | 2
-rw-r--r--  arch/arm/mach-omap2/clock.c | 2
-rw-r--r--  arch/arm/mach-omap2/gpmc-onenand.c | 4
-rw-r--r--  arch/arm/mach-omap2/gpmc.c | 8
-rw-r--r--  arch/arm/mach-omap2/id.c | 4
-rw-r--r--  arch/arm/mach-omap2/omap-smp.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap_device.c | 13
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 4
-rw-r--r--  arch/arm/mach-omap2/pm-debug.c | 6
-rw-r--r--  arch/arm/mach-omap2/powerdomain.c | 2
-rw-r--r--  arch/arm/mach-omap2/timer.c | 2
-rw-r--r--  arch/arm/mach-prima2/hotplug.c | 3
-rw-r--r--  arch/arm/mach-realview/hotplug.c | 2
-rw-r--r--  arch/arm/mach-shmobile/smp-sh73a0.c | 8
-rw-r--r--  arch/arm/mach-spear/hotplug.c | 2
-rw-r--r--  arch/arm/mach-tegra/board-harmony-pcie.c | 6
-rw-r--r--  arch/arm/mach-tegra/common.h | 1
-rw-r--r--  arch/arm/mach-tegra/hotplug.c | 10
-rw-r--r--  arch/arm/mach-tegra/platsmp.c | 1
-rw-r--r--  arch/arm/mach-tegra/tegra2_emc.c | 2
-rw-r--r--  arch/arm/mach-ux500/cpu.c | 5
-rw-r--r--  arch/arm/mach-ux500/hotplug.c | 3
-rw-r--r--  arch/arm/mach-vexpress/hotplug.c | 2
-rw-r--r--  arch/arm/mm/Kconfig | 9
-rw-r--r--  arch/arm/mm/alignment.c | 2
-rw-r--r--  arch/arm/mm/dma-mapping.c | 15
-rw-r--r--  arch/arm/mm/flush.c | 15
-rw-r--r--  arch/arm/mm/mmu.c | 17
-rw-r--r--  arch/arm/mm/proc-v6.S | 2
-rw-r--r--  arch/arm/mm/proc-v7-2level.S | 3
-rw-r--r--  arch/arm/mm/proc-v7-3level.S | 3
-rw-r--r--  arch/arm/mm/proc-v7.S | 7
-rw-r--r--  arch/arm/plat-omap/dmtimer.c | 12
-rw-r--r--  arch/arm/tools/mach-types | 991
 94 files changed, 2378 insertions(+), 1210 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4ed24b4aa714..62079d434581 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -59,6 +59,7 @@ config ARM
 	select CLONE_BACKWARDS
 	select OLD_SIGSUSPEND3
 	select OLD_SIGACTION
+	select HAVE_CONTEXT_TRACKING
 	help
 	  The ARM series is a line of low-power-consumption RISC chip designs
 	  licensed by ARM Ltd and targeted at embedded applications and
@@ -1479,6 +1480,14 @@ config HAVE_ARM_TWD
 	help
 	  This options enables support for the ARM timer and watchdog unit
 
+config MCPM
+	bool "Multi-Cluster Power Management"
+	depends on CPU_V7 && SMP
+	help
+	  This option provides the common power management infrastructure
+	  for (multi-)cluster based systems, such as big.LITTLE based
+	  systems.
+
 choice
 	prompt "Memory split"
 	default VMSPLIT_3G
@@ -1565,8 +1574,9 @@ config SCHED_HRTICK
 	def_bool HIGH_RES_TIMERS
 
 config THUMB2_KERNEL
-	bool "Compile the kernel in Thumb-2 mode"
+	bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY
 	depends on CPU_V7 && !CPU_V6 && !CPU_V6K
+	default y if CPU_THUMBONLY
 	select AEABI
 	select ARM_ASM_UNIFIED
 	select ARM_UNWIND
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 54d6fdc03e04..5c8e59f6a6f4 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -641,6 +641,17 @@ config DEBUG_LL_INCLUDE
 	default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
 	default "mach/debug-macro.S"
 
+config DEBUG_UNCOMPRESS
+	bool
+	default y if ARCH_MULTIPLATFORM && DEBUG_LL && \
+		     !DEBUG_OMAP2PLUS_UART && \
+		     !DEBUG_TEGRA_UART
+
+config UNCOMPRESS_INCLUDE
+	string
+	default "debug/uncompress.h" if ARCH_MULTIPLATFORM
+	default "mach/uncompress.h"
+
 config EARLY_PRINTK
 	bool "Early printk"
 	depends on DEBUG_LL
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index afed28e37ea5..3580d57ea218 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -24,6 +24,9 @@ endif
 AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
 HEAD	= head.o
 OBJS	+= misc.o decompress.o
+ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y)
+OBJS	+= debug.o
+endif
 FONTC	= $(srctree)/drivers/video/console/font_acorn_8x8.c
 
 # string library code (-Os is enforced to keep it much smaller)
diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S
new file mode 100644
index 000000000000..6e8382d5b7a4
--- /dev/null
+++ b/arch/arm/boot/compressed/debug.S
@@ -0,0 +1,12 @@
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+#include CONFIG_DEBUG_LL_INCLUDE
+
+ENTRY(putc)
+	addruart r1, r2, r3
+	waituart r3, r1
+	senduart r0, r1
+	busyuart r3, r1
+	mov	 pc, lr
+ENDPROC(putc)
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index df899834d84e..31bd43b82095 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -25,13 +25,7 @@ unsigned int __machine_arch_type;
 static void putstr(const char *ptr);
 extern void error(char *x);
 
-#ifdef CONFIG_ARCH_MULTIPLATFORM
-static inline void putc(int c) {}
-static inline void flush(void) {}
-static inline void arch_decomp_setup(void) {}
-#else
-#include <mach/uncompress.h>
-#endif
+#include CONFIG_UNCOMPRESS_INCLUDE
 
 #ifdef CONFIG_DEBUG_ICEDCC
 
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index dc8dd0de5c0f..53e68b163196 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -11,3 +11,6 @@ obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
 obj-$(CONFIG_SHARP_SCOOP)	+= scoop.o
 obj-$(CONFIG_PCI_HOST_ITE8152)	+= it8152.o
 obj-$(CONFIG_ARM_TIMER_SP804)	+= timer-sp.o
+obj-$(CONFIG_MCPM)		+= mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
+AFLAGS_mcpm_head.o		:= -march=armv7-a
+AFLAGS_vlock.o			:= -march=armv7-a
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
new file mode 100644
index 000000000000..370236dd1a03
--- /dev/null
+++ b/arch/arm/common/mcpm_entry.c
@@ -0,0 +1,263 @@
+/*
+ * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
+ *
+ * Created by:  Nicolas Pitre, March 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+
+#include <asm/mcpm.h>
+#include <asm/cacheflush.h>
+#include <asm/idmap.h>
+#include <asm/cputype.h>
+
+extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
+
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
+{
+	unsigned long val = ptr ? virt_to_phys(ptr) : 0;
+	mcpm_entry_vectors[cluster][cpu] = val;
+	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
+}
+
+static const struct mcpm_platform_ops *platform_ops;
+
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
+{
+	if (platform_ops)
+		return -EBUSY;
+	platform_ops = ops;
+	return 0;
+}
+
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
+{
+	if (!platform_ops)
+		return -EUNATCH; /* try not to shadow power_up errors */
+	might_sleep();
+	return platform_ops->power_up(cpu, cluster);
+}
+
+typedef void (*phys_reset_t)(unsigned long);
+
+void mcpm_cpu_power_down(void)
+{
+	phys_reset_t phys_reset;
+
+	BUG_ON(!platform_ops);
+	BUG_ON(!irqs_disabled());
+
+	/*
+	 * Do this before calling into the power_down method,
+	 * as it might not always be safe to do afterwards.
+	 */
+	setup_mm_for_reboot();
+
+	platform_ops->power_down();
+
+	/*
+	 * It is possible for a power_up request to happen concurrently
+	 * with a power_down request for the same CPU. In this case the
+	 * power_down method might not be able to actually enter a
+	 * powered down state with the WFI instruction if the power_up
+	 * method has removed the required reset condition.  The
+	 * power_down method is then allowed to return. We must perform
+	 * a re-entry in the kernel as if the power_up method just had
+	 * deasserted reset on the CPU.
+	 *
+	 * To simplify race issues, the platform specific implementation
+	 * must accommodate for the possibility of unordered calls to
+	 * power_down and power_up with a usage count. Therefore, if a
+	 * call to power_up is issued for a CPU that is not down, then
+	 * the next call to power_down must not attempt a full shutdown
+	 * but only do the minimum (normally disabling L1 cache and CPU
+	 * coherency) and return just as if a concurrent power_up request
+	 * had happened as described above.
+	 */
+
+	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+	phys_reset(virt_to_phys(mcpm_entry_point));
+
+	/* should never get here */
+	BUG();
+}
+
+void mcpm_cpu_suspend(u64 expected_residency)
+{
+	phys_reset_t phys_reset;
+
+	BUG_ON(!platform_ops);
+	BUG_ON(!irqs_disabled());
+
+	/* Very similar to mcpm_cpu_power_down() */
+	setup_mm_for_reboot();
+	platform_ops->suspend(expected_residency);
+	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+	phys_reset(virt_to_phys(mcpm_entry_point));
+	BUG();
+}
+
+int mcpm_cpu_powered_up(void)
+{
+	if (!platform_ops)
+		return -EUNATCH;
+	if (platform_ops->powered_up)
+		platform_ops->powered_up();
+	return 0;
+}
+
+struct sync_struct mcpm_sync;
+
+/*
+ * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
+ *    This must be called at the point of committing to teardown of a CPU.
+ *    The CPU cache (SCTRL.C bit) is expected to still be active.
+ */
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
+{
+	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+}
+
+/*
+ * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
+ *    cluster can be torn down without disrupting this CPU.
+ *    To avoid deadlocks, this must be called before a CPU is powered down.
+ *    The CPU cache (SCTRL.C bit) is expected to be off.
+ *    However L2 cache might or might not be active.
+ */
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
+{
+	dmb();
+	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+	dsb_sev();
+}
+
+/*
+ * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
+ * @state: the final state of the cluster:
+ *     CLUSTER_UP: no destructive teardown was done and the cluster has been
+ *         restored to the previous state (CPU cache still active); or
+ *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
+ *         (CPU cache disabled, L2 cache either enabled or disabled).
+ */
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
+{
+	dmb();
+	mcpm_sync.clusters[cluster].cluster = state;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
+	dsb_sev();
+}
+
+/*
+ * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
+ * This function should be called by the last man, after local CPU teardown
+ * is complete.  CPU cache expected to be active.
+ *
+ * Returns:
+ *     false: the critical section was not entered because an inbound CPU was
+ *         observed, or the cluster is already being set up;
+ *     true: the critical section was entered: it is now safe to tear down the
+ *         cluster.
+ */
+bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
+{
+	unsigned int i;
+	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
+
+	/* Warn inbound CPUs that the cluster is being torn down: */
+	c->cluster = CLUSTER_GOING_DOWN;
+	sync_cache_w(&c->cluster);
+
+	/* Back out if the inbound cluster is already in the critical region: */
+	sync_cache_r(&c->inbound);
+	if (c->inbound == INBOUND_COMING_UP)
+		goto abort;
+
+	/*
+	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
+	 * teardown is complete on each CPU before tearing down the cluster.
+	 *
+	 * If any CPU has been woken up again from the DOWN state, then we
+	 * shouldn't be taking the cluster down at all: abort in that case.
+	 */
+	sync_cache_r(&c->cpus);
+	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
+		int cpustate;
+
+		if (i == cpu)
+			continue;
+
+		while (1) {
+			cpustate = c->cpus[i].cpu;
+			if (cpustate != CPU_GOING_DOWN)
+				break;
+
+			wfe();
+			sync_cache_r(&c->cpus[i].cpu);
+		}
+
+		switch (cpustate) {
+		case CPU_DOWN:
+			continue;
+
+		default:
+			goto abort;
+		}
+	}
+
+	return true;
+
+abort:
+	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
+	return false;
+}
+
+int __mcpm_cluster_state(unsigned int cluster)
+{
+	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
+	return mcpm_sync.clusters[cluster].cluster;
+}
+
+extern unsigned long mcpm_power_up_setup_phys;
+
+int __init mcpm_sync_init(
+	void (*power_up_setup)(unsigned int affinity_level))
+{
+	unsigned int i, j, mpidr, this_cluster;
+
+	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
+	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));
+
+	/*
+	 * Set initial CPU and cluster states.
+	 * Only one cluster is assumed to be active at this point.
+	 */
+	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
+		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
+		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
+		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
+			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
+	}
+	mpidr = read_cpuid_mpidr();
+	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	for_each_online_cpu(i)
+		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
+	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
+	sync_cache_w(&mcpm_sync);
+
+	if (power_up_setup) {
+		mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
+		sync_cache_w(&mcpm_power_up_setup_phys);
+	}
+
+	return 0;
+}
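The API above assumes that exactly one platform backend registers its low-level power methods at boot. A minimal sketch of such a backend follows; it is illustrative only, and every myplat_* identifier is hypothetical rather than anything defined by this patch:

#include <linux/init.h>
#include <asm/mcpm.h>

static int myplat_power_up(unsigned int cpu, unsigned int cluster)
{
	/* Deassert reset / ungate power for the given CPU and cluster. */
	return 0;
}

static void myplat_power_down(void)
{
	/*
	 * Tear down this CPU (and the cluster if last man), then WFI.
	 * Must tolerate a racing power_up, as the comment in
	 * mcpm_cpu_power_down() above requires.
	 */
}

static const struct mcpm_platform_ops myplat_power_ops = {
	.power_up	= myplat_power_up,
	.power_down	= myplat_power_down,
};

static int __init myplat_mcpm_init(void)
{
	/* Returns -EBUSY if another backend already registered. */
	return mcpm_platform_register(&myplat_power_ops);
}
early_initcall(myplat_mcpm_init);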
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
new file mode 100644
index 000000000000..8178705c4b24
--- /dev/null
+++ b/arch/arm/common/mcpm_head.S
@@ -0,0 +1,219 @@
+/*
+ * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
+ *
+ * Created by:  Nicolas Pitre, March 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * Refer to Documentation/arm/cluster-pm-race-avoidance.txt
+ * for details of the synchronisation algorithms used here.
+ */
+
+#include <linux/linkage.h>
+#include <asm/mcpm.h>
+
+#include "vlock.h"
+
+.if MCPM_SYNC_CLUSTER_CPUS
+.error "cpus must be the first member of struct mcpm_sync_struct"
+.endif
+
+	.macro	pr_dbg	string
+#if defined(CONFIG_DEBUG_LL) && defined(DEBUG)
+	b	1901f
+1902:	.asciz	"CPU"
+1903:	.asciz	" cluster"
+1904:	.asciz	": \string"
+	.align
+1901:	adr	r0, 1902b
+	bl	printascii
+	mov	r0, r9
+	bl	printhex8
+	adr	r0, 1903b
+	bl	printascii
+	mov	r0, r10
+	bl	printhex8
+	adr	r0, 1904b
+	bl	printascii
+#endif
+	.endm
+
+	.arm
+	.align
+
+ENTRY(mcpm_entry_point)
+
+ THUMB(	adr	r12, BSYM(1f)	)
+ THUMB(	bx	r12		)
+ THUMB(	.thumb			)
+1:
+	mrc	p15, 0, r0, c0, c0, 5		@ MPIDR
+	ubfx	r9, r0, #0, #8			@ r9 = cpu
+	ubfx	r10, r0, #8, #8			@ r10 = cluster
+	mov	r3, #MAX_CPUS_PER_CLUSTER
+	mla	r4, r3, r10, r9			@ r4 = canonical CPU index
+	cmp	r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS)
+	blo	2f
+
+	/* We didn't expect this CPU.  Try to cheaply make it quiet. */
+1:	wfi
+	wfe
+	b	1b
+
+2:	pr_dbg	"kernel mcpm_entry_point\n"
+
+	/*
+	 * MMU is off so we need to get to various variables in a
+	 * position independent way.
+	 */
+	adr	r5, 3f
+	ldmia	r5, {r6, r7, r8, r11}
+	add	r6, r5, r6			@ r6 = mcpm_entry_vectors
+	ldr	r7, [r5, r7]			@ r7 = mcpm_power_up_setup_phys
+	add	r8, r5, r8			@ r8 = mcpm_sync
+	add	r11, r5, r11			@ r11 = first_man_locks
+
+	mov	r0, #MCPM_SYNC_CLUSTER_SIZE
+	mla	r8, r0, r10, r8			@ r8 = sync cluster base
+
+	@ Signal that this CPU is coming UP:
+	mov	r0, #CPU_COMING_UP
+	mov	r5, #MCPM_SYNC_CPU_SIZE
+	mla	r5, r9, r5, r8			@ r5 = sync cpu address
+	strb	r0, [r5]
+
+	@ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
+	@ state, because there is at least one active CPU (this CPU).
+
+	mov	r0, #VLOCK_SIZE
+	mla	r11, r0, r10, r11		@ r11 = cluster first man lock
+	mov	r0, r11
+	mov	r1, r9				@ cpu
+	bl	vlock_trylock			@ implies DMB
+
+	cmp	r0, #0				@ failed to get the lock?
+	bne	mcpm_setup_wait			@ wait for cluster setup if so
+
+	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+	cmp	r0, #CLUSTER_UP			@ cluster already up?
+	bne	mcpm_setup			@ if not, set up the cluster
+
+	@ Otherwise, release the first man lock and skip setup:
+	mov	r0, r11
+	bl	vlock_unlock
+	b	mcpm_setup_complete
+
+mcpm_setup:
+	@ Control dependency implies strb not observable before previous ldrb.
+
+	@ Signal that the cluster is being brought up:
+	mov	r0, #INBOUND_COMING_UP
+	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
+	dmb
+
+	@ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
+	@ point onwards will observe INBOUND_COMING_UP and abort.
+
+	@ Wait for any previously-pending cluster teardown operations to abort
+	@ or complete:
+mcpm_teardown_wait:
+	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+	cmp	r0, #CLUSTER_GOING_DOWN
+	bne	first_man_setup
+	wfe
+	b	mcpm_teardown_wait
+
+first_man_setup:
+	dmb
+
+	@ If the outbound gave up before teardown started, skip cluster setup:
+
+	cmp	r0, #CLUSTER_UP
+	beq	mcpm_setup_leave
+
+	@ power_up_setup is now responsible for setting up the cluster:
+
+	cmp	r7, #0
+	mov	r0, #1		@ second (cluster) affinity level
+	blxne	r7		@ Call power_up_setup if defined
+	dmb
+
+	mov	r0, #CLUSTER_UP
+	strb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+	dmb
+
+mcpm_setup_leave:
+	@ Leave the cluster setup critical section:
+
+	mov	r0, #INBOUND_NOT_COMING_UP
+	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
+	dsb
+	sev
+
+	mov	r0, r11
+	bl	vlock_unlock	@ implies DMB
+	b	mcpm_setup_complete
+
+	@ In the contended case, non-first men wait here for cluster setup
+	@ to complete:
+mcpm_setup_wait:
+	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+	cmp	r0, #CLUSTER_UP
+	wfene
+	bne	mcpm_setup_wait
+	dmb
+
+mcpm_setup_complete:
+	@ If a platform-specific CPU setup hook is needed, it is
+	@ called from here.
+
+	cmp	r7, #0
+	mov	r0, #0		@ first (CPU) affinity level
+	blxne	r7		@ Call power_up_setup if defined
+	dmb
+
+	@ Mark the CPU as up:
+
+	mov	r0, #CPU_UP
+	strb	r0, [r5]
+
+	@ Observability order of CPU_UP and opening of the gate does not matter.
+
+mcpm_entry_gated:
+	ldr	r5, [r6, r4, lsl #2]		@ r5 = CPU entry vector
+	cmp	r5, #0
+	wfeeq
+	beq	mcpm_entry_gated
+	dmb
+
+	pr_dbg	"released\n"
+	bx	r5
+
+	.align	2
+
+3:	.word	mcpm_entry_vectors - .
+	.word	mcpm_power_up_setup_phys - 3b
+	.word	mcpm_sync - 3b
+	.word	first_man_locks - 3b
+
+ENDPROC(mcpm_entry_point)
+
+	.bss
+
+	.align	CACHE_WRITEBACK_ORDER
+	.type	first_man_locks, #object
+first_man_locks:
+	.space	VLOCK_SIZE * MAX_NR_CLUSTERS
+	.align	CACHE_WRITEBACK_ORDER
+
+	.type	mcpm_entry_vectors, #object
+ENTRY(mcpm_entry_vectors)
+	.space	4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+
+	.type	mcpm_power_up_setup_phys, #object
+ENTRY(mcpm_power_up_setup_phys)
+	.space	4		@ set by mcpm_sync_init()
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
new file mode 100644
index 000000000000..52b88d81b7bb
--- /dev/null
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -0,0 +1,92 @@
+/*
+ * linux/arch/arm/mach-vexpress/mcpm_platsmp.c
+ *
+ * Created by:  Nicolas Pitre, November 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Code to handle secondary CPU bringup and hotplug for the cluster power API.
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/mcpm.h>
+#include <asm/smp.h>
+#include <asm/smp_plat.h>
+
+static void __init simple_smp_init_cpus(void)
+{
+}
+
+static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	unsigned int mpidr, pcpu, pcluster, ret;
+	extern void secondary_startup(void);
+
+	mpidr = cpu_logical_map(cpu);
+	pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
+		 __func__, cpu, pcpu, pcluster);
+
+	mcpm_set_entry_vector(pcpu, pcluster, NULL);
+	ret = mcpm_cpu_power_up(pcpu, pcluster);
+	if (ret)
+		return ret;
+	mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
+	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+	dsb_sev();
+	return 0;
+}
+
+static void __cpuinit mcpm_secondary_init(unsigned int cpu)
+{
+	mcpm_cpu_powered_up();
+	gic_secondary_init(0);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int mcpm_cpu_disable(unsigned int cpu)
+{
+	/*
+	 * We assume all CPUs may be shut down.
+	 * This would be the hook to use for eventual Secure
+	 * OS migration requests as described in the PSCI spec.
+	 */
+	return 0;
+}
+
+static void mcpm_cpu_die(unsigned int cpu)
+{
+	unsigned int mpidr, pcpu, pcluster;
+	mpidr = read_cpuid_mpidr();
+	pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	mcpm_set_entry_vector(pcpu, pcluster, NULL);
+	mcpm_cpu_power_down();
+}
+
+#endif
+
+static struct smp_operations __initdata mcpm_smp_ops = {
+	.smp_init_cpus		= simple_smp_init_cpus,
+	.smp_boot_secondary	= mcpm_boot_secondary,
+	.smp_secondary_init	= mcpm_secondary_init,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_disable		= mcpm_cpu_disable,
+	.cpu_die		= mcpm_cpu_die,
+#endif
+};
+
+void __init mcpm_smp_set_ops(void)
+{
+	smp_set_ops(&mcpm_smp_ops);
+}
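These generic SMP operations only take effect once a platform opts in by calling mcpm_smp_set_ops(). A platform would normally do this from its SMP/machine init code once an MCPM backend is available; the initcall below is only an illustrative sketch with a hypothetical name:

#include <linux/init.h>
#include <asm/mcpm.h>

static int __init myplat_use_mcpm_smp(void)
{
	mcpm_smp_set_ops();	/* route secondary boot and hotplug via MCPM */
	return 0;
}
early_initcall(myplat_use_mcpm_smp);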
diff --git a/arch/arm/common/vlock.S b/arch/arm/common/vlock.S
new file mode 100644
index 000000000000..ff198583f683
--- /dev/null
+++ b/arch/arm/common/vlock.S
@@ -0,0 +1,108 @@
+/*
+ * vlock.S - simple voting lock implementation for ARM
+ *
+ * Created by:	Dave Martin, 2012-08-16
+ * Copyright:	(C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * This algorithm is described in more detail in
+ * Documentation/arm/vlocks.txt.
+ */
+
+#include <linux/linkage.h>
+#include "vlock.h"
+
+/* Select different code if voting flags can fit in a single word. */
+#if VLOCK_VOTING_SIZE > 4
+#define FEW(x...)
+#define MANY(x...) x
+#else
+#define FEW(x...) x
+#define MANY(x...)
+#endif
+
+@ voting lock for first-man coordination
+
+.macro voting_begin rbase:req, rcpu:req, rscratch:req
+	mov	\rscratch, #1
+	strb	\rscratch, [\rbase, \rcpu]
+	dmb
+.endm
+
+.macro voting_end rbase:req, rcpu:req, rscratch:req
+	dmb
+	mov	\rscratch, #0
+	strb	\rscratch, [\rbase, \rcpu]
+	dsb
+	sev
+.endm
+
+/*
+ * The vlock structure must reside in Strongly-Ordered or Device memory.
+ * This implementation deliberately eliminates most of the barriers which
+ * would be required for other memory types, and assumes that independent
+ * writes to neighbouring locations within a cacheline do not interfere
+ * with one another.
+ */
+
+@ r0: lock structure base
+@ r1: CPU ID (0-based index within cluster)
+ENTRY(vlock_trylock)
+	add	r1, r1, #VLOCK_VOTING_OFFSET
+
+	voting_begin	r0, r1, r2
+
+	ldrb	r2, [r0, #VLOCK_OWNER_OFFSET]	@ check whether lock is held
+	cmp	r2, #VLOCK_OWNER_NONE
+	bne	trylock_fail			@ fail if so
+
+	@ Control dependency implies strb not observable before previous ldrb.
+
+	strb	r1, [r0, #VLOCK_OWNER_OFFSET]	@ submit my vote
+
+	voting_end	r0, r1, r2		@ implies DMB
+
+	@ Wait for the current round of voting to finish:
+
+ MANY(	mov	r3, #VLOCK_VOTING_OFFSET			)
+0:
+ MANY(	ldr	r2, [r0, r3]					)
+ FEW(	ldr	r2, [r0, #VLOCK_VOTING_OFFSET]			)
+	cmp	r2, #0
+	wfene
+	bne	0b
+ MANY(	add	r3, r3, #4					)
+ MANY(	cmp	r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE	)
+ MANY(	bne	0b						)
+
+	@ Check who won:
+
+	dmb
+	ldrb	r2, [r0, #VLOCK_OWNER_OFFSET]
+	eor	r0, r1, r2			@ zero if I won, else nonzero
+	bx	lr
+
+trylock_fail:
+	voting_end	r0, r1, r2
+	mov	r0, #1				@ nonzero indicates that I lost
+	bx	lr
+ENDPROC(vlock_trylock)
+
+@ r0: lock structure base
+ENTRY(vlock_unlock)
+	dmb
+	mov	r1, #VLOCK_OWNER_NONE
+	strb	r1, [r0, #VLOCK_OWNER_OFFSET]
+	dsb
+	sev
+	bx	lr
+ENDPROC(vlock_unlock)
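For readers more comfortable in C, here is a sketch of the same control flow as vlock_trylock above. It deliberately ignores the memory-type and barrier requirements that force the real implementation into assembly (the lock must live in Strongly-Ordered or Device memory and run with caches off); NR_VOTERS and the "cpu + 1" owner encoding are assumptions standing in for MAX_CPUS_PER_CLUSTER and the cpu + VLOCK_VOTING_OFFSET encoding:

#define NR_VOTERS	4	/* assumption: mirrors MAX_CPUS_PER_CLUSTER */
#define OWNER_NONE	0	/* mirrors VLOCK_OWNER_NONE */

struct vlock_model {
	volatile unsigned char owner;			/* VLOCK_OWNER_OFFSET */
	volatile unsigned char voting[NR_VOTERS];	/* VLOCK_VOTING_OFFSET */
};

/* Returns 0 if this CPU won the lock, nonzero otherwise. */
static int vlock_trylock_model(struct vlock_model *v, unsigned int cpu)
{
	unsigned char me = cpu + 1;	/* nonzero ID, like cpu + VLOCK_VOTING_OFFSET */
	unsigned int i;

	v->voting[cpu] = 1;		/* voting_begin: announce candidacy */
	if (v->owner != OWNER_NONE) {	/* lock already held: bail out */
		v->voting[cpu] = 0;	/* voting_end */
		return 1;
	}
	v->owner = me;			/* submit my vote: last write wins */
	v->voting[cpu] = 0;		/* voting_end */

	for (i = 0; i < NR_VOTERS; i++)	/* wait out the current voting round */
		while (v->voting[i])
			;		/* spin (the assembly uses WFE here) */

	return v->owner != me;		/* zero iff my vote survived */
}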
diff --git a/arch/arm/common/vlock.h b/arch/arm/common/vlock.h
new file mode 100644
index 000000000000..3b441475a59b
--- /dev/null
+++ b/arch/arm/common/vlock.h
@@ -0,0 +1,29 @@
+/*
+ * vlock.h - simple voting lock implementation
+ *
+ * Created by:	Dave Martin, 2012-08-16
+ * Copyright:	(C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __VLOCK_H
+#define __VLOCK_H
+
+#include <asm/mcpm.h>
+
+/* Offsets and sizes are rounded to a word (4 bytes) */
+#define VLOCK_OWNER_OFFSET	0
+#define VLOCK_VOTING_OFFSET	4
+#define VLOCK_VOTING_SIZE	((MAX_CPUS_PER_CLUSTER + 3) / 4 * 4)
+#define VLOCK_SIZE		(VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE)
+#define VLOCK_OWNER_NONE	0
+
+#endif /* ! __VLOCK_H */
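Worked example of the layout arithmetic: with MAX_CPUS_PER_CLUSTER = 4, as set by asm/mcpm.h in this series, VLOCK_VOTING_SIZE = (4 + 3) / 4 * 4 = 4 bytes (one voting byte per CPU, rounded up to a word), so VLOCK_SIZE = 4 + 4 = 8 bytes per lock: an owner byte padded to a word, followed by one word of voting flags. Since VLOCK_VOTING_SIZE is not greater than 4, vlock.S compiles its single-word FEW() path.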
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index c79f61faa3a5..da1c77d39327 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -243,6 +243,29 @@ typedef struct {
 
 #define ATOMIC64_INIT(i) { (i) }
 
+#ifdef CONFIG_ARM_LPAE
+static inline u64 atomic64_read(const atomic64_t *v)
+{
+	u64 result;
+
+	__asm__ __volatile__("@ atomic64_read\n"
+"	ldrd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter), "Qo" (v->counter)
+	);
+
+	return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+	__asm__ __volatile__("@ atomic64_set\n"
+"	strd	%2, %H2, [%1]"
+	: "=Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	);
+}
+#else
 static inline u64 atomic64_read(const atomic64_t *v)
 {
 	u64 result;
@@ -269,6 +292,7 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
+#endif
 
 static inline void atomic64_add(u64 i, atomic64_t *v)
 {
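The LPAE variants work because the ARMv7 LPAE extension guarantees that ldrd/strd accesses to 64-bit-aligned normal memory are single-copy atomic, so no exclusive-monitor (ldrexd/strexd) loop is needed just to read or set the counter; the #else branch keeps the exclusive-based sequence for non-LPAE CPUs. A trivial usage sketch, with an invented counter name:

static atomic64_t total_bytes = ATOMIC64_INIT(0);

/* On LPAE this compiles to a single ldrd: a tear-free 64-bit snapshot. */
static u64 bytes_snapshot(void)
{
	return atomic64_read(&total_bytes);
}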
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e1489c54cd12..bff71388e72a 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -363,4 +363,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 	flush_cache_all();
 }
 
+/*
+ * Memory synchronization helpers for mixed cached vs non cached accesses.
+ *
+ * Some synchronization algorithms have to set states in memory with the
+ * cache enabled or disabled depending on the code path.  It is crucial
+ * to always ensure proper cache maintenance to update main memory right
+ * away in that case.
+ *
+ * Any cached write must be followed by a cache clean operation.
+ * Any cached read must be preceded by a cache invalidate operation.
+ * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
+ * operation is needed to avoid discarding possible concurrent writes to the
+ * accessed memory.
+ *
+ * Also, in order to prevent a cached writer from interfering with an
+ * adjacent non-cached writer, each state variable must be located to
+ * a separate cache line.
+ */
+
+/*
+ * This needs to be >= the max cache writeback size of all
+ * supported platforms included in the current kernel configuration.
+ * This is used to align state variables to their own cache lines.
+ */
+#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
+#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
+
+/*
+ * There is no __cpuc_clean_dcache_area but we use it anyway for
+ * code intent clarity, and alias it to __cpuc_flush_dcache_area.
+ */
+#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
+
+/*
+ * Ensure preceding writes to *p by this CPU are visible to
+ * subsequent reads by other CPUs:
+ */
+static inline void __sync_cache_range_w(volatile void *p, size_t size)
+{
+	char *_p = (char *)p;
+
+	__cpuc_clean_dcache_area(_p, size);
+	outer_clean_range(__pa(_p), __pa(_p + size));
+}
+
+/*
+ * Ensure preceding writes to *p by other CPUs are visible to
+ * subsequent reads by this CPU.  We must be careful not to
+ * discard data simultaneously written by another CPU, hence the
+ * usage of flush rather than invalidate operations.
+ */
+static inline void __sync_cache_range_r(volatile void *p, size_t size)
+{
+	char *_p = (char *)p;
+
+#ifdef CONFIG_OUTER_CACHE
+	if (outer_cache.flush_range) {
+		/*
+		 * Ensure dirty data migrated from other CPUs into our cache
+		 * are cleaned out safely before the outer cache is cleaned:
+		 */
+		__cpuc_clean_dcache_area(_p, size);
+
+		/* Clean and invalidate stale data for *p from outer ... */
+		outer_flush_range(__pa(_p), __pa(_p + size));
+	}
+#endif
+
+	/* ... and inner cache: */
+	__cpuc_flush_dcache_area(_p, size);
+}
+
+#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
+#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+
 #endif
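These helpers exist precisely for code like mcpm_entry.c above, where one side runs with the MMU and caches on while the other side reads the same variable with caches off. A sketch of the intended pattern (variable and function names here are hypothetical; note the cache-line alignment requirement from the comment block above):

static unsigned long release_addr
	__attribute__((aligned(__CACHE_WRITEBACK_GRANULE)));

/* Cached writer: publish a value so a cache-off reader sees it. */
static void publish_release_addr(unsigned long addr)
{
	release_addr = addr;
	sync_cache_w(&release_addr);	/* clean out to main memory */
}

/* Cached reader: pick up a value last written by a cache-off writer. */
static unsigned long read_release_addr(void)
{
	sync_cache_r(&release_addr);	/* flush any stale cached copy */
	return release_addr;
}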
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 5ef4d8015a60..1f3262e99d81 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -42,6 +42,8 @@
 #define vectors_high()	(0)
 #endif
 
+#ifdef CONFIG_CPU_CP15
+
 extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
 extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 
@@ -82,6 +84,18 @@ static inline void set_copro_access(unsigned int val)
 	isb();
 }
 
-#endif
+#else /* ifdef CONFIG_CPU_CP15 */
+
+/*
+ * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the
+ * minds of the developers). Yielding 0 for machines without a cp15 (and making
+ * it read-only) is fine for most cases and saves quite some #ifdeffery.
+ */
+#define cr_no_alignment	UL(0)
+#define cr_alignment	UL(0)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+#endif /* ifndef __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index ad41ec2471e8..7652712d1d14 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -38,6 +38,24 @@
 #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
 	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
 
+#define ARM_CPU_IMP_ARM			0x41
+#define ARM_CPU_IMP_INTEL		0x69
+
+#define ARM_CPU_PART_ARM1136		0xB360
+#define ARM_CPU_PART_ARM1156		0xB560
+#define ARM_CPU_PART_ARM1176		0xB760
+#define ARM_CPU_PART_ARM11MPCORE	0xB020
+#define ARM_CPU_PART_CORTEX_A8		0xC080
+#define ARM_CPU_PART_CORTEX_A9		0xC090
+#define ARM_CPU_PART_CORTEX_A5		0xC050
+#define ARM_CPU_PART_CORTEX_A15		0xC0F0
+#define ARM_CPU_PART_CORTEX_A7		0xC070
+
+#define ARM_CPU_XSCALE_ARCH_MASK	0xe000
+#define ARM_CPU_XSCALE_ARCH_V1		0x2000
+#define ARM_CPU_XSCALE_ARCH_V2		0x4000
+#define ARM_CPU_XSCALE_ARCH_V3		0x6000
+
 extern unsigned int processor_id;
 
 #ifdef CONFIG_CPU_CP15
@@ -50,6 +68,7 @@ extern unsigned int processor_id;
 		    : "cc");					\
 		__val;						\
 	})
+
 #define read_cpuid_ext(ext_reg)					\
 	({							\
 		unsigned int __val;				\
@@ -59,29 +78,24 @@ extern unsigned int processor_id;
 		    : "cc");					\
 		__val;						\
 	})
-#else
-#define read_cpuid(reg) (processor_id)
-#define read_cpuid_ext(reg) 0
-#endif
 
-#define ARM_CPU_IMP_ARM			0x41
-#define ARM_CPU_IMP_INTEL		0x69
+#else /* ifdef CONFIG_CPU_CP15 */
 
-#define ARM_CPU_PART_ARM1136		0xB360
-#define ARM_CPU_PART_ARM1156		0xB560
-#define ARM_CPU_PART_ARM1176		0xB760
-#define ARM_CPU_PART_ARM11MPCORE	0xB020
-#define ARM_CPU_PART_CORTEX_A8		0xC080
-#define ARM_CPU_PART_CORTEX_A9		0xC090
-#define ARM_CPU_PART_CORTEX_A5		0xC050
-#define ARM_CPU_PART_CORTEX_A15		0xC0F0
-#define ARM_CPU_PART_CORTEX_A7		0xC070
+/*
+ * read_cpuid and read_cpuid_ext should only ever be called on machines that
+ * have cp15 so warn on other usages.
+ */
+#define read_cpuid(reg)						\
+	({							\
+		WARN_ON_ONCE(1);				\
+		0;						\
+	})
 
-#define ARM_CPU_XSCALE_ARCH_MASK	0xe000
-#define ARM_CPU_XSCALE_ARCH_V1		0x2000
-#define ARM_CPU_XSCALE_ARCH_V2		0x4000
-#define ARM_CPU_XSCALE_ARCH_V3		0x6000
+#define read_cpuid_ext(reg) read_cpuid(reg)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
 
+#ifdef CONFIG_CPU_CP15
 /*
  * The CPU ID never changes at run time, so we might as well tell the
  * compiler that it's constant. Use this function to read the CPU ID
@@ -92,6 +106,15 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
 	return read_cpuid(CPUID_ID);
 }
 
+#else /* ifdef CONFIG_CPU_CP15 */
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+	return processor_id;
+}
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
 static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
 {
 	return (read_cpuid_id() & 0xFF000000) >> 24;
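As a usage sketch (not part of this patch), the relocated constants let CPU identification read naturally; this assumes the read_cpuid_part_number() helper already provided elsewhere in this header:

#include <asm/cputype.h>

static bool cpu_is_cortex_a15(void)
{
	return read_cpuid_implementor() == ARM_CPU_IMP_ARM &&
	       read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15;
}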
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 8cacbcda76da..b6e9f2c108b5 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -18,12 +18,12 @@
  * ================
  *
  * We have the following to choose from:
- *	  arm6		- ARM6 style
  *	  arm7		- ARM7 style
  *	  v4_early	- ARMv4 without Thumb early abort handler
  *	  v4t_late	- ARMv4 with Thumb late abort handler
  *	  v4t_early	- ARMv4 with Thumb early abort handler
- *	  v5tej_early	- ARMv5 with Thumb and Java early abort handler
+ *	  v5t_early	- ARMv5 with Thumb early abort handler
+ *	  v5tj_early	- ARMv5 with Thumb and Java early abort handler
  *	  xscale	- ARMv5 with Thumb with Xscale extensions
  *	  v6_early	- ARMv6 generic early abort handler
  *	  v7_early	- ARMv7 generic early abort handler
@@ -39,19 +39,19 @@
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_LV4T
+#ifdef CONFIG_CPU_ABRT_EV4
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v4t_late_abort
+#  define CPU_DABORT_HANDLER v4_early_abort
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV4
+#ifdef CONFIG_CPU_ABRT_LV4T
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v4_early_abort
+#  define CPU_DABORT_HANDLER v4t_late_abort
 # endif
 #endif
 
@@ -63,19 +63,19 @@
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV5TJ
+#ifdef CONFIG_CPU_ABRT_EV5T
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v5tj_early_abort
+#  define CPU_DABORT_HANDLER v5t_early_abort
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV5T
+#ifdef CONFIG_CPU_ABRT_EV5TJ
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v5t_early_abort
+#  define CPU_DABORT_HANDLER v5tj_early_abort
 # endif
 #endif
 
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 7c3d813e15df..124623e5ef14 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -211,4 +211,8 @@
 
 #define HSR_HVC_IMM_MASK	((1UL << 16) - 1)
 
+#define HSR_DABT_S1PTW		(1U << 7)
+#define HSR_DABT_CM		(1U << 8)
+#define HSR_DABT_EA		(1U << 9)
+
 #endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index e4956f4e23e1..18d50322a9e2 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[];
75extern void __kvm_tlb_flush_vmid(struct kvm *kvm); 75extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
76 76
77extern void __kvm_flush_vm_context(void); 77extern void __kvm_flush_vm_context(void);
78extern void __kvm_tlb_flush_vmid(struct kvm *kvm); 78extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
79 79
80extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); 80extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
81#endif 81#endif
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index fd611996bfb5..82b4babead2c 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -22,11 +22,12 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
+#include <asm/kvm_arm.h>
 
-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+bool kvm_condition_valid(struct kvm_vcpu *vcpu);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
 {
-	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
+	return &vcpu->arch.regs.usr_regs.ARM_pc;
 }
 
-static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
 {
-	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
+	return &vcpu->arch.regs.usr_regs.ARM_cpsr;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
 	return reg == 15;
 }
 
+static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hsr;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hxfar;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
+{
+	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
+}
+
+static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hyp_pc;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
+}
+
+static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+}
+
+static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+{
+	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+}
+
+/* Get Access Size from a data abort */
+static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+{
+	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
+	case 0:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	default:
+		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+		return -EFAULT;
+	}
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+}
+
+static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
+}
+
+static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
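To see how these accessors are meant to be used, here is a sketch of an MMIO data-abort decode step modelled on KVM/ARM's io_mem_abort() path. The kvm_exit_mmio fields follow asm/kvm_mmio.h as of this series; treat the function itself as an illustration, not the patch's own code:

static int decode_mmio_dabt(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t fault_ipa)
{
	int len;

	if (!kvm_vcpu_dabt_isvalid(vcpu))
		return -EINVAL;			/* no valid ISS to decode */

	len = kvm_vcpu_dabt_get_as(vcpu);	/* access size: 1, 2 or 4 bytes */
	if (len < 0)
		return len;

	mmio->phys_addr = fault_ipa;
	mmio->len = len;
	mmio->is_write = kvm_vcpu_dabt_iswrite(vcpu);
	return 0;
}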
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d1736a53b12d..0c4e643d939e 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache {
 	void *objects[KVM_NR_MEM_OBJS];
 };
 
+struct kvm_vcpu_fault_info {
+	u32 hsr;		/* Hyp Syndrome Register */
+	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */
+	u32 hpfar;		/* Hyp IPA Fault Address Register */
+	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
+};
+
+typedef struct vfp_hard_struct kvm_kernel_vfp_t;
+
 struct kvm_vcpu_arch {
 	struct kvm_regs regs;
 
@@ -93,13 +102,11 @@ struct kvm_vcpu_arch {
 	u32 midr;
 
 	/* Exception Information */
-	u32 hsr;		/* Hyp Syndrome Register */
-	u32 hxfar;		/* Hyp Data/Inst Fault Address Register */
-	u32 hpfar;		/* Hyp IPA Fault Address Register */
+	struct kvm_vcpu_fault_info fault;
 
 	/* Floating point registers (VFP and Advanced SIMD/NEON) */
-	struct vfp_hard_struct vfp_guest;
-	struct vfp_hard_struct *vfp_host;
+	kvm_kernel_vfp_t vfp_guest;
+	kvm_kernel_vfp_t *vfp_host;
 
 	/* VGIC state */
 	struct vgic_cpu vgic_cpu;
@@ -122,9 +129,6 @@ struct kvm_vcpu_arch {
 	/* Interrupt related fields */
 	u32 irq_lines;		/* IRQ and FIQ levels */
 
-	/* Hyp exception information */
-	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
@@ -181,4 +185,26 @@ struct kvm_one_reg;
 int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		int exception_index);
+
+static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr,
+				       unsigned long hyp_stack_ptr,
+				       unsigned long vector_ptr)
+{
+	unsigned long pgd_low, pgd_high;
+
+	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
+	pgd_high = (pgd_ptr >> 32ULL);
+
+	/*
+	 * Call initialization code, and switch to the full blown
+	 * HYP code. The init code doesn't need to preserve these registers as
+	 * r1-r3 and r12 are already callee save according to the AAPCS.
+	 * Note that we slightly misuse the prototype by casting the pgd_low to
+	 * a void *.
+	 */
+	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+}
+
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 421a20b34874..970f3b5fa109 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -19,6 +19,18 @@
 #ifndef __ARM_KVM_MMU_H__
 #define __ARM_KVM_MMU_H__
 
+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+#include <asm/idmap.h>
+
+/*
+ * We directly use the kernel VA for the HYP, as we can directly share
+ * the mapping (HTTBR "covers" TTBR1).
+ */
+#define HYP_PAGE_OFFSET_MASK	(~0UL)
+#define HYP_PAGE_OFFSET		PAGE_OFFSET
+#define KERN_TO_HYP(kva)	(kva)
+
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_hyp_pmds(void);
@@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
+static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+	pte_val(*pte) = new_pte;
+	/*
+	 * flush_pmd_entry just takes a void pointer and cleans the necessary
+	 * cache entries, so we can reuse the function for ptes.
+	 */
+	flush_pmd_entry(pte);
+}
+
 static inline bool kvm_is_write_fault(unsigned long hsr)
 {
 	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
@@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr)
 	return true;
 }
 
+static inline void kvm_clean_pgd(pgd_t *pgd)
+{
+	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+}
+
+static inline void kvm_clean_pmd_entry(pmd_t *pmd)
+{
+	clean_pmd_entry(pmd);
+}
+
+static inline void kvm_clean_pte(pte_t *pte)
+{
+	clean_pte_table(pte);
+}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+	pte_val(*pte) |= L_PTE_S2_RDWR;
+}
+
+struct kvm;
+
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+{
+	/*
+	 * If we are going to insert an instruction page and the icache is
+	 * either VIPT or PIPT, there is a potential problem where the host
+	 * (or another VM) may have used the same page as this guest, and we
+	 * read incorrect data from the icache.  If we're using a PIPT cache,
+	 * we can invalidate just that page, but if we are using a VIPT cache
+	 * we need to invalidate the entire icache - damn shame - as written
+	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+	 *
+	 * VIVT caches are tagged using both the ASID and the VMID and don't
+	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 */
+	if (icache_is_pipt()) {
+		unsigned long hva = gfn_to_hva(kvm, gfn);
+		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+	} else if (!icache_is_vivt_asid_tagged()) {
+		/* any kind of VIPT cache */
+		__flush_icache_all();
+	}
+}
+
 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
index ab97207d9cd3..343744e4809c 100644
--- a/arch/arm/include/asm/kvm_vgic.h
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -21,7 +21,6 @@
 
 #include <linux/kernel.h>
 #include <linux/kvm.h>
-#include <linux/kvm_host.h>
 #include <linux/irqreturn.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 5cf2e979b4be..7d2c3c843801 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -30,6 +30,11 @@ struct hw_pci {
 	void		(*postinit)(void);
 	u8		(*swizzle)(struct pci_dev *dev, u8 *pin);
 	int		(*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
+	resource_size_t (*align_resource)(struct pci_dev *dev,
+					  const struct resource *res,
+					  resource_size_t start,
+					  resource_size_t size,
+					  resource_size_t align);
 };
 
 /*
@@ -51,6 +56,12 @@ struct pci_sys_data {
 	u8		(*swizzle)(struct pci_dev *, u8 *);
 	/* IRQ mapping */
 	int		(*map_irq)(const struct pci_dev *, u8, u8);
+	/* Resource alignment requirements */
+	resource_size_t (*align_resource)(struct pci_dev *dev,
+					  const struct resource *res,
+					  resource_size_t start,
+					  resource_size_t size,
+					  resource_size_t align);
 	void		*private_data;	/* platform controller private data */
 };
 
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
new file mode 100644
index 000000000000..0f7b7620e9a5
--- /dev/null
+++ b/arch/arm/include/asm/mcpm.h
@@ -0,0 +1,209 @@
1/*
2 * arch/arm/include/asm/mcpm.h
3 *
4 * Created by: Nicolas Pitre, April 2012
5 * Copyright: (C) 2012-2013 Linaro Limited
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef MCPM_H
13#define MCPM_H
14
15/*
16 * Maximum number of possible clusters / CPUs per cluster.
17 *
18 * This should be sufficient for quite a while, while keeping the
19 * (assembly) code simpler. When this starts to grow then we'll have
20 * to consider dynamic allocation.
21 */
22#define MAX_CPUS_PER_CLUSTER 4
23#define MAX_NR_CLUSTERS 2
24
25#ifndef __ASSEMBLY__
26
27#include <linux/types.h>
28#include <asm/cacheflush.h>
29
30/*
31 * Platform specific code should use this symbol to set up secondary
32 * entry location for processors to use when released from reset.
33 */
34extern void mcpm_entry_point(void);
35
36/*
37 * This is used to indicate where the given CPU from given cluster should
38 * branch once it is ready to re-enter the kernel using ptr, or NULL if it
39 * should be gated. A gated CPU is held in a WFE loop until its vector
40 * becomes non NULL.
41 */
42void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
43
44/*
45 * CPU/cluster power operations API for higher subsystems to use.
46 */
47
48/**
 49 * mcpm_cpu_power_up - make the given CPU in the given cluster runnable
50 *
51 * @cpu: CPU number within given cluster
52 * @cluster: cluster number for the CPU
53 *
54 * The identified CPU is brought out of reset. If the cluster was powered
55 * down then it is brought up as well, taking care not to let the other CPUs
56 * in the cluster run, and ensuring appropriate cluster setup.
57 *
58 * Caller must ensure the appropriate entry vector is initialized with
59 * mcpm_set_entry_vector() prior to calling this.
60 *
61 * This must be called in a sleepable context. However, the implementation
62 * is strongly encouraged to return early and let the operation happen
63 * asynchronously, especially when significant delays are expected.
64 *
65 * If the operation cannot be performed then an error code is returned.
66 */
67int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
68
69/**
70 * mcpm_cpu_power_down - power the calling CPU down
71 *
72 * The calling CPU is powered down.
73 *
74 * If this CPU is found to be the "last man standing" in the cluster
75 * then the cluster is prepared for power-down too.
76 *
77 * This must be called with interrupts disabled.
78 *
79 * This does not return. Re-entry in the kernel is expected via
80 * mcpm_entry_point.
81 */
82void mcpm_cpu_power_down(void);
83
84/**
 85 * mcpm_cpu_suspend - bring the calling CPU into a suspended state
86 *
87 * @expected_residency: duration in microseconds the CPU is expected
88 * to remain suspended, or 0 if unknown/infinity.
89 *
90 * The calling CPU is suspended. The expected residency argument is used
91 * as a hint by the platform specific backend to implement the appropriate
92 * sleep state level according to the knowledge it has on wake-up latency
93 * for the given hardware.
94 *
95 * If this CPU is found to be the "last man standing" in the cluster
96 * then the cluster may be prepared for power-down too, if the expected
97 * residency makes it worthwhile.
98 *
99 * This must be called with interrupts disabled.
100 *
101 * This does not return. Re-entry in the kernel is expected via
102 * mcpm_entry_point.
103 */
104void mcpm_cpu_suspend(u64 expected_residency);
105
106/**
 107 * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
108 *
109 * This lets the platform specific backend code perform needed housekeeping
110 * work. This must be called by the newly activated CPU as soon as it is
111 * fully operational in kernel space, before it enables interrupts.
112 *
113 * If the operation cannot be performed then an error code is returned.
114 */
115int mcpm_cpu_powered_up(void);
116
117/*
118 * Platform specific methods used in the implementation of the above API.
119 */
120struct mcpm_platform_ops {
121 int (*power_up)(unsigned int cpu, unsigned int cluster);
122 void (*power_down)(void);
123 void (*suspend)(u64);
124 void (*powered_up)(void);
125};
126
127/**
128 * mcpm_platform_register - register platform specific power methods
129 *
130 * @ops: mcpm_platform_ops structure to register
131 *
132 * An error is returned if the registration has been done previously.
133 */
134int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
135
136/* Synchronisation structures for coordinating safe cluster setup/teardown: */
137
138/*
139 * When modifying this structure, make sure you update the MCPM_SYNC_ defines
140 * to match.
141 */
142struct mcpm_sync_struct {
143 /* individual CPU states */
144 struct {
145 s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
146 } cpus[MAX_CPUS_PER_CLUSTER];
147
148 /* cluster state */
149 s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
150
151 /* inbound-side state */
152 s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
153};
154
155struct sync_struct {
156 struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
157};
158
159extern unsigned long sync_phys; /* physical address of *mcpm_sync */
160
161void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
162void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
163void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
164bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
165int __mcpm_cluster_state(unsigned int cluster);
166
167int __init mcpm_sync_init(
168 void (*power_up_setup)(unsigned int affinity_level));
169
170void __init mcpm_smp_set_ops(void);
171
172#else
173
174/*
175 * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
176 * cannot be included in asm files. Let's work around the conflict like this.
177 */
178#include <asm/asm-offsets.h>
179#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
180
181#endif /* ! __ASSEMBLY__ */
182
183/* Definitions for mcpm_sync_struct */
184#define CPU_DOWN 0x11
185#define CPU_COMING_UP 0x12
186#define CPU_UP 0x13
187#define CPU_GOING_DOWN 0x14
188
189#define CLUSTER_DOWN 0x21
190#define CLUSTER_UP 0x22
191#define CLUSTER_GOING_DOWN 0x23
192
193#define INBOUND_NOT_COMING_UP 0x31
194#define INBOUND_COMING_UP 0x32
195
196/*
197 * Offsets for the mcpm_sync_struct members, for use in asm.
198 * We don't want to make them global to the kernel via asm-offsets.c.
199 */
200#define MCPM_SYNC_CLUSTER_CPUS 0
201#define MCPM_SYNC_CPU_SIZE __CACHE_WRITEBACK_GRANULE
202#define MCPM_SYNC_CLUSTER_CLUSTER \
203 (MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
204#define MCPM_SYNC_CLUSTER_INBOUND \
205 (MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
206#define MCPM_SYNC_CLUSTER_SIZE \
207 (MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
208
209#endif
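
For orientation, here is how a platform backend might attach to the API declared above. This is a minimal sketch, not a real driver: the foo_* names and the power-controller poke are invented, and only the symbols declared in this header (plus standard kernel init/errno headers) are assumed. The platform's reset vector is presumed to already point at mcpm_entry_point. Note also that with a 64-byte writeback granule the asm offsets above work out to: cluster state at byte 256, inbound state at 320, and a per-cluster stride of 384.

/*
 * Hypothetical MCPM backend sketch. foo_pwrc_cpu_on() stands in for a
 * real MMIO write to a power controller; .suspend and .powered_up are
 * omitted for brevity.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/mcpm.h>

static void foo_pwrc_cpu_on(unsigned int cpu, unsigned int cluster)
{
        /* invented: poke the power controller to release the CPU */
}

static int foo_power_up(unsigned int cpu, unsigned int cluster)
{
        if (cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER)
                return -EINVAL;
        foo_pwrc_cpu_on(cpu, cluster);
        return 0;
}

static void foo_power_down(void)
{
        /* clean this CPU's cache, then wait for power to be cut */
        while (1)
                asm volatile("wfi");
}

static const struct mcpm_platform_ops foo_power_ops = {
        .power_up       = foo_power_up,
        .power_down     = foo_power_down,
};

static int __init foo_mcpm_init(void)
{
        int ret = mcpm_platform_register(&foo_power_ops);

        if (!ret)
                mcpm_smp_set_ops();     /* boot secondaries via MCPM */
        return ret;
}
early_initcall(foo_mcpm_init);

With ops registered, a boot path releases a secondary by installing its continuation with mcpm_set_entry_vector(cpu, cluster, entry) and then calling mcpm_cpu_power_up(cpu, cluster); the woken CPU comes in through mcpm_entry_point and branches to that vector.
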
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index cddda1f41f0f..1995d1a84060 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -152,6 +152,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
152#define TIF_SYSCALL_AUDIT 9 152#define TIF_SYSCALL_AUDIT 9
153#define TIF_SYSCALL_TRACEPOINT 10 153#define TIF_SYSCALL_TRACEPOINT 10
154#define TIF_SECCOMP 11 /* seccomp syscall filtering active */ 154#define TIF_SECCOMP 11 /* seccomp syscall filtering active */
155#define TIF_NOHZ 12 /* in adaptive nohz mode */
155#define TIF_USING_IWMMXT 17 156#define TIF_USING_IWMMXT 17
156#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 157#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
157#define TIF_RESTORE_SIGMASK 20 158#define TIF_RESTORE_SIGMASK 20
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index ab865e65a84c..a3625d141c1d 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -166,7 +166,7 @@
166# define v6wbi_always_flags (-1UL) 166# define v6wbi_always_flags (-1UL)
167#endif 167#endif
168 168
169#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ 169#define v7wbi_tlb_flags_smp (TLB_WB | TLB_BARRIER | \
170 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \ 170 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
171 TLB_V7_UIS_ASID | TLB_V7_UIS_BP) 171 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
172#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ 172#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
diff --git a/arch/arm/include/debug/uncompress.h b/arch/arm/include/debug/uncompress.h
new file mode 100644
index 000000000000..0e2949b0fae9
--- /dev/null
+++ b/arch/arm/include/debug/uncompress.h
@@ -0,0 +1,7 @@
1#ifdef CONFIG_DEBUG_UNCOMPRESS
2extern void putc(int c);
3#else
4static inline void putc(int c) {}
5#endif
6static inline void flush(void) {}
7static inline void arch_decomp_setup(void) {}
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 023bfeb367bf..c1ee007523d7 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -53,12 +53,12 @@
53#define KVM_ARM_FIQ_spsr fiq_regs[7] 53#define KVM_ARM_FIQ_spsr fiq_regs[7]
54 54
55struct kvm_regs { 55struct kvm_regs {
56 struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */ 56 struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */
57 __u32 svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ 57 unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
58 __u32 abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ 58 unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
59 __u32 und_regs[3]; /* SP_und, LR_und, SPSR_und */ 59 unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */
60 __u32 irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ 60 unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
61 __u32 fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ 61 unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
62}; 62};
63 63
64/* Supported Processor Types */ 64/* Supported Processor Types */
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 923eec7105cf..a53efa993690 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -149,6 +149,10 @@ int main(void)
149 DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); 149 DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
150 DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); 150 DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
151 DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); 151 DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
152 BLANK();
153 DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
154 DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
155 BLANK();
152#ifdef CONFIG_KVM_ARM_HOST 156#ifdef CONFIG_KVM_ARM_HOST
153 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); 157 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
154 DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); 158 DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr));
@@ -165,10 +169,10 @@ int main(void)
165 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); 169 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
166 DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); 170 DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
167 DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); 171 DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
168 DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr)); 172 DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr));
169 DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar)); 173 DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
170 DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar)); 174 DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar));
171 DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc)); 175 DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
172#ifdef CONFIG_KVM_ARM_VGIC 176#ifdef CONFIG_KVM_ARM_VGIC
173 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); 177 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
174 DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); 178 DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index a1f73b502ef0..b2ed73c45489 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -462,6 +462,7 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
462 sys->busnr = busnr; 462 sys->busnr = busnr;
463 sys->swizzle = hw->swizzle; 463 sys->swizzle = hw->swizzle;
464 sys->map_irq = hw->map_irq; 464 sys->map_irq = hw->map_irq;
465 sys->align_resource = hw->align_resource;
465 INIT_LIST_HEAD(&sys->resources); 466 INIT_LIST_HEAD(&sys->resources);
466 467
467 if (hw->private_data) 468 if (hw->private_data)
@@ -574,6 +575,8 @@ char * __init pcibios_setup(char *str)
574resource_size_t pcibios_align_resource(void *data, const struct resource *res, 575resource_size_t pcibios_align_resource(void *data, const struct resource *res,
575 resource_size_t size, resource_size_t align) 576 resource_size_t size, resource_size_t align)
576{ 577{
578 struct pci_dev *dev = data;
579 struct pci_sys_data *sys = dev->sysdata;
577 resource_size_t start = res->start; 580 resource_size_t start = res->start;
578 581
579 if (res->flags & IORESOURCE_IO && start & 0x300) 582 if (res->flags & IORESOURCE_IO && start & 0x300)
@@ -581,6 +584,9 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
581 584
582 start = (start + align - 1) & ~(align - 1); 585 start = (start + align - 1) & ~(align - 1);
583 586
587 if (sys->align_resource)
588 return sys->align_resource(dev, res, start, size, align);
589
584 return start; 590 return start;
585} 591}
586 592
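
To make the flow above concrete: a host bridge driver supplies the callback in its hw_pci, pcibios_init_hw() copies it into pci_sys_data, and pcibios_align_resource() defers to it after the generic rounding. The sketch below is hypothetical — foo_align(), foo_pci and the 1K I/O quirk are invented; only the fields and signature added in this diff are assumed.

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/sizes.h>
#include <asm/mach/pci.h>

/* Invented quirk: this bridge decodes I/O only on 1K-aligned windows. */
static resource_size_t foo_align(struct pci_dev *dev,
                                 const struct resource *res,
                                 resource_size_t start,
                                 resource_size_t size,
                                 resource_size_t align)
{
        if (res->flags & IORESOURCE_IO)
                start = ALIGN(start, SZ_1K);
        return start;
}

static struct hw_pci foo_pci __initdata = {
        .nr_controllers = 1,
        .align_resource = foo_align,
        /* .setup, .map_irq, ... as the platform normally provides */
};

Because the hook receives the already-rounded start, it is expected only to move the resource further up, never back down.
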
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 0f82098c9bfe..582b405befc5 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -192,18 +192,6 @@ __dabt_svc:
192 svc_entry 192 svc_entry
193 mov r2, sp 193 mov r2, sp
194 dabt_helper 194 dabt_helper
195
196 @
197 @ IRQs off again before pulling preserved data off the stack
198 @
199 disable_irq_notrace
200
201#ifdef CONFIG_TRACE_IRQFLAGS
202 tst r5, #PSR_I_BIT
203 bleq trace_hardirqs_on
204 tst r5, #PSR_I_BIT
205 blne trace_hardirqs_off
206#endif
207 svc_exit r5 @ return from exception 195 svc_exit r5 @ return from exception
208 UNWIND(.fnend ) 196 UNWIND(.fnend )
209ENDPROC(__dabt_svc) 197ENDPROC(__dabt_svc)
@@ -223,12 +211,7 @@ __irq_svc:
223 blne svc_preempt 211 blne svc_preempt
224#endif 212#endif
225 213
226#ifdef CONFIG_TRACE_IRQFLAGS 214 svc_exit r5, irq = 1 @ return from exception
227 @ The parent context IRQs must have been enabled to get here in
228 @ the first place, so there's no point checking the PSR I bit.
229 bl trace_hardirqs_on
230#endif
231 svc_exit r5 @ return from exception
232 UNWIND(.fnend ) 215 UNWIND(.fnend )
233ENDPROC(__irq_svc) 216ENDPROC(__irq_svc)
234 217
@@ -295,22 +278,8 @@ __und_svc_fault:
295 mov r0, sp @ struct pt_regs *regs 278 mov r0, sp @ struct pt_regs *regs
296 bl __und_fault 279 bl __und_fault
297 280
298 @
299 @ IRQs off again before pulling preserved data off the stack
300 @
301__und_svc_finish: 281__und_svc_finish:
302 disable_irq_notrace
303
304 @
305 @ restore SPSR and restart the instruction
306 @
307 ldr r5, [sp, #S_PSR] @ Get SVC cpsr 282 ldr r5, [sp, #S_PSR] @ Get SVC cpsr
308#ifdef CONFIG_TRACE_IRQFLAGS
309 tst r5, #PSR_I_BIT
310 bleq trace_hardirqs_on
311 tst r5, #PSR_I_BIT
312 blne trace_hardirqs_off
313#endif
314 svc_exit r5 @ return from exception 283 svc_exit r5 @ return from exception
315 UNWIND(.fnend ) 284 UNWIND(.fnend )
316ENDPROC(__und_svc) 285ENDPROC(__und_svc)
@@ -320,18 +289,6 @@ __pabt_svc:
320 svc_entry 289 svc_entry
321 mov r2, sp @ regs 290 mov r2, sp @ regs
322 pabt_helper 291 pabt_helper
323
324 @
325 @ IRQs off again before pulling preserved data off the stack
326 @
327 disable_irq_notrace
328
329#ifdef CONFIG_TRACE_IRQFLAGS
330 tst r5, #PSR_I_BIT
331 bleq trace_hardirqs_on
332 tst r5, #PSR_I_BIT
333 blne trace_hardirqs_off
334#endif
335 svc_exit r5 @ return from exception 292 svc_exit r5 @ return from exception
336 UNWIND(.fnend ) 293 UNWIND(.fnend )
337ENDPROC(__pabt_svc) 294ENDPROC(__pabt_svc)
@@ -396,6 +353,7 @@ ENDPROC(__pabt_svc)
396#ifdef CONFIG_IRQSOFF_TRACER 353#ifdef CONFIG_IRQSOFF_TRACER
397 bl trace_hardirqs_off 354 bl trace_hardirqs_off
398#endif 355#endif
356 ct_user_exit save = 0
399 .endm 357 .endm
400 358
401 .macro kuser_cmpxchg_check 359 .macro kuser_cmpxchg_check
@@ -562,21 +520,21 @@ ENDPROC(__und_usr)
562 @ Fall-through from Thumb-2 __und_usr 520 @ Fall-through from Thumb-2 __und_usr
563 @ 521 @
564#ifdef CONFIG_NEON 522#ifdef CONFIG_NEON
523 get_thread_info r10 @ get current thread
565 adr r6, .LCneon_thumb_opcodes 524 adr r6, .LCneon_thumb_opcodes
566 b 2f 525 b 2f
567#endif 526#endif
568call_fpe: 527call_fpe:
528 get_thread_info r10 @ get current thread
569#ifdef CONFIG_NEON 529#ifdef CONFIG_NEON
570 adr r6, .LCneon_arm_opcodes 530 adr r6, .LCneon_arm_opcodes
5712: 5312: ldr r5, [r6], #4 @ mask value
572 ldr r7, [r6], #4 @ mask value
573 cmp r7, #0 @ end mask?
574 beq 1f
575 and r8, r0, r7
576 ldr r7, [r6], #4 @ opcode bits matching in mask 532 ldr r7, [r6], #4 @ opcode bits matching in mask
533 cmp r5, #0 @ end mask?
534 beq 1f
535 and r8, r0, r5
577 cmp r8, r7 @ NEON instruction? 536 cmp r8, r7 @ NEON instruction?
578 bne 2b 537 bne 2b
579 get_thread_info r10
580 mov r7, #1 538 mov r7, #1
581 strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used 539 strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used
582 strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used 540 strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used
@@ -586,7 +544,6 @@ call_fpe:
586 tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 544 tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
587 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2 545 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
588 moveq pc, lr 546 moveq pc, lr
589 get_thread_info r10 @ get current thread
590 and r8, r0, #0x00000f00 @ mask out CP number 547 and r8, r0, #0x00000f00 @ mask out CP number
591 THUMB( lsr r8, r8, #8 ) 548 THUMB( lsr r8, r8, #8 )
592 mov r7, #1 549 mov r7, #1
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index fefd7f971437..bc5bc0a97131 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -35,12 +35,11 @@ ret_fast_syscall:
35 ldr r1, [tsk, #TI_FLAGS] 35 ldr r1, [tsk, #TI_FLAGS]
36 tst r1, #_TIF_WORK_MASK 36 tst r1, #_TIF_WORK_MASK
37 bne fast_work_pending 37 bne fast_work_pending
38#if defined(CONFIG_IRQSOFF_TRACER)
39 asm_trace_hardirqs_on 38 asm_trace_hardirqs_on
40#endif
41 39
42 /* perform architecture specific actions before user return */ 40 /* perform architecture specific actions before user return */
43 arch_ret_to_user r1, lr 41 arch_ret_to_user r1, lr
42 ct_user_enter
44 43
45 restore_user_regs fast = 1, offset = S_OFF 44 restore_user_regs fast = 1, offset = S_OFF
46 UNWIND(.fnend ) 45 UNWIND(.fnend )
@@ -71,11 +70,11 @@ ENTRY(ret_to_user_from_irq)
71 tst r1, #_TIF_WORK_MASK 70 tst r1, #_TIF_WORK_MASK
72 bne work_pending 71 bne work_pending
73no_work_pending: 72no_work_pending:
74#if defined(CONFIG_IRQSOFF_TRACER)
75 asm_trace_hardirqs_on 73 asm_trace_hardirqs_on
76#endif 74
77 /* perform architecture specific actions before user return */ 75 /* perform architecture specific actions before user return */
78 arch_ret_to_user r1, lr 76 arch_ret_to_user r1, lr
77 ct_user_enter save = 0
79 78
80 restore_user_regs fast = 0, offset = 0 79 restore_user_regs fast = 0, offset = 0
81ENDPROC(ret_to_user_from_irq) 80ENDPROC(ret_to_user_from_irq)
@@ -406,6 +405,7 @@ ENTRY(vector_swi)
406 mcr p15, 0, ip, c1, c0 @ update control register 405 mcr p15, 0, ip, c1, c0 @ update control register
407#endif 406#endif
408 enable_irq 407 enable_irq
408 ct_user_exit
409 409
410 get_thread_info tsk 410 get_thread_info tsk
411 adr tbl, sys_call_table @ load syscall table pointer 411 adr tbl, sys_call_table @ load syscall table pointer
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 9a8531eadd3d..160f3376ba6d 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -74,7 +74,24 @@
74 .endm 74 .endm
75 75
76#ifndef CONFIG_THUMB2_KERNEL 76#ifndef CONFIG_THUMB2_KERNEL
77 .macro svc_exit, rpsr 77 .macro svc_exit, rpsr, irq = 0
78 .if \irq != 0
79 @ IRQs already off
80#ifdef CONFIG_TRACE_IRQFLAGS
81 @ The parent context IRQs must have been enabled to get here in
82 @ the first place, so there's no point checking the PSR I bit.
83 bl trace_hardirqs_on
84#endif
85 .else
86 @ IRQs off again before pulling preserved data off the stack
87 disable_irq_notrace
88#ifdef CONFIG_TRACE_IRQFLAGS
89 tst \rpsr, #PSR_I_BIT
90 bleq trace_hardirqs_on
91 tst \rpsr, #PSR_I_BIT
92 blne trace_hardirqs_off
93#endif
94 .endif
78 msr spsr_cxsf, \rpsr 95 msr spsr_cxsf, \rpsr
79#if defined(CONFIG_CPU_V6) 96#if defined(CONFIG_CPU_V6)
80 ldr r0, [sp] 97 ldr r0, [sp]
@@ -120,7 +137,24 @@
120 mov pc, \reg 137 mov pc, \reg
121 .endm 138 .endm
122#else /* CONFIG_THUMB2_KERNEL */ 139#else /* CONFIG_THUMB2_KERNEL */
123 .macro svc_exit, rpsr 140 .macro svc_exit, rpsr, irq = 0
141 .if \irq != 0
142 @ IRQs already off
143#ifdef CONFIG_TRACE_IRQFLAGS
144 @ The parent context IRQs must have been enabled to get here in
145 @ the first place, so there's no point checking the PSR I bit.
146 bl trace_hardirqs_on
147#endif
148 .else
149 @ IRQs off again before pulling preserved data off the stack
150 disable_irq_notrace
151#ifdef CONFIG_TRACE_IRQFLAGS
152 tst \rpsr, #PSR_I_BIT
153 bleq trace_hardirqs_on
154 tst \rpsr, #PSR_I_BIT
155 blne trace_hardirqs_off
156#endif
157 .endif
124 ldr lr, [sp, #S_SP] @ top of the stack 158 ldr lr, [sp, #S_SP] @ top of the stack
125 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc 159 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
126 clrex @ clear the exclusive monitor 160 clrex @ clear the exclusive monitor
@@ -164,6 +198,34 @@
164#endif /* !CONFIG_THUMB2_KERNEL */ 198#endif /* !CONFIG_THUMB2_KERNEL */
165 199
166/* 200/*
201 * Context tracking subsystem. Used to instrument transitions
202 * between user and kernel mode.
203 */
204 .macro ct_user_exit, save = 1
205#ifdef CONFIG_CONTEXT_TRACKING
206 .if \save
207 stmdb sp!, {r0-r3, ip, lr}
208 bl user_exit
209 ldmia sp!, {r0-r3, ip, lr}
210 .else
211 bl user_exit
212 .endif
213#endif
214 .endm
215
216 .macro ct_user_enter, save = 1
217#ifdef CONFIG_CONTEXT_TRACKING
218 .if \save
219 stmdb sp!, {r0-r3, ip, lr}
220 bl user_enter
221 ldmia sp!, {r0-r3, ip, lr}
222 .else
223 bl user_enter
224 .endif
225#endif
226 .endm
227
228/*
167 * These are the registers used in the syscall handler, and allow us to 229 * These are the registers used in the syscall handler, and allow us to
168 * have in theory up to 7 arguments to a function - r0 to r6. 230 * have in theory up to 7 arguments to a function - r0 to r6.
169 * 231 *
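
The ct_user_exit/ct_user_enter macros above are thin wrappers: each is a call to the generic context tracking hooks, optionally bracketed by a save/restore of r0-r3, ip and lr for paths where those registers are still live, and both assemble to nothing without CONFIG_CONTEXT_TRACKING. In C terms, the contract they implement on every entry/exit path looks roughly like this sketch (user_exit() and user_enter() are the real hooks from <linux/context_tracking.h>; the surrounding function is illustrative):

#include <linux/context_tracking.h>

static void example_entry_path(void)
{
        user_exit();            /* left user mode: kernel code from here */

        /* ... syscall or exception handling ... */

        user_enter();           /* about to resume user mode */
}

This mirrors where the macros land above: ct_user_exit at the exception entries and vector_swi, ct_user_enter just before restore_user_regs.
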
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 854bd22380d3..5b391a689b47 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -98,8 +98,9 @@ __mmap_switched:
98 str r9, [r4] @ Save processor ID 98 str r9, [r4] @ Save processor ID
99 str r1, [r5] @ Save machine type 99 str r1, [r5] @ Save machine type
100 str r2, [r6] @ Save atags pointer 100 str r2, [r6] @ Save atags pointer
101 bic r4, r0, #CR_A @ Clear 'A' bit 101 cmp r7, #0
102 stmia r7, {r0, r4} @ Save control register values 102 bicne r4, r0, #CR_A @ Clear 'A' bit
103 stmneia r7, {r0, r4} @ Save control register values
103 b start_kernel 104 b start_kernel
104ENDPROC(__mmap_switched) 105ENDPROC(__mmap_switched)
105 106
@@ -113,7 +114,11 @@ __mmap_switched_data:
113 .long processor_id @ r4 114 .long processor_id @ r4
114 .long __machine_arch_type @ r5 115 .long __machine_arch_type @ r5
115 .long __atags_pointer @ r6 116 .long __atags_pointer @ r6
117#ifdef CONFIG_CPU_CP15
116 .long cr_alignment @ r7 118 .long cr_alignment @ r7
119#else
120 .long 0 @ r7
121#endif
117 .long init_thread_union + THREAD_START_SP @ sp 122 .long init_thread_union + THREAD_START_SP @ sp
118 .size __mmap_switched_data, . - __mmap_switched_data 123 .size __mmap_switched_data, . - __mmap_switched_data
119 124
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 2c228a07e58c..6a2e09c952c7 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -32,15 +32,21 @@
32 * numbers for r1. 32 * numbers for r1.
33 * 33 *
34 */ 34 */
35 .arm
36 35
37 __HEAD 36 __HEAD
37
38#ifdef CONFIG_CPU_THUMBONLY
39 .thumb
40ENTRY(stext)
41#else
42 .arm
38ENTRY(stext) 43ENTRY(stext)
39 44
40 THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. 45 THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
41 THUMB( bx r9 ) @ If this is a Thumb-2 kernel, 46 THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
42 THUMB( .thumb ) @ switch to Thumb now. 47 THUMB( .thumb ) @ switch to Thumb now.
43 THUMB(1: ) 48 THUMB(1: )
49#endif
44 50
45 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode 51 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
46 @ and irqs disabled 52 @ and irqs disabled
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index ae58d3b37d9d..f21970316836 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -407,15 +407,16 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
407 * atomic helpers and the signal restart code. Insert it into the 407 * atomic helpers and the signal restart code. Insert it into the
408 * gate_vma so that it is visible through ptrace and /proc/<pid>/mem. 408 * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
409 */ 409 */
410static struct vm_area_struct gate_vma; 410static struct vm_area_struct gate_vma = {
411 .vm_start = 0xffff0000,
412 .vm_end = 0xffff0000 + PAGE_SIZE,
413 .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
414 .vm_mm = &init_mm,
415};
411 416
412static int __init gate_vma_init(void) 417static int __init gate_vma_init(void)
413{ 418{
414 gate_vma.vm_start = 0xffff0000; 419 gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
415 gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
416 gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
417 gate_vma.vm_flags = VM_READ | VM_EXEC |
418 VM_MAYREAD | VM_MAYEXEC;
419 return 0; 420 return 0;
420} 421}
421arch_initcall(gate_vma_init); 422arch_initcall(gate_vma_init);
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 8085417555dd..fafedd86885d 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -26,7 +26,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
26 struct return_address_data *data = d; 26 struct return_address_data *data = d;
27 27
28 if (!data->level) { 28 if (!data->level) {
29 data->addr = (void *)frame->lr; 29 data->addr = (void *)frame->pc;
30 30
31 return 1; 31 return 1;
32 } else { 32 } else {
@@ -41,7 +41,8 @@ void *return_address(unsigned int level)
41 struct stackframe frame; 41 struct stackframe frame;
42 register unsigned long current_sp asm ("sp"); 42 register unsigned long current_sp asm ("sp");
43 43
44 data.level = level + 1; 44 data.level = level + 2;
45 data.addr = NULL;
45 46
46 frame.fp = (unsigned long)__builtin_frame_address(0); 47 frame.fp = (unsigned long)__builtin_frame_address(0);
47 frame.sp = current_sp; 48 frame.sp = current_sp;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 234e339196c0..728007c4a2b7 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -290,10 +290,10 @@ static int cpu_has_aliasing_icache(unsigned int arch)
290 290
291static void __init cacheid_init(void) 291static void __init cacheid_init(void)
292{ 292{
293 unsigned int cachetype = read_cpuid_cachetype();
294 unsigned int arch = cpu_architecture(); 293 unsigned int arch = cpu_architecture();
295 294
296 if (arch >= CPU_ARCH_ARMv6) { 295 if (arch >= CPU_ARCH_ARMv6) {
296 unsigned int cachetype = read_cpuid_cachetype();
297 if ((cachetype & (7 << 29)) == 4 << 29) { 297 if ((cachetype & (7 << 29)) == 4 << 29) {
298 /* ARMv7 register format */ 298 /* ARMv7 register format */
299 arch = CPU_ARCH_ARMv7; 299 arch = CPU_ARCH_ARMv7;
@@ -389,7 +389,7 @@ static void __init feat_v6_fixup(void)
389 * 389 *
390 * cpu_init sets up the per-CPU stacks. 390 * cpu_init sets up the per-CPU stacks.
391 */ 391 */
392void cpu_init(void) 392void notrace cpu_init(void)
393{ 393{
394 unsigned int cpu = smp_processor_id(); 394 unsigned int cpu = smp_processor_id();
395 struct stack *stk = &stacks[cpu]; 395 struct stack *stk = &stacks[cpu];
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 4619177bcfe6..47ab90563bf4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -211,6 +211,13 @@ void __cpuinit __cpu_die(unsigned int cpu)
211 } 211 }
212 printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); 212 printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
213 213
214 /*
215 * platform_cpu_kill() is generally expected to do the powering off
216 * and/or cutting of clocks to the dying CPU. Optionally, this may
217 * be done by the CPU which is dying in preference to supporting
218 * this call, but that means there is _no_ synchronisation between
219 * the requesting CPU and the dying CPU actually losing power.
220 */
214 if (!platform_cpu_kill(cpu)) 221 if (!platform_cpu_kill(cpu))
215 printk("CPU%u: unable to kill\n", cpu); 222 printk("CPU%u: unable to kill\n", cpu);
216} 223}
@@ -230,14 +237,41 @@ void __ref cpu_die(void)
230 idle_task_exit(); 237 idle_task_exit();
231 238
232 local_irq_disable(); 239 local_irq_disable();
233 mb();
234 240
235 /* Tell __cpu_die() that this CPU is now safe to dispose of */ 241 /*
242 * Flush the data out of the L1 cache for this CPU. This must be
243 * before the completion to ensure that data is safely written out
244 * before platform_cpu_kill() gets called - which may disable
245 * *this* CPU and power down its cache.
246 */
247 flush_cache_louis();
248
249 /*
250 * Tell __cpu_die() that this CPU is now safe to dispose of. Once
251 * this returns, power and/or clocks can be removed at any point
252 * from this CPU and its cache by platform_cpu_kill().
253 */
236 RCU_NONIDLE(complete(&cpu_died)); 254 RCU_NONIDLE(complete(&cpu_died));
237 255
238 /* 256 /*
239 * actual CPU shutdown procedure is at least platform (if not 257 * Ensure that the cache lines associated with that completion are
240 * CPU) specific. 258 * written out. This covers the case where _this_ CPU is doing the
259 * powering down, to ensure that the completion is visible to the
260 * CPU waiting for this one.
261 */
262 flush_cache_louis();
263
264 /*
265 * The actual CPU shutdown procedure is at least platform (if not
266 * CPU) specific. This may remove power, or it may simply spin.
267 *
268 * Platforms are generally expected *NOT* to return from this call,
269 * although there are some which do because they have no way to
270 * power down the CPU. These platforms are the _only_ reason we
271 * have a return path which uses the fragment of assembly below.
272 *
273 * The return path should not be used for platforms which can
274 * power off the CPU.
241 */ 275 */
242 if (smp_ops.cpu_die) 276 if (smp_ops.cpu_die)
243 smp_ops.cpu_die(cpu); 277 smp_ops.cpu_die(cpu);
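
Condensed, the handshake the new comments describe has two sides, and both flush_cache_louis() calls exist because power may be removed the instant the completion is observed. In outline (illustrative, not literal kernel code — the real __cpu_die() waits with a timeout):

/* on the CPU going down */
static void dying_side(unsigned int cpu)
{
        local_irq_disable();
        flush_cache_louis();            /* data out before power can go */
        RCU_NONIDLE(complete(&cpu_died));
        flush_cache_louis();            /* make the completion visible */
        if (smp_ops.cpu_die)
                smp_ops.cpu_die(cpu);   /* normally never returns */
}

/* on the CPU requesting the unplug */
static void killing_side(unsigned int cpu)
{
        wait_for_completion(&cpu_died); /* real code uses a timeout */
        if (!platform_cpu_kill(cpu))    /* may cut power and clocks */
                printk("CPU%u: unable to kill\n", cpu);
}

The asymmetry matters: after complete() returns, nothing on the dying CPU may touch data it expects to survive, since platform_cpu_kill() can already be powering it off.
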
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 45eac87ed66a..5bc1a63284e3 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -41,7 +41,7 @@ void scu_enable(void __iomem *scu_base)
41 41
42#ifdef CONFIG_ARM_ERRATA_764369 42#ifdef CONFIG_ARM_ERRATA_764369
43 /* Cortex-A9 only */ 43 /* Cortex-A9 only */
44 if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) { 44 if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
45 scu_ctrl = __raw_readl(scu_base + 0x30); 45 scu_ctrl = __raw_readl(scu_base + 0x30);
46 if (!(scu_ctrl & 1)) 46 if (!(scu_ctrl & 1))
47 __raw_writel(scu_ctrl | 0x1, scu_base + 0x30); 47 __raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index e82e1d248772..9a52a07aa40e 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -98,21 +98,21 @@ static void broadcast_tlb_a15_erratum(void)
98 return; 98 return;
99 99
100 dummy_flush_tlb_a15_erratum(); 100 dummy_flush_tlb_a15_erratum();
101 smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum, 101 smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
102 NULL, 1);
103} 102}
104 103
105static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) 104static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
106{ 105{
107 int cpu; 106 int cpu, this_cpu;
108 cpumask_t mask = { CPU_BITS_NONE }; 107 cpumask_t mask = { CPU_BITS_NONE };
109 108
110 if (!erratum_a15_798181()) 109 if (!erratum_a15_798181())
111 return; 110 return;
112 111
113 dummy_flush_tlb_a15_erratum(); 112 dummy_flush_tlb_a15_erratum();
113 this_cpu = get_cpu();
114 for_each_online_cpu(cpu) { 114 for_each_online_cpu(cpu) {
115 if (cpu == smp_processor_id()) 115 if (cpu == this_cpu)
116 continue; 116 continue;
117 /* 117 /*
118 * We only need to send an IPI if the other CPUs are running 118 * We only need to send an IPI if the other CPUs are running
@@ -127,6 +127,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
127 cpumask_set_cpu(cpu, &mask); 127 cpumask_set_cpu(cpu, &mask);
128 } 128 }
129 smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); 129 smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
130 put_cpu();
130} 131}
131 132
132void flush_tlb_all(void) 133void flush_tlb_all(void)
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index fc96ce6f2357..8dc5e76cb789 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -17,7 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
17kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) 17kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
18 18
19obj-y += kvm-arm.o init.o interrupts.o 19obj-y += kvm-arm.o init.o interrupts.o
20obj-y += arm.o guest.o mmu.o emulate.o reset.o 20obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
21obj-y += coproc.o coproc_a15.o mmio.o psci.o 21obj-y += coproc.o coproc_a15.o mmio.o psci.o
22obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o 22obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o
23obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o 23obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 842098d78f58..a0dfc2a53f91 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -30,11 +30,9 @@
30#define CREATE_TRACE_POINTS 30#define CREATE_TRACE_POINTS
31#include "trace.h" 31#include "trace.h"
32 32
33#include <asm/unified.h>
34#include <asm/uaccess.h> 33#include <asm/uaccess.h>
35#include <asm/ptrace.h> 34#include <asm/ptrace.h>
36#include <asm/mman.h> 35#include <asm/mman.h>
37#include <asm/cputype.h>
38#include <asm/tlbflush.h> 36#include <asm/tlbflush.h>
39#include <asm/cacheflush.h> 37#include <asm/cacheflush.h>
40#include <asm/virt.h> 38#include <asm/virt.h>
@@ -44,14 +42,13 @@
44#include <asm/kvm_emulate.h> 42#include <asm/kvm_emulate.h>
45#include <asm/kvm_coproc.h> 43#include <asm/kvm_coproc.h>
46#include <asm/kvm_psci.h> 44#include <asm/kvm_psci.h>
47#include <asm/opcodes.h>
48 45
49#ifdef REQUIRES_VIRT 46#ifdef REQUIRES_VIRT
50__asm__(".arch_extension virt"); 47__asm__(".arch_extension virt");
51#endif 48#endif
52 49
53static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); 50static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
54static struct vfp_hard_struct __percpu *kvm_host_vfp_state; 51static kvm_kernel_vfp_t __percpu *kvm_host_vfp_state;
55static unsigned long hyp_default_vectors; 52static unsigned long hyp_default_vectors;
56 53
57/* Per-CPU variable containing the currently running vcpu. */ 54/* Per-CPU variable containing the currently running vcpu. */
@@ -304,22 +301,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
304 return 0; 301 return 0;
305} 302}
306 303
307int __attribute_const__ kvm_target_cpu(void)
308{
309 unsigned long implementor = read_cpuid_implementor();
310 unsigned long part_number = read_cpuid_part_number();
311
312 if (implementor != ARM_CPU_IMP_ARM)
313 return -EINVAL;
314
315 switch (part_number) {
316 case ARM_CPU_PART_CORTEX_A15:
317 return KVM_ARM_TARGET_CORTEX_A15;
318 default:
319 return -EINVAL;
320 }
321}
322
323int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 304int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
324{ 305{
325 int ret; 306 int ret;
@@ -482,163 +463,6 @@ static void update_vttbr(struct kvm *kvm)
482 spin_unlock(&kvm_vmid_lock); 463 spin_unlock(&kvm_vmid_lock);
483} 464}
484 465
485static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
486{
487 /* SVC called from Hyp mode should never get here */
488 kvm_debug("SVC called from Hyp mode shouldn't go here\n");
489 BUG();
490 return -EINVAL; /* Squash warning */
491}
492
493static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
494{
495 trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
496 vcpu->arch.hsr & HSR_HVC_IMM_MASK);
497
498 if (kvm_psci_call(vcpu))
499 return 1;
500
501 kvm_inject_undefined(vcpu);
502 return 1;
503}
504
505static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
506{
507 if (kvm_psci_call(vcpu))
508 return 1;
509
510 kvm_inject_undefined(vcpu);
511 return 1;
512}
513
514static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
515{
516 /* The hypervisor should never cause aborts */
517 kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
518 vcpu->arch.hxfar, vcpu->arch.hsr);
519 return -EFAULT;
520}
521
522static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
523{
524 /* This is either an error in the ws. code or an external abort */
525 kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
526 vcpu->arch.hxfar, vcpu->arch.hsr);
527 return -EFAULT;
528}
529
530typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
531static exit_handle_fn arm_exit_handlers[] = {
532 [HSR_EC_WFI] = kvm_handle_wfi,
533 [HSR_EC_CP15_32] = kvm_handle_cp15_32,
534 [HSR_EC_CP15_64] = kvm_handle_cp15_64,
535 [HSR_EC_CP14_MR] = kvm_handle_cp14_access,
536 [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
537 [HSR_EC_CP14_64] = kvm_handle_cp14_access,
538 [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
539 [HSR_EC_CP10_ID] = kvm_handle_cp10_id,
540 [HSR_EC_SVC_HYP] = handle_svc_hyp,
541 [HSR_EC_HVC] = handle_hvc,
542 [HSR_EC_SMC] = handle_smc,
543 [HSR_EC_IABT] = kvm_handle_guest_abort,
544 [HSR_EC_IABT_HYP] = handle_pabt_hyp,
545 [HSR_EC_DABT] = kvm_handle_guest_abort,
546 [HSR_EC_DABT_HYP] = handle_dabt_hyp,
547};
548
549/*
550 * A conditional instruction is allowed to trap, even though it
551 * wouldn't be executed. So let's re-implement the hardware, in
552 * software!
553 */
554static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
555{
556 unsigned long cpsr, cond, insn;
557
558 /*
559 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
560 * catch undefined instructions, and then we won't get past
561 * the arm_exit_handlers test anyway.
562 */
563 BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
564
565 /* Top two bits non-zero? Unconditional. */
566 if (vcpu->arch.hsr >> 30)
567 return true;
568
569 cpsr = *vcpu_cpsr(vcpu);
570
571 /* Is condition field valid? */
572 if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
573 cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
574 else {
575 /* This can happen in Thumb mode: examine IT state. */
576 unsigned long it;
577
578 it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
579
580 /* it == 0 => unconditional. */
581 if (it == 0)
582 return true;
583
584 /* The cond for this insn works out as the top 4 bits. */
585 cond = (it >> 4);
586 }
587
588 /* Shift makes it look like an ARM-mode instruction */
589 insn = cond << 28;
590 return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
591}
592
593/*
594 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
595 * proper exit to QEMU.
596 */
597static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
598 int exception_index)
599{
600 unsigned long hsr_ec;
601
602 switch (exception_index) {
603 case ARM_EXCEPTION_IRQ:
604 return 1;
605 case ARM_EXCEPTION_UNDEFINED:
606 kvm_err("Undefined exception in Hyp mode at: %#08x\n",
607 vcpu->arch.hyp_pc);
608 BUG();
609 panic("KVM: Hypervisor undefined exception!\n");
610 case ARM_EXCEPTION_DATA_ABORT:
611 case ARM_EXCEPTION_PREF_ABORT:
612 case ARM_EXCEPTION_HVC:
613 hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
614
615 if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
616 || !arm_exit_handlers[hsr_ec]) {
617 kvm_err("Unknown exception class: %#08lx, "
618 "hsr: %#08x\n", hsr_ec,
619 (unsigned int)vcpu->arch.hsr);
620 BUG();
621 }
622
623 /*
624 * See ARM ARM B1.14.1: "Hyp traps on instructions
625 * that fail their condition code check"
626 */
627 if (!kvm_condition_valid(vcpu)) {
628 bool is_wide = vcpu->arch.hsr & HSR_IL;
629 kvm_skip_instr(vcpu, is_wide);
630 return 1;
631 }
632
633 return arm_exit_handlers[hsr_ec](vcpu, run);
634 default:
635 kvm_pr_unimpl("Unsupported exception type: %d",
636 exception_index);
637 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
638 return 0;
639 }
640}
641
642static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) 466static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
643{ 467{
644 if (likely(vcpu->arch.has_run_once)) 468 if (likely(vcpu->arch.has_run_once))
@@ -973,7 +797,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
973static void cpu_init_hyp_mode(void *vector) 797static void cpu_init_hyp_mode(void *vector)
974{ 798{
975 unsigned long long pgd_ptr; 799 unsigned long long pgd_ptr;
976 unsigned long pgd_low, pgd_high;
977 unsigned long hyp_stack_ptr; 800 unsigned long hyp_stack_ptr;
978 unsigned long stack_page; 801 unsigned long stack_page;
979 unsigned long vector_ptr; 802 unsigned long vector_ptr;
@@ -982,20 +805,11 @@ static void cpu_init_hyp_mode(void *vector)
982 __hyp_set_vectors((unsigned long)vector); 805 __hyp_set_vectors((unsigned long)vector);
983 806
984 pgd_ptr = (unsigned long long)kvm_mmu_get_httbr(); 807 pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
985 pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
986 pgd_high = (pgd_ptr >> 32ULL);
987 stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); 808 stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
988 hyp_stack_ptr = stack_page + PAGE_SIZE; 809 hyp_stack_ptr = stack_page + PAGE_SIZE;
989 vector_ptr = (unsigned long)__kvm_hyp_vector; 810 vector_ptr = (unsigned long)__kvm_hyp_vector;
990 811
991 /* 812 __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
992 * Call initialization code, and switch to the full blown
993 * HYP code. The init code doesn't need to preserve these registers as
994 * r1-r3 and r12 are already callee save according to the AAPCS.
996 * Note that we slightly misuse the prototype by casting the pgd_low to
996 * a void *.
997 */
998 kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
999} 813}
1000 814
1001/** 815/**
@@ -1078,7 +892,7 @@ static int init_hyp_mode(void)
1078 /* 892 /*
1079 * Map the host VFP structures 893 * Map the host VFP structures
1080 */ 894 */
1081 kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct); 895 kvm_host_vfp_state = alloc_percpu(kvm_kernel_vfp_t);
1082 if (!kvm_host_vfp_state) { 896 if (!kvm_host_vfp_state) {
1083 err = -ENOMEM; 897 err = -ENOMEM;
1084 kvm_err("Cannot allocate host VFP state\n"); 898 kvm_err("Cannot allocate host VFP state\n");
@@ -1086,7 +900,7 @@ static int init_hyp_mode(void)
1086 } 900 }
1087 901
1088 for_each_possible_cpu(cpu) { 902 for_each_possible_cpu(cpu) {
1089 struct vfp_hard_struct *vfp; 903 kvm_kernel_vfp_t *vfp;
1090 904
1091 vfp = per_cpu_ptr(kvm_host_vfp_state, cpu); 905 vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
1092 err = create_hyp_mappings(vfp, vfp + 1); 906 err = create_hyp_mappings(vfp, vfp + 1);
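
The cpu_init_hyp_mode() simplification above drops the manual 64-bit split because, under the AAPCS, a 64-bit argument already travels in a register pair, so the new __cpu_init_hyp_mode() helper can take the pgd pointer directly. A standalone illustration of what the removed code was doing by hand (the pgd value is made up):

#include <stdio.h>

int main(void)
{
        unsigned long long pgd_ptr = 0x123456789abcdefULL;      /* made up */
        unsigned long pgd_low  = pgd_ptr & ((1ULL << 32) - 1);
        unsigned long pgd_high = pgd_ptr >> 32;

        /* the ABI would have placed these two halves in r0/r1 anyway */
        printf("low=%#lx high=%#lx\n", pgd_low, pgd_high);
        return 0;
}
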
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 7bed7556077a..8eea97be1ed5 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -76,7 +76,7 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
76 const struct coproc_params *p, 76 const struct coproc_params *p,
77 const struct coproc_reg *r) 77 const struct coproc_reg *r)
78{ 78{
79 u32 val; 79 unsigned long val;
80 int cpu; 80 int cpu;
81 81
82 if (!p->is_write) 82 if (!p->is_write)
@@ -293,12 +293,12 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
293 293
294 if (likely(r->access(vcpu, params, r))) { 294 if (likely(r->access(vcpu, params, r))) {
295 /* Skip instruction, since it was emulated */ 295 /* Skip instruction, since it was emulated */
296 kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); 296 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
297 return 1; 297 return 1;
298 } 298 }
299 /* If access function fails, it should complain. */ 299 /* If access function fails, it should complain. */
300 } else { 300 } else {
301 kvm_err("Unsupported guest CP15 access at: %08x\n", 301 kvm_err("Unsupported guest CP15 access at: %08lx\n",
302 *vcpu_pc(vcpu)); 302 *vcpu_pc(vcpu));
303 print_cp_instr(params); 303 print_cp_instr(params);
304 } 304 }
@@ -315,14 +315,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
315{ 315{
316 struct coproc_params params; 316 struct coproc_params params;
317 317
318 params.CRm = (vcpu->arch.hsr >> 1) & 0xf; 318 params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
319 params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; 319 params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
320 params.is_write = ((vcpu->arch.hsr & 1) == 0); 320 params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
321 params.is_64bit = true; 321 params.is_64bit = true;
322 322
323 params.Op1 = (vcpu->arch.hsr >> 16) & 0xf; 323 params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
324 params.Op2 = 0; 324 params.Op2 = 0;
325 params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf; 325 params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
326 params.CRn = 0; 326 params.CRn = 0;
327 327
328 return emulate_cp15(vcpu, &params); 328 return emulate_cp15(vcpu, &params);
@@ -347,14 +347,14 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
347{ 347{
348 struct coproc_params params; 348 struct coproc_params params;
349 349
350 params.CRm = (vcpu->arch.hsr >> 1) & 0xf; 350 params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
351 params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; 351 params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
352 params.is_write = ((vcpu->arch.hsr & 1) == 0); 352 params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
353 params.is_64bit = false; 353 params.is_64bit = false;
354 354
355 params.CRn = (vcpu->arch.hsr >> 10) & 0xf; 355 params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
356 params.Op1 = (vcpu->arch.hsr >> 14) & 0x7; 356 params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
357 params.Op2 = (vcpu->arch.hsr >> 17) & 0x7; 357 params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
358 params.Rt2 = 0; 358 params.Rt2 = 0;
359 359
360 return emulate_cp15(vcpu, &params); 360 return emulate_cp15(vcpu, &params);
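
The coproc.c conversion is mechanical — every field keeps its bit position; only the raw vcpu->arch.hsr access becomes the kvm_vcpu_get_hsr() accessor. As a standalone check of the 32-bit access layout used by kvm_handle_cp15_32() above (the sample syndrome value is made up):

#include <stdio.h>

int main(void)
{
        unsigned long hsr = 0x400UL;    /* made-up sample trap syndrome */
        unsigned int crm = (hsr >> 1) & 0xf;
        unsigned int rt  = (hsr >> 5) & 0xf;
        unsigned int crn = (hsr >> 10) & 0xf;
        unsigned int op1 = (hsr >> 14) & 0x7;
        unsigned int op2 = (hsr >> 17) & 0x7;
        int is_write     = (hsr & 1) == 0;

        /* prints: mcr p15, 0, r0, c1, c0, 0 -- shaped like an SCTLR write */
        printf("%s p15, %u, r%u, c%u, c%u, %u\n",
               is_write ? "mcr" : "mrc", op1, rt, crn, crm, op2);
        return 0;
}
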
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 992adfafa2ff..b7301d3e4799 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -84,7 +84,7 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
84static inline bool write_to_read_only(struct kvm_vcpu *vcpu, 84static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
85 const struct coproc_params *params) 85 const struct coproc_params *params)
86{ 86{
87 kvm_debug("CP15 write to read-only register at: %08x\n", 87 kvm_debug("CP15 write to read-only register at: %08lx\n",
88 *vcpu_pc(vcpu)); 88 *vcpu_pc(vcpu));
89 print_cp_instr(params); 89 print_cp_instr(params);
90 return false; 90 return false;
@@ -93,7 +93,7 @@ static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
93static inline bool read_from_write_only(struct kvm_vcpu *vcpu, 93static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
94 const struct coproc_params *params) 94 const struct coproc_params *params)
95{ 95{
96 kvm_debug("CP15 read to write-only register at: %08x\n", 96 kvm_debug("CP15 read to write-only register at: %08lx\n",
97 *vcpu_pc(vcpu)); 97 *vcpu_pc(vcpu));
98 print_cp_instr(params); 98 print_cp_instr(params);
99 return false; 99 return false;
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index d61450ac6665..bdede9e7da51 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -20,6 +20,7 @@
20#include <linux/kvm_host.h> 20#include <linux/kvm_host.h>
21#include <asm/kvm_arm.h> 21#include <asm/kvm_arm.h>
22#include <asm/kvm_emulate.h> 22#include <asm/kvm_emulate.h>
23#include <asm/opcodes.h>
23#include <trace/events/kvm.h> 24#include <trace/events/kvm.h>
24 25
25#include "trace.h" 26#include "trace.h"
@@ -109,10 +110,10 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
109 * Return a pointer to the register number valid in the current mode of 110 * Return a pointer to the register number valid in the current mode of
110 * the virtual CPU. 111 * the virtual CPU.
111 */ 112 */
112u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) 113unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
113{ 114{
114 u32 *reg_array = (u32 *)&vcpu->arch.regs; 115 unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs;
115 u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK; 116 unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
116 117
117 switch (mode) { 118 switch (mode) {
118 case USR_MODE...SVC_MODE: 119 case USR_MODE...SVC_MODE:
@@ -141,9 +142,9 @@ u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
141/* 142/*
142 * Return the SPSR for the current mode of the virtual CPU. 143 * Return the SPSR for the current mode of the virtual CPU.
143 */ 144 */
144u32 *vcpu_spsr(struct kvm_vcpu *vcpu) 145unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
145{ 146{
146 u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK; 147 unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
147 switch (mode) { 148 switch (mode) {
148 case SVC_MODE: 149 case SVC_MODE:
149 return &vcpu->arch.regs.KVM_ARM_SVC_spsr; 150 return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
@@ -160,20 +161,48 @@ u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
160 } 161 }
161} 162}
162 163
163/** 164/*
164 * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest 165 * A conditional instruction is allowed to trap, even though it
165 * @vcpu: the vcpu pointer 166 * wouldn't be executed. So let's re-implement the hardware, in
166 * @run: the kvm_run structure pointer 167 * software!
167 *
168 * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
169 * halt execution of world-switches and schedule other host processes until
170 * there is an incoming IRQ or FIQ to the VM.
171 */ 168 */
172int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) 169bool kvm_condition_valid(struct kvm_vcpu *vcpu)
173{ 170{
174 trace_kvm_wfi(*vcpu_pc(vcpu)); 171 unsigned long cpsr, cond, insn;
175 kvm_vcpu_block(vcpu); 172
176 return 1; 173 /*
174 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
175 * catch undefined instructions, and then we won't get past
176 * the arm_exit_handlers test anyway.
177 */
178 BUG_ON(!kvm_vcpu_trap_get_class(vcpu));
179
180 /* Top two bits non-zero? Unconditional. */
181 if (kvm_vcpu_get_hsr(vcpu) >> 30)
182 return true;
183
184 cpsr = *vcpu_cpsr(vcpu);
185
186 /* Is condition field valid? */
187 if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
188 cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
189 else {
190 /* This can happen in Thumb mode: examine IT state. */
191 unsigned long it;
192
193 it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
194
195 /* it == 0 => unconditional. */
196 if (it == 0)
197 return true;
198
199 /* The cond for this insn works out as the top 4 bits. */
200 cond = (it >> 4);
201 }
202
203 /* Shift makes it look like an ARM-mode instruction */
204 insn = cond << 28;
205 return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
177} 206}
178 207
179/** 208/**
@@ -257,9 +286,9 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
257 */ 286 */
258void kvm_inject_undefined(struct kvm_vcpu *vcpu) 287void kvm_inject_undefined(struct kvm_vcpu *vcpu)
259{ 288{
260 u32 new_lr_value; 289 unsigned long new_lr_value;
261 u32 new_spsr_value; 290 unsigned long new_spsr_value;
262 u32 cpsr = *vcpu_cpsr(vcpu); 291 unsigned long cpsr = *vcpu_cpsr(vcpu);
263 u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; 292 u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
264 bool is_thumb = (cpsr & PSR_T_BIT); 293 bool is_thumb = (cpsr & PSR_T_BIT);
265 u32 vect_offset = 4; 294 u32 vect_offset = 4;
@@ -291,9 +320,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
291 */ 320 */
292static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) 321static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
293{ 322{
294 u32 new_lr_value; 323 unsigned long new_lr_value;
295 u32 new_spsr_value; 324 unsigned long new_spsr_value;
296 u32 cpsr = *vcpu_cpsr(vcpu); 325 unsigned long cpsr = *vcpu_cpsr(vcpu);
297 u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; 326 u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
298 bool is_thumb = (cpsr & PSR_T_BIT); 327 bool is_thumb = (cpsr & PSR_T_BIT);
299 u32 vect_offset; 328 u32 vect_offset;
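
kvm_condition_valid() now lives in emulate.c, and its subtlest step is reassembling the Thumb ITSTATE byte from its two split CPSR fields (IT[7:2] at bits 15:10, IT[1:0] at bits 26:25). The same bit math, runnable standalone (the sample CPSR is made up):

#include <stdio.h>

/* ITSTATE reconstruction as in kvm_condition_valid() above */
static unsigned int cond_from_it_state(unsigned long cpsr)
{
        unsigned long it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);

        if (it == 0)            /* not inside an IT block: unconditional */
                return 0xe;     /* AL */
        return it >> 4;         /* top four bits are the condition */
}

int main(void)
{
        unsigned long cpsr = 0x29UL << 10;      /* made-up: ITSTATE = 0xa4 */

        printf("cond = %#x\n", cond_from_it_state(cpsr));       /* 0xa (GE) */
        return 0;
}
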
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 2339d9609d36..152d03612181 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <asm/cputype.h>
25#include <asm/uaccess.h> 26#include <asm/uaccess.h>
26#include <asm/kvm.h> 27#include <asm/kvm.h>
27#include <asm/kvm_asm.h> 28#include <asm/kvm_asm.h>
@@ -180,6 +181,22 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
180 return -EINVAL; 181 return -EINVAL;
181} 182}
182 183
184int __attribute_const__ kvm_target_cpu(void)
185{
186 unsigned long implementor = read_cpuid_implementor();
187 unsigned long part_number = read_cpuid_part_number();
188
189 if (implementor != ARM_CPU_IMP_ARM)
190 return -EINVAL;
191
192 switch (part_number) {
193 case ARM_CPU_PART_CORTEX_A15:
194 return KVM_ARM_TARGET_CORTEX_A15;
195 default:
196 return -EINVAL;
197 }
198}
199
183int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, 200int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
184 const struct kvm_vcpu_init *init) 201 const struct kvm_vcpu_init *init)
185{ 202{
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
new file mode 100644
index 000000000000..3d74a0be47db
--- /dev/null
+++ b/arch/arm/kvm/handle_exit.c
@@ -0,0 +1,164 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/kvm.h>
20#include <linux/kvm_host.h>
21#include <asm/kvm_emulate.h>
22#include <asm/kvm_coproc.h>
23#include <asm/kvm_mmu.h>
24#include <asm/kvm_psci.h>
25#include <trace/events/kvm.h>
26
27#include "trace.h"
28
29#include "trace.h"
30
31typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
32
33static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
34{
35 /* SVC called from Hyp mode should never get here */
36 kvm_debug("SVC called from Hyp mode shouldn't go here\n");
37 BUG();
38 return -EINVAL; /* Squash warning */
39}
40
41static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
42{
43 trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
44 kvm_vcpu_hvc_get_imm(vcpu));
45
46 if (kvm_psci_call(vcpu))
47 return 1;
48
49 kvm_inject_undefined(vcpu);
50 return 1;
51}
52
53static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
54{
55 if (kvm_psci_call(vcpu))
56 return 1;
57
58 kvm_inject_undefined(vcpu);
59 return 1;
60}
61
62static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
63{
64 /* The hypervisor should never cause aborts */
65 kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
66 kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
67 return -EFAULT;
68}
69
70static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
71{
72 /* This is either an error in the ws. code or an external abort */
73 kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
74 kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
75 return -EFAULT;
76}
77
78/**
79 * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
80 * @vcpu: the vcpu pointer
81 * @run: the kvm_run structure pointer
82 *
83 * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
84 * halt execution of world-switches and schedule other host processes until
85 * there is an incoming IRQ or FIQ to the VM.
86 */
87static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
88{
89 trace_kvm_wfi(*vcpu_pc(vcpu));
90 kvm_vcpu_block(vcpu);
91 return 1;
92}
93
94static exit_handle_fn arm_exit_handlers[] = {
95 [HSR_EC_WFI] = kvm_handle_wfi,
96 [HSR_EC_CP15_32] = kvm_handle_cp15_32,
97 [HSR_EC_CP15_64] = kvm_handle_cp15_64,
98 [HSR_EC_CP14_MR] = kvm_handle_cp14_access,
99 [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
100 [HSR_EC_CP14_64] = kvm_handle_cp14_access,
101 [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
102 [HSR_EC_CP10_ID] = kvm_handle_cp10_id,
103 [HSR_EC_SVC_HYP] = handle_svc_hyp,
104 [HSR_EC_HVC] = handle_hvc,
105 [HSR_EC_SMC] = handle_smc,
106 [HSR_EC_IABT] = kvm_handle_guest_abort,
107 [HSR_EC_IABT_HYP] = handle_pabt_hyp,
108 [HSR_EC_DABT] = kvm_handle_guest_abort,
109 [HSR_EC_DABT_HYP] = handle_dabt_hyp,
110};
111
112static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
113{
114 u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
115
116 if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
117 !arm_exit_handlers[hsr_ec]) {
118 kvm_err("Unknown exception class: hsr: %#08x\n",
119 (unsigned int)kvm_vcpu_get_hsr(vcpu));
120 BUG();
121 }
122
123 return arm_exit_handlers[hsr_ec];
124}
125
126/*
127 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
128 * proper exit to userspace.
129 */
130int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
131 int exception_index)
132{
133 exit_handle_fn exit_handler;
134
135 switch (exception_index) {
136 case ARM_EXCEPTION_IRQ:
137 return 1;
138 case ARM_EXCEPTION_UNDEFINED:
139 kvm_err("Undefined exception in Hyp mode at: %#08lx\n",
140 kvm_vcpu_get_hyp_pc(vcpu));
141 BUG();
142 panic("KVM: Hypervisor undefined exception!\n");
143 case ARM_EXCEPTION_DATA_ABORT:
144 case ARM_EXCEPTION_PREF_ABORT:
145 case ARM_EXCEPTION_HVC:
146 /*
147 * See ARM ARM B1.14.1: "Hyp traps on instructions
148 * that fail their condition code check"
149 */
150 if (!kvm_condition_valid(vcpu)) {
151 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
152 return 1;
153 }
154
155 exit_handler = kvm_get_exit_handler(vcpu);
156
157 return exit_handler(vcpu, run);
158 default:
159 kvm_pr_unimpl("Unsupported exception type: %d",
160 exception_index);
161 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
162 return 0;
163 }
164}
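handle_exit.c above is a classic table dispatch: the exception class pulled out of the HSR indexes an array of handler pointers, a missing entry is treated as fatal, and each handler's return value tells the caller whether to re-enter the guest (> 0), exit cleanly to userspace (0), or fail (< 0). A self-contained sketch of the same pattern, with class numbers and names invented:

#include <stdio.h>
#include <stdlib.h>

typedef int (*exit_handle_fn)(void *vcpu);

/* Invented class values standing in for the HSR_EC_* constants. */
enum { EC_WFI = 1, EC_HVC = 18, EC_MAX = 64 };

static int handle_wfi(void *vcpu) { (void)vcpu; puts("wfi"); return 1; }
static int handle_hvc(void *vcpu) { (void)vcpu; puts("hvc"); return 1; }

static exit_handle_fn handlers[EC_MAX] = {
	[EC_WFI] = handle_wfi,
	[EC_HVC] = handle_hvc,
};

static int dispatch(unsigned int ec, void *vcpu)
{
	/* As in kvm_get_exit_handler(): an unknown class is a bug. */
	if (ec >= EC_MAX || !handlers[ec])
		abort();
	return handlers[ec](vcpu);
}

int main(void)
{
	return dispatch(EC_WFI, NULL) > 0 ? 0 : 1;
}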
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 8ca87ab0919d..f7793df62f58 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -35,15 +35,18 @@ __kvm_hyp_code_start:
35/******************************************************************** 35/********************************************************************
36 * Flush per-VMID TLBs 36 * Flush per-VMID TLBs
37 * 37 *
38 * void __kvm_tlb_flush_vmid(struct kvm *kvm); 38 * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
39 * 39 *
40 * We rely on the hardware to broadcast the TLB invalidation to all CPUs 40 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
41 * inside the inner-shareable domain (which is the case for all v7 41 * inside the inner-shareable domain (which is the case for all v7
42 * implementations). If we come across a non-IS SMP implementation, we'll 42 * implementations). If we come across a non-IS SMP implementation, we'll
43 * have to use an IPI based mechanism. Until then, we stick to the simple 43 * have to use an IPI based mechanism. Until then, we stick to the simple
44 * hardware assisted version. 44 * hardware assisted version.
45 *
46 * As v7 does not support flushing per IPA, just nuke the whole TLB
47 * instead, ignoring the ipa value.
45 */ 48 */
46ENTRY(__kvm_tlb_flush_vmid) 49ENTRY(__kvm_tlb_flush_vmid_ipa)
47 push {r2, r3} 50 push {r2, r3}
48 51
49 add r0, r0, #KVM_VTTBR 52 add r0, r0, #KVM_VTTBR
@@ -60,7 +63,7 @@ ENTRY(__kvm_tlb_flush_vmid)
60 63
61 pop {r2, r3} 64 pop {r2, r3}
62 bx lr 65 bx lr
63ENDPROC(__kvm_tlb_flush_vmid) 66ENDPROC(__kvm_tlb_flush_vmid_ipa)
64 67
65/******************************************************************** 68/********************************************************************
66 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable 69 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
@@ -235,9 +238,9 @@ ENTRY(kvm_call_hyp)
235 * instruction is issued since all traps are disabled when running the host 238 * instruction is issued since all traps are disabled when running the host
236 * kernel as per the Hyp-mode initialization at boot time. 239 * kernel as per the Hyp-mode initialization at boot time.
237 * 240 *
238 * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc 241 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
239 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the 242 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
240 * host kernel) and they cause a trap to the vector page + offset 0xc when HVC 243 * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
241 * instructions are called from within Hyp-mode. 244 * instructions are called from within Hyp-mode.
242 * 245 *
243 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): 246 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
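The corrected offsets match the ARMv7 Hyp vector layout: an HVC from a less-privileged mode (a guest, or the host kernel in SVC) is routed to the Hyp Trap entry at +0x14, while an HVC issued while already in Hyp mode takes the Hypervisor Call entry at +0x8. For reference, the full table as read from the ARMv7 ARM (a summary, not taken from the patch):

/* ARMv7 Hyp-mode vector offsets (summarized from the ARMv7 ARM). */
enum hyp_vector_offset {
	HYP_VEC_RESERVED = 0x00,
	HYP_VEC_UNDEF    = 0x04,	/* Undefined Instruction in Hyp */
	HYP_VEC_HVC      = 0x08,	/* HVC executed in Hyp mode     */
	HYP_VEC_PABT     = 0x0c,	/* Prefetch Abort taken in Hyp  */
	HYP_VEC_DABT     = 0x10,	/* Data Abort taken in Hyp      */
	HYP_VEC_TRAP     = 0x14,	/* traps/HVC from guest or host */
	HYP_VEC_IRQ      = 0x18,
	HYP_VEC_FIQ      = 0x1c,
};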
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 98a870ff1a5c..72a12f2171b2 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -33,16 +33,16 @@
33 */ 33 */
34int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) 34int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
35{ 35{
36 __u32 *dest; 36 unsigned long *dest;
37 unsigned int len; 37 unsigned int len;
38 int mask; 38 int mask;
39 39
40 if (!run->mmio.is_write) { 40 if (!run->mmio.is_write) {
41 dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt); 41 dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
42 memset(dest, 0, sizeof(int)); 42 *dest = 0;
43 43
44 len = run->mmio.len; 44 len = run->mmio.len;
45 if (len > 4) 45 if (len > sizeof(unsigned long))
46 return -EINVAL; 46 return -EINVAL;
47 47
48 memcpy(dest, run->mmio.data, len); 48 memcpy(dest, run->mmio.data, len);
@@ -50,7 +50,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
50 trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, 50 trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
51 *((u64 *)run->mmio.data)); 51 *((u64 *)run->mmio.data));
52 52
53 if (vcpu->arch.mmio_decode.sign_extend && len < 4) { 53 if (vcpu->arch.mmio_decode.sign_extend &&
54 len < sizeof(unsigned long)) {
54 mask = 1U << ((len * 8) - 1); 55 mask = 1U << ((len * 8) - 1);
55 *dest = (*dest ^ mask) - mask; 56 *dest = (*dest ^ mask) - mask;
56 } 57 }
@@ -65,40 +66,29 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
65 unsigned long rt, len; 66 unsigned long rt, len;
66 bool is_write, sign_extend; 67 bool is_write, sign_extend;
67 68
68 if ((vcpu->arch.hsr >> 8) & 1) { 69 if (kvm_vcpu_dabt_isextabt(vcpu)) {
69 /* cache operation on I/O addr, tell guest unsupported */ 70 /* cache operation on I/O addr, tell guest unsupported */
70 kvm_inject_dabt(vcpu, vcpu->arch.hxfar); 71 kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
71 return 1; 72 return 1;
72 } 73 }
73 74
74 if ((vcpu->arch.hsr >> 7) & 1) { 75 if (kvm_vcpu_dabt_iss1tw(vcpu)) {
75 /* page table accesses IO mem: tell guest to fix its TTBR */ 76 /* page table accesses IO mem: tell guest to fix its TTBR */
76 kvm_inject_dabt(vcpu, vcpu->arch.hxfar); 77 kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
77 return 1; 78 return 1;
78 } 79 }
79 80
80 switch ((vcpu->arch.hsr >> 22) & 0x3) { 81 len = kvm_vcpu_dabt_get_as(vcpu);
81 case 0: 82 if (unlikely(len < 0))
82 len = 1; 83 return len;
83 break;
84 case 1:
85 len = 2;
86 break;
87 case 2:
88 len = 4;
89 break;
90 default:
91 kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
92 return -EFAULT;
93 }
94 84
95 is_write = vcpu->arch.hsr & HSR_WNR; 85 is_write = kvm_vcpu_dabt_iswrite(vcpu);
96 sign_extend = vcpu->arch.hsr & HSR_SSE; 86 sign_extend = kvm_vcpu_dabt_issext(vcpu);
97 rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT; 87 rt = kvm_vcpu_dabt_get_rd(vcpu);
98 88
99 if (kvm_vcpu_reg_is_pc(vcpu, rt)) { 89 if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
100 /* IO memory trying to read/write pc */ 90 /* IO memory trying to read/write pc */
101 kvm_inject_pabt(vcpu, vcpu->arch.hxfar); 91 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
102 return 1; 92 return 1;
103 } 93 }
104 94
@@ -112,7 +102,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
112 * The MMIO instruction is emulated and should not be re-executed 102 * The MMIO instruction is emulated and should not be re-executed
113 * in the guest. 103 * in the guest.
114 */ 104 */
115 kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); 105 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
116 return 0; 106 return 0;
117} 107}
118 108
@@ -130,7 +120,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
130 * space do its magic. 120 * space do its magic.
131 */ 121 */
132 122
133 if (vcpu->arch.hsr & HSR_ISV) { 123 if (kvm_vcpu_dabt_isvalid(vcpu)) {
134 ret = decode_hsr(vcpu, fault_ipa, &mmio); 124 ret = decode_hsr(vcpu, fault_ipa, &mmio);
135 if (ret) 125 if (ret)
136 return ret; 126 return ret;
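The (*dest ^ mask) - mask line kept in kvm_handle_mmio_return() is the standard branch-free sign extension: the XOR moves the value's sign bit to a known position, and the subtraction then propagates it through the upper bits. A standalone worked example (values invented):

#include <stdio.h>

int main(void)
{
	unsigned long dest = 0x80;	/* one-byte MMIO read of 0x80 (-128) */
	unsigned int len = 1;

	if (len < sizeof(unsigned long)) {
		unsigned long mask = 1UL << ((len * 8) - 1);	/* sign bit */

		/* 0x80 ^ 0x80 == 0; 0 - 0x80 sets all the upper bits. */
		dest = (dest ^ mask) - mask;
	}
	printf("%ld\n", (long)dest);	/* prints -128 */
	return 0;
}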
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 99e07c7dd745..2f12e4056408 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -20,7 +20,6 @@
20#include <linux/kvm_host.h> 20#include <linux/kvm_host.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <trace/events/kvm.h> 22#include <trace/events/kvm.h>
23#include <asm/idmap.h>
24#include <asm/pgalloc.h> 23#include <asm/pgalloc.h>
25#include <asm/cacheflush.h> 24#include <asm/cacheflush.h>
26#include <asm/kvm_arm.h> 25#include <asm/kvm_arm.h>
@@ -28,8 +27,6 @@
28#include <asm/kvm_mmio.h> 27#include <asm/kvm_mmio.h>
29#include <asm/kvm_asm.h> 28#include <asm/kvm_asm.h>
30#include <asm/kvm_emulate.h> 29#include <asm/kvm_emulate.h>
31#include <asm/mach/map.h>
32#include <trace/events/kvm.h>
33 30
34#include "trace.h" 31#include "trace.h"
35 32
@@ -37,19 +34,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
37 34
38static DEFINE_MUTEX(kvm_hyp_pgd_mutex); 35static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
39 36
40static void kvm_tlb_flush_vmid(struct kvm *kvm) 37static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
41{ 38{
42 kvm_call_hyp(__kvm_tlb_flush_vmid, kvm); 39 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
43}
44
45static void kvm_set_pte(pte_t *pte, pte_t new_pte)
46{
47 pte_val(*pte) = new_pte;
48 /*
49 * flush_pmd_entry just takes a void pointer and cleans the necessary
50 * cache entries, so we can reuse the function for ptes.
51 */
52 flush_pmd_entry(pte);
53} 40}
54 41
55static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, 42static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -98,33 +85,42 @@ static void free_ptes(pmd_t *pmd, unsigned long addr)
98 } 85 }
99} 86}
100 87
88static void free_hyp_pgd_entry(unsigned long addr)
89{
90 pgd_t *pgd;
91 pud_t *pud;
92 pmd_t *pmd;
93 unsigned long hyp_addr = KERN_TO_HYP(addr);
94
95 pgd = hyp_pgd + pgd_index(hyp_addr);
96 pud = pud_offset(pgd, hyp_addr);
97
98 if (pud_none(*pud))
99 return;
100 BUG_ON(pud_bad(*pud));
101
102 pmd = pmd_offset(pud, hyp_addr);
103 free_ptes(pmd, addr);
104 pmd_free(NULL, pmd);
105 pud_clear(pud);
106}
107
101/** 108/**
 102 * free_hyp_pmds - free Hyp-mode level-2 tables and child level-3 tables 109 * free_hyp_pmds - free Hyp-mode level-2 tables and child level-3 tables
103 * 110 *
104 * Assumes this is a page table used strictly in Hyp-mode and therefore contains 111 * Assumes this is a page table used strictly in Hyp-mode and therefore contains
105 * only mappings in the kernel memory area, which is above PAGE_OFFSET. 112 * either mappings in the kernel memory area (above PAGE_OFFSET), or
113 * device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END).
106 */ 114 */
107void free_hyp_pmds(void) 115void free_hyp_pmds(void)
108{ 116{
109 pgd_t *pgd;
110 pud_t *pud;
111 pmd_t *pmd;
112 unsigned long addr; 117 unsigned long addr;
113 118
114 mutex_lock(&kvm_hyp_pgd_mutex); 119 mutex_lock(&kvm_hyp_pgd_mutex);
115 for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) { 120 for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
116 pgd = hyp_pgd + pgd_index(addr); 121 free_hyp_pgd_entry(addr);
117 pud = pud_offset(pgd, addr); 122 for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
118 123 free_hyp_pgd_entry(addr);
119 if (pud_none(*pud))
120 continue;
121 BUG_ON(pud_bad(*pud));
122
123 pmd = pmd_offset(pud, addr);
124 free_ptes(pmd, addr);
125 pmd_free(NULL, pmd);
126 pud_clear(pud);
127 }
128 mutex_unlock(&kvm_hyp_pgd_mutex); 124 mutex_unlock(&kvm_hyp_pgd_mutex);
129} 125}
130 126
@@ -136,7 +132,9 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
136 struct page *page; 132 struct page *page;
137 133
138 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { 134 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
139 pte = pte_offset_kernel(pmd, addr); 135 unsigned long hyp_addr = KERN_TO_HYP(addr);
136
137 pte = pte_offset_kernel(pmd, hyp_addr);
140 BUG_ON(!virt_addr_valid(addr)); 138 BUG_ON(!virt_addr_valid(addr));
141 page = virt_to_page(addr); 139 page = virt_to_page(addr);
142 kvm_set_pte(pte, mk_pte(page, PAGE_HYP)); 140 kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
@@ -151,7 +149,9 @@ static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
151 unsigned long addr; 149 unsigned long addr;
152 150
153 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { 151 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
154 pte = pte_offset_kernel(pmd, addr); 152 unsigned long hyp_addr = KERN_TO_HYP(addr);
153
154 pte = pte_offset_kernel(pmd, hyp_addr);
155 BUG_ON(pfn_valid(*pfn_base)); 155 BUG_ON(pfn_valid(*pfn_base));
156 kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE)); 156 kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
157 (*pfn_base)++; 157 (*pfn_base)++;
@@ -166,12 +166,13 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
166 unsigned long addr, next; 166 unsigned long addr, next;
167 167
168 for (addr = start; addr < end; addr = next) { 168 for (addr = start; addr < end; addr = next) {
169 pmd = pmd_offset(pud, addr); 169 unsigned long hyp_addr = KERN_TO_HYP(addr);
170 pmd = pmd_offset(pud, hyp_addr);
170 171
171 BUG_ON(pmd_sect(*pmd)); 172 BUG_ON(pmd_sect(*pmd));
172 173
173 if (pmd_none(*pmd)) { 174 if (pmd_none(*pmd)) {
174 pte = pte_alloc_one_kernel(NULL, addr); 175 pte = pte_alloc_one_kernel(NULL, hyp_addr);
175 if (!pte) { 176 if (!pte) {
176 kvm_err("Cannot allocate Hyp pte\n"); 177 kvm_err("Cannot allocate Hyp pte\n");
177 return -ENOMEM; 178 return -ENOMEM;
@@ -206,17 +207,23 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
206 unsigned long addr, next; 207 unsigned long addr, next;
207 int err = 0; 208 int err = 0;
208 209
209 BUG_ON(start > end); 210 if (start >= end)
210 if (start < PAGE_OFFSET) 211 return -EINVAL;
212 /* Check for a valid kernel memory mapping */
213 if (!pfn_base && (!virt_addr_valid(from) || !virt_addr_valid(to - 1)))
214 return -EINVAL;
215 /* Check for a valid kernel IO mapping */
216 if (pfn_base && (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)))
211 return -EINVAL; 217 return -EINVAL;
212 218
213 mutex_lock(&kvm_hyp_pgd_mutex); 219 mutex_lock(&kvm_hyp_pgd_mutex);
214 for (addr = start; addr < end; addr = next) { 220 for (addr = start; addr < end; addr = next) {
215 pgd = hyp_pgd + pgd_index(addr); 221 unsigned long hyp_addr = KERN_TO_HYP(addr);
216 pud = pud_offset(pgd, addr); 222 pgd = hyp_pgd + pgd_index(hyp_addr);
223 pud = pud_offset(pgd, hyp_addr);
217 224
218 if (pud_none_or_clear_bad(pud)) { 225 if (pud_none_or_clear_bad(pud)) {
219 pmd = pmd_alloc_one(NULL, addr); 226 pmd = pmd_alloc_one(NULL, hyp_addr);
220 if (!pmd) { 227 if (!pmd) {
221 kvm_err("Cannot allocate Hyp pmd\n"); 228 kvm_err("Cannot allocate Hyp pmd\n");
222 err = -ENOMEM; 229 err = -ENOMEM;
@@ -236,12 +243,13 @@ out:
236} 243}
237 244
238/** 245/**
239 * create_hyp_mappings - map a kernel virtual address range in Hyp mode 246 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
240 * @from: The virtual kernel start address of the range 247 * @from: The virtual kernel start address of the range
241 * @to: The virtual kernel end address of the range (exclusive) 248 * @to: The virtual kernel end address of the range (exclusive)
242 * 249 *
243 * The same virtual address as the kernel virtual address is also used in 250 * The same virtual address as the kernel virtual address is also used
244 * Hyp-mode mapping to the same underlying physical pages. 251 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
252 * physical pages.
245 * 253 *
246 * Note: Wrapping around zero in the "to" address is not supported. 254 * Note: Wrapping around zero in the "to" address is not supported.
247 */ 255 */
@@ -251,10 +259,13 @@ int create_hyp_mappings(void *from, void *to)
251} 259}
252 260
253/** 261/**
254 * create_hyp_io_mappings - map a physical IO range in Hyp mode 262 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
255 * @from: The virtual HYP start address of the range 263 * @from: The kernel start VA of the range
256 * @to: The virtual HYP end address of the range (exclusive) 264 * @to: The kernel end VA of the range (exclusive)
257 * @addr: The physical start address which gets mapped 265 * @addr: The physical start address which gets mapped
266 *
267 * The resulting HYP VA is the same as the kernel VA, modulo
268 * HYP_PAGE_OFFSET.
258 */ 269 */
259int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr) 270int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
260{ 271{
@@ -290,7 +301,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
290 VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1)); 301 VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
291 302
292 memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); 303 memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
293 clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); 304 kvm_clean_pgd(pgd);
294 kvm->arch.pgd = pgd; 305 kvm->arch.pgd = pgd;
295 306
296 return 0; 307 return 0;
@@ -422,22 +433,22 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
422 return 0; /* ignore calls from kvm_set_spte_hva */ 433 return 0; /* ignore calls from kvm_set_spte_hva */
423 pmd = mmu_memory_cache_alloc(cache); 434 pmd = mmu_memory_cache_alloc(cache);
424 pud_populate(NULL, pud, pmd); 435 pud_populate(NULL, pud, pmd);
425 pmd += pmd_index(addr);
426 get_page(virt_to_page(pud)); 436 get_page(virt_to_page(pud));
427 } else 437 }
428 pmd = pmd_offset(pud, addr); 438
439 pmd = pmd_offset(pud, addr);
429 440
430 /* Create 2nd stage page table mapping - Level 2 */ 441 /* Create 2nd stage page table mapping - Level 2 */
431 if (pmd_none(*pmd)) { 442 if (pmd_none(*pmd)) {
432 if (!cache) 443 if (!cache)
433 return 0; /* ignore calls from kvm_set_spte_hva */ 444 return 0; /* ignore calls from kvm_set_spte_hva */
434 pte = mmu_memory_cache_alloc(cache); 445 pte = mmu_memory_cache_alloc(cache);
435 clean_pte_table(pte); 446 kvm_clean_pte(pte);
436 pmd_populate_kernel(NULL, pmd, pte); 447 pmd_populate_kernel(NULL, pmd, pte);
437 pte += pte_index(addr);
438 get_page(virt_to_page(pmd)); 448 get_page(virt_to_page(pmd));
439 } else 449 }
440 pte = pte_offset_kernel(pmd, addr); 450
451 pte = pte_offset_kernel(pmd, addr);
441 452
442 if (iomap && pte_present(*pte)) 453 if (iomap && pte_present(*pte))
443 return -EFAULT; 454 return -EFAULT;
@@ -446,7 +457,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
446 old_pte = *pte; 457 old_pte = *pte;
447 kvm_set_pte(pte, *new_pte); 458 kvm_set_pte(pte, *new_pte);
448 if (pte_present(old_pte)) 459 if (pte_present(old_pte))
449 kvm_tlb_flush_vmid(kvm); 460 kvm_tlb_flush_vmid_ipa(kvm, addr);
450 else 461 else
451 get_page(virt_to_page(pte)); 462 get_page(virt_to_page(pte));
452 463
@@ -473,7 +484,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
473 pfn = __phys_to_pfn(pa); 484 pfn = __phys_to_pfn(pa);
474 485
475 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { 486 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
476 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR); 487 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
488 kvm_set_s2pte_writable(&pte);
477 489
478 ret = mmu_topup_memory_cache(&cache, 2, 2); 490 ret = mmu_topup_memory_cache(&cache, 2, 2);
479 if (ret) 491 if (ret)
@@ -492,29 +504,6 @@ out:
492 return ret; 504 return ret;
493} 505}
494 506
495static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
496{
497 /*
498 * If we are going to insert an instruction page and the icache is
499 * either VIPT or PIPT, there is a potential problem where the host
500 * (or another VM) may have used the same page as this guest, and we
501 * read incorrect data from the icache. If we're using a PIPT cache,
502 * we can invalidate just that page, but if we are using a VIPT cache
503 * we need to invalidate the entire icache - damn shame - as written
504 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
505 *
506 * VIVT caches are tagged using both the ASID and the VMID and doesn't
507 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
508 */
509 if (icache_is_pipt()) {
510 unsigned long hva = gfn_to_hva(kvm, gfn);
511 __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
512 } else if (!icache_is_vivt_asid_tagged()) {
513 /* any kind of VIPT cache */
514 __flush_icache_all();
515 }
516}
517
518static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, 507static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
519 gfn_t gfn, struct kvm_memory_slot *memslot, 508 gfn_t gfn, struct kvm_memory_slot *memslot,
520 unsigned long fault_status) 509 unsigned long fault_status)
@@ -526,7 +515,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
526 unsigned long mmu_seq; 515 unsigned long mmu_seq;
527 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; 516 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
528 517
529 write_fault = kvm_is_write_fault(vcpu->arch.hsr); 518 write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
530 if (fault_status == FSC_PERM && !write_fault) { 519 if (fault_status == FSC_PERM && !write_fault) {
531 kvm_err("Unexpected L2 read permission error\n"); 520 kvm_err("Unexpected L2 read permission error\n");
532 return -EFAULT; 521 return -EFAULT;
@@ -560,7 +549,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
560 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) 549 if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
561 goto out_unlock; 550 goto out_unlock;
562 if (writable) { 551 if (writable) {
563 pte_val(new_pte) |= L_PTE_S2_RDWR; 552 kvm_set_s2pte_writable(&new_pte);
564 kvm_set_pfn_dirty(pfn); 553 kvm_set_pfn_dirty(pfn);
565 } 554 }
566 stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false); 555 stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
@@ -585,7 +574,6 @@ out_unlock:
585 */ 574 */
586int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) 575int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
587{ 576{
588 unsigned long hsr_ec;
589 unsigned long fault_status; 577 unsigned long fault_status;
590 phys_addr_t fault_ipa; 578 phys_addr_t fault_ipa;
591 struct kvm_memory_slot *memslot; 579 struct kvm_memory_slot *memslot;
@@ -593,18 +581,17 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
593 gfn_t gfn; 581 gfn_t gfn;
594 int ret, idx; 582 int ret, idx;
595 583
596 hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT; 584 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
597 is_iabt = (hsr_ec == HSR_EC_IABT); 585 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
598 fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8;
599 586
600 trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr, 587 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
601 vcpu->arch.hxfar, fault_ipa); 588 kvm_vcpu_get_hfar(vcpu), fault_ipa);
602 589
 603 /* Check the stage-2 fault is a translation fault or a write fault */ 590 /* Check the stage-2 fault is a translation fault or a write fault */
604 fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE); 591 fault_status = kvm_vcpu_trap_get_fault(vcpu);
605 if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { 592 if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
606 kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n", 593 kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
607 hsr_ec, fault_status); 594 kvm_vcpu_trap_get_class(vcpu), fault_status);
608 return -EFAULT; 595 return -EFAULT;
609 } 596 }
610 597
@@ -614,7 +601,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
614 if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) { 601 if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
615 if (is_iabt) { 602 if (is_iabt) {
616 /* Prefetch Abort on I/O address */ 603 /* Prefetch Abort on I/O address */
617 kvm_inject_pabt(vcpu, vcpu->arch.hxfar); 604 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
618 ret = 1; 605 ret = 1;
619 goto out_unlock; 606 goto out_unlock;
620 } 607 }
@@ -626,8 +613,13 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
626 goto out_unlock; 613 goto out_unlock;
627 } 614 }
628 615
629 /* Adjust page offset */ 616 /*
630 fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK; 617 * The IPA is reported as [MAX:12], so we need to
618 * complement it with the bottom 12 bits from the
619 * faulting VA. This is always 12 bits, irrespective
620 * of the page size.
621 */
622 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
631 ret = io_mem_abort(vcpu, run, fault_ipa); 623 ret = io_mem_abort(vcpu, run, fault_ipa);
632 goto out_unlock; 624 goto out_unlock;
633 } 625 }
@@ -682,7 +674,7 @@ static void handle_hva_to_gpa(struct kvm *kvm,
682static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) 674static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
683{ 675{
684 unmap_stage2_range(kvm, gpa, PAGE_SIZE); 676 unmap_stage2_range(kvm, gpa, PAGE_SIZE);
685 kvm_tlb_flush_vmid(kvm); 677 kvm_tlb_flush_vmid_ipa(kvm, gpa);
686} 678}
687 679
688int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 680int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
@@ -776,7 +768,7 @@ void kvm_clear_hyp_idmap(void)
776 pmd = pmd_offset(pud, addr); 768 pmd = pmd_offset(pud, addr);
777 769
778 pud_clear(pud); 770 pud_clear(pud);
779 clean_pmd_entry(pmd); 771 kvm_clean_pmd_entry(pmd);
780 pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK)); 772 pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
781 } while (pgd++, addr = next, addr < end); 773 } while (pgd++, addr = next, addr < end);
782} 774}
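Two address sources meet in the abort path above: HPFAR, whose bits [31:4] carry IPA[39:12] (hence the effective << 8 inside kvm_vcpu_get_fault_ipa()), and HFAR, which supplies the untranslated low 12 bits of the faulting address. A sketch of the combination with the mask spelled out (field layout summarized from the ARMv7 ARM):

#include <stdint.h>

typedef uint64_t phys_addr_t;

/* HPFAR[31:4] holds IPA[39:12]: ((hpfar >> 4) << 12) == (hpfar << 8).
 * The page offset is never translated, so it comes from HFAR, exactly
 * as the comment added by the patch explains. */
static phys_addr_t fault_ipa(uint32_t hpfar, uint32_t hfar)
{
	phys_addr_t ipa = (phys_addr_t)(hpfar & ~0xfu) << 8;

	return ipa | (hfar & ((1 << 12) - 1));
}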
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index 0e4cfe123b38..17c5ac7d10ed 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -1477,7 +1477,7 @@ int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
1477 if (addr & ~KVM_PHYS_MASK) 1477 if (addr & ~KVM_PHYS_MASK)
1478 return -E2BIG; 1478 return -E2BIG;
1479 1479
1480 if (addr & ~PAGE_MASK) 1480 if (addr & (SZ_4K - 1))
1481 return -EINVAL; 1481 return -EINVAL;
1482 1482
1483 mutex_lock(&kvm->lock); 1483 mutex_lock(&kvm->lock);
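The vgic change swaps ~PAGE_MASK for (SZ_4K - 1) so the base-address check demands 4KiB alignment, the GIC's actual requirement, instead of whatever page size the kernel happens to be built with. The underlying idiom, for reference:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_4K 0x1000UL

/* A power-of-two boundary check: (size - 1) masks exactly the bits
 * that must be clear for the address to be aligned. */
static bool aligned_4k(uint64_t addr)
{
	return (addr & (SZ_4K - 1)) == 0;
}

int main(void)
{
	printf("%d %d\n", aligned_4k(0x2000), aligned_4k(0x2010));	/* 1 0 */
	return 0;
}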
diff --git a/arch/arm/mach-exynos/hotplug.c b/arch/arm/mach-exynos/hotplug.c
index c3f825b27947..af90cfa2f826 100644
--- a/arch/arm/mach-exynos/hotplug.c
+++ b/arch/arm/mach-exynos/hotplug.c
@@ -28,7 +28,6 @@ static inline void cpu_enter_lowpower_a9(void)
28{ 28{
29 unsigned int v; 29 unsigned int v;
30 30
31 flush_cache_all();
32 asm volatile( 31 asm volatile(
33 " mcr p15, 0, %1, c7, c5, 0\n" 32 " mcr p15, 0, %1, c7, c5, 0\n"
34 " mcr p15, 0, %1, c7, c10, 4\n" 33 " mcr p15, 0, %1, c7, c10, 4\n"
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index ab920e34bd0a..2517406e7f56 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -1252,7 +1252,7 @@ static void __init nuri_camera_init(void)
1252 } 1252 }
1253 1253
1254 m5mols_board_info.irq = s5p_register_gpio_interrupt(GPIO_CAM_8M_ISP_INT); 1254 m5mols_board_info.irq = s5p_register_gpio_interrupt(GPIO_CAM_8M_ISP_INT);
1255 if (!IS_ERR_VALUE(m5mols_board_info.irq)) 1255 if (m5mols_board_info.irq >= 0)
1256 s3c_gpio_cfgpin(GPIO_CAM_8M_ISP_INT, S3C_GPIO_SFN(0xF)); 1256 s3c_gpio_cfgpin(GPIO_CAM_8M_ISP_INT, S3C_GPIO_SFN(0xF));
1257 else 1257 else
1258 pr_err("%s: Failed to configure 8M_ISP_INT GPIO\n", __func__); 1258 pr_err("%s: Failed to configure 8M_ISP_INT GPIO\n", __func__);
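This hunk and several below (devices.c, board-omap3beagle.c, clock.c, the gpmc files, omap_hwmod.c, timer.c) retire IS_ERR_VALUE() on plain int results. The macro is meant for unsigned long values in the ERR_PTR range and only recognizes -MAX_ERRNO..-1, so a direct sign test both states the intent and catches out-of-range negatives. A standalone illustration, with the macro written out essentially as in include/linux/err.h:

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	int ret = -22;			/* a small errno, e.g. -EINVAL */

	printf("%d %d\n", (int)IS_ERR_VALUE(ret), ret < 0);	/* 1 1 */

	ret = -5000;			/* outside the errno range */
	printf("%d %d\n", (int)IS_ERR_VALUE(ret), ret < 0);	/* 0 1 */
	return 0;
}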
diff --git a/arch/arm/mach-highbank/hotplug.c b/arch/arm/mach-highbank/hotplug.c
index 890cae23c12a..a019e4e86e51 100644
--- a/arch/arm/mach-highbank/hotplug.c
+++ b/arch/arm/mach-highbank/hotplug.c
@@ -14,7 +14,6 @@
14 * this program. If not, see <http://www.gnu.org/licenses/>. 14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */ 15 */
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17
18#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
19 18
20#include "core.h" 19#include "core.h"
diff --git a/arch/arm/mach-imx/devices/devices.c b/arch/arm/mach-imx/devices/devices.c
index 1b37482407f9..1b4366a0e7c0 100644
--- a/arch/arm/mach-imx/devices/devices.c
+++ b/arch/arm/mach-imx/devices/devices.c
@@ -37,7 +37,7 @@ int __init mxc_device_init(void)
37 int ret; 37 int ret;
38 38
39 ret = device_register(&mxc_aips_bus); 39 ret = device_register(&mxc_aips_bus);
40 if (IS_ERR_VALUE(ret)) 40 if (ret < 0)
41 goto done; 41 goto done;
42 42
43 ret = device_register(&mxc_ahb_bus); 43 ret = device_register(&mxc_ahb_bus);
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 361a253e2b63..5e91112dcbee 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -11,7 +11,6 @@
11 */ 11 */
12 12
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <asm/cacheflush.h>
15#include <asm/cp15.h> 14#include <asm/cp15.h>
16 15
17#include "common.h" 16#include "common.h"
@@ -20,7 +19,6 @@ static inline void cpu_enter_lowpower(void)
20{ 19{
21 unsigned int v; 20 unsigned int v;
22 21
23 flush_cache_all();
24 asm volatile( 22 asm volatile(
25 "mcr p15, 0, %1, c7, c5, 0\n" 23 "mcr p15, 0, %1, c7, c5, 0\n"
26 " mcr p15, 0, %1, c7, c10, 4\n" 24 " mcr p15, 0, %1, c7, c10, 4\n"
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index ea961445e0e9..b23c8e4f28e8 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -536,16 +536,14 @@ static void __init ap_init_of(void)
536 'A' + (ap_sc_id & 0x0f)); 536 'A' + (ap_sc_id & 0x0f));
537 537
538 soc_dev = soc_device_register(soc_dev_attr); 538 soc_dev = soc_device_register(soc_dev_attr);
539 if (IS_ERR_OR_NULL(soc_dev)) { 539 if (IS_ERR(soc_dev)) {
540 kfree(soc_dev_attr->revision); 540 kfree(soc_dev_attr->revision);
541 kfree(soc_dev_attr); 541 kfree(soc_dev_attr);
542 return; 542 return;
543 } 543 }
544 544
545 parent = soc_device_to_device(soc_dev); 545 parent = soc_device_to_device(soc_dev);
546 546 integrator_init_sysfs(parent, ap_sc_id);
547 if (!IS_ERR_OR_NULL(parent))
548 integrator_init_sysfs(parent, ap_sc_id);
549 547
550 of_platform_populate(root, of_default_bus_match_table, 548 of_platform_populate(root, of_default_bus_match_table,
551 ap_auxdata_lookup, parent); 549 ap_auxdata_lookup, parent);
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 2b0db82a5381..da1091be0887 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -360,17 +360,14 @@ static void __init intcp_init_of(void)
360 'A' + (intcp_sc_id & 0x0f)); 360 'A' + (intcp_sc_id & 0x0f));
361 361
362 soc_dev = soc_device_register(soc_dev_attr); 362 soc_dev = soc_device_register(soc_dev_attr);
363 if (IS_ERR_OR_NULL(soc_dev)) { 363 if (IS_ERR(soc_dev)) {
364 kfree(soc_dev_attr->revision); 364 kfree(soc_dev_attr->revision);
365 kfree(soc_dev_attr); 365 kfree(soc_dev_attr);
366 return; 366 return;
367 } 367 }
368 368
369 parent = soc_device_to_device(soc_dev); 369 parent = soc_device_to_device(soc_dev);
370 370 integrator_init_sysfs(parent, intcp_sc_id);
371 if (!IS_ERR_OR_NULL(parent))
372 integrator_init_sysfs(parent, intcp_sc_id);
373
374 of_platform_populate(root, of_default_bus_match_table, 371 of_platform_populate(root, of_default_bus_match_table,
375 intcp_auxdata_lookup, parent); 372 intcp_auxdata_lookup, parent);
376} 373}
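soc_device_register() reports failure as an ERR_PTR-encoded pointer and never returns NULL, so IS_ERR() is the precise test; and soc_device_to_device() on a successfully registered device cannot fail, which is what lets the IS_ERR_OR_NULL(parent) guard go here and in integrator_ap.c above. A minimal sketch of the convention, with a made-up registration function:

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical helper following the kernel convention: a valid pointer
 * on success, ERR_PTR(-errno) on failure, never NULL. */
static void *register_thing(int fail)
{
	static int thing;
	return fail ? ERR_PTR(-12) /* -ENOMEM */ : (void *)&thing;
}

int main(void)
{
	void *dev = register_thing(1);

	if (IS_ERR(dev))
		printf("register failed: %ld\n", PTR_ERR(dev));
	return 0;
}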
diff --git a/arch/arm/mach-msm/hotplug.c b/arch/arm/mach-msm/hotplug.c
index 750446feb444..326a87261f9a 100644
--- a/arch/arm/mach-msm/hotplug.c
+++ b/arch/arm/mach-msm/hotplug.c
@@ -10,16 +10,12 @@
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12 12
13#include <asm/cacheflush.h>
14#include <asm/smp_plat.h> 13#include <asm/smp_plat.h>
15 14
16#include "common.h" 15#include "common.h"
17 16
18static inline void cpu_enter_lowpower(void) 17static inline void cpu_enter_lowpower(void)
19{ 18{
20 /* Just flush the cache. Changing the coherency is not yet
21 * available on msm. */
22 flush_cache_all();
23} 19}
24 20
25static inline void cpu_leave_lowpower(void) 21static inline void cpu_leave_lowpower(void)
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 0ce91af753fa..fff141330a63 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -479,7 +479,7 @@ static int __init beagle_opp_init(void)
479 479
480 /* Initialize the omap3 opp table if not already created. */ 480 /* Initialize the omap3 opp table if not already created. */
481 r = omap3_opp_init(); 481 r = omap3_opp_init();
482 if (IS_ERR_VALUE(r) && (r != -EEXIST)) { 482 if (r < 0 && (r != -EEXIST)) {
483 pr_err("%s: opp default init failed\n", __func__); 483 pr_err("%s: opp default init failed\n", __func__);
484 return r; 484 return r;
485 } 485 }
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 8474c7d228ee..0c38ca96c840 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -611,7 +611,7 @@ int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name)
611 return -ENOENT; 611 return -ENOENT;
612 612
613 r = clk_set_rate(mpurate_ck, mpurate); 613 r = clk_set_rate(mpurate_ck, mpurate);
614 if (IS_ERR_VALUE(r)) { 614 if (r < 0) {
615 WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n", 615 WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n",
616 mpurate_ck_name, mpurate, r); 616 mpurate_ck_name, mpurate, r);
617 clk_put(mpurate_ck); 617 clk_put(mpurate_ck);
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index fadd87435cd0..0d75889c0a6f 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -303,7 +303,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
303 t = omap2_onenand_calc_async_timings(); 303 t = omap2_onenand_calc_async_timings();
304 304
305 ret = gpmc_set_async_mode(gpmc_onenand_data->cs, &t); 305 ret = gpmc_set_async_mode(gpmc_onenand_data->cs, &t);
306 if (IS_ERR_VALUE(ret)) 306 if (ret < 0)
307 return ret; 307 return ret;
308 308
309 omap2_onenand_set_async_mode(onenand_base); 309 omap2_onenand_set_async_mode(onenand_base);
@@ -325,7 +325,7 @@ static int omap2_onenand_setup_sync(void __iomem *onenand_base, int *freq_ptr)
325 t = omap2_onenand_calc_sync_timings(gpmc_onenand_data, freq); 325 t = omap2_onenand_calc_sync_timings(gpmc_onenand_data, freq);
326 326
327 ret = gpmc_set_sync_mode(gpmc_onenand_data->cs, &t); 327 ret = gpmc_set_sync_mode(gpmc_onenand_data->cs, &t);
328 if (IS_ERR_VALUE(ret)) 328 if (ret < 0)
329 return ret; 329 return ret;
330 330
331 set_onenand_cfg(onenand_base); 331 set_onenand_cfg(onenand_base);
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 410e1bac7815..6de31739b45c 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -716,7 +716,7 @@ static int gpmc_setup_irq(void)
716 return -EINVAL; 716 return -EINVAL;
717 717
718 gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0); 718 gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0);
719 if (IS_ERR_VALUE(gpmc_irq_start)) { 719 if (gpmc_irq_start < 0) {
720 pr_err("irq_alloc_descs failed\n"); 720 pr_err("irq_alloc_descs failed\n");
721 return gpmc_irq_start; 721 return gpmc_irq_start;
722 } 722 }
@@ -801,7 +801,7 @@ static int gpmc_mem_init(void)
801 continue; 801 continue;
802 gpmc_cs_get_memconf(cs, &base, &size); 802 gpmc_cs_get_memconf(cs, &base, &size);
803 rc = gpmc_cs_insert_mem(cs, base, size); 803 rc = gpmc_cs_insert_mem(cs, base, size);
804 if (IS_ERR_VALUE(rc)) { 804 if (rc < 0) {
805 while (--cs >= 0) 805 while (--cs >= 0)
806 if (gpmc_cs_mem_enabled(cs)) 806 if (gpmc_cs_mem_enabled(cs))
807 gpmc_cs_delete_mem(cs); 807 gpmc_cs_delete_mem(cs);
@@ -1370,14 +1370,14 @@ static int gpmc_probe(struct platform_device *pdev)
1370 GPMC_REVISION_MINOR(l)); 1370 GPMC_REVISION_MINOR(l));
1371 1371
1372 rc = gpmc_mem_init(); 1372 rc = gpmc_mem_init();
1373 if (IS_ERR_VALUE(rc)) { 1373 if (rc < 0) {
1374 clk_disable_unprepare(gpmc_l3_clk); 1374 clk_disable_unprepare(gpmc_l3_clk);
1375 clk_put(gpmc_l3_clk); 1375 clk_put(gpmc_l3_clk);
1376 dev_err(gpmc_dev, "failed to reserve memory\n"); 1376 dev_err(gpmc_dev, "failed to reserve memory\n");
1377 return rc; 1377 return rc;
1378 } 1378 }
1379 1379
1380 if (IS_ERR_VALUE(gpmc_setup_irq())) 1380 if (gpmc_setup_irq() < 0)
1381 dev_warn(gpmc_dev, "gpmc_setup_irq failed\n"); 1381 dev_warn(gpmc_dev, "gpmc_setup_irq failed\n");
1382 1382
1383 /* Now the GPMC is initialised, unreserve the chip-selects */ 1383 /* Now the GPMC is initialised, unreserve the chip-selects */
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 2fb17caa8683..0f4c18e6e60c 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -314,7 +314,7 @@ void __init omap3xxx_check_revision(void)
314 * If the processor type is Cortex-A8 and the revision is 0x0 314 * If the processor type is Cortex-A8 and the revision is 0x0
315 * it means its Cortex r0p0 which is 3430 ES1.0. 315 * it means its Cortex r0p0 which is 3430 ES1.0.
316 */ 316 */
317 cpuid = read_cpuid(CPUID_ID); 317 cpuid = read_cpuid_id();
318 if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) { 318 if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) {
319 omap_revision = OMAP3430_REV_ES1_0; 319 omap_revision = OMAP3430_REV_ES1_0;
320 cpu_rev = "1.0"; 320 cpu_rev = "1.0";
@@ -475,7 +475,7 @@ void __init omap4xxx_check_revision(void)
475 * Use ARM register to detect the correct ES version 475 * Use ARM register to detect the correct ES version
476 */ 476 */
477 if (!rev && (hawkeye != 0xb94e) && (hawkeye != 0xb975)) { 477 if (!rev && (hawkeye != 0xb94e) && (hawkeye != 0xb975)) {
478 idcode = read_cpuid(CPUID_ID); 478 idcode = read_cpuid_id();
479 rev = (idcode & 0xf) - 1; 479 rev = (idcode & 0xf) - 1;
480 } 480 }
481 481
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 61174b78dee6..2a551f997aea 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -174,7 +174,7 @@ static void __init omap4_smp_init_cpus(void)
174 unsigned int i = 0, ncores = 1, cpu_id; 174 unsigned int i = 0, ncores = 1, cpu_id;
175 175
176 /* Use ARM cpuid check here, as SoC detection will not work so early */ 176 /* Use ARM cpuid check here, as SoC detection will not work so early */
177 cpu_id = read_cpuid(CPUID_ID) & CPU_MASK; 177 cpu_id = read_cpuid_id() & CPU_MASK;
178 if (cpu_id == CPU_CORTEX_A9) { 178 if (cpu_id == CPU_CORTEX_A9) {
179 /* 179 /*
180 * Currently we can't call ioremap here because 180 * Currently we can't call ioremap here because
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 381be7ac0c17..eeea4fa28fbc 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -131,7 +131,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
131 int oh_cnt, i, ret = 0; 131 int oh_cnt, i, ret = 0;
132 132
133 oh_cnt = of_property_count_strings(node, "ti,hwmods"); 133 oh_cnt = of_property_count_strings(node, "ti,hwmods");
134 if (!oh_cnt || IS_ERR_VALUE(oh_cnt)) { 134 if (oh_cnt <= 0) {
135 dev_dbg(&pdev->dev, "No 'hwmods' to build omap_device\n"); 135 dev_dbg(&pdev->dev, "No 'hwmods' to build omap_device\n");
136 return -ENODEV; 136 return -ENODEV;
137 } 137 }
@@ -815,20 +815,17 @@ struct device *omap_device_get_by_hwmod_name(const char *oh_name)
815 } 815 }
816 816
817 oh = omap_hwmod_lookup(oh_name); 817 oh = omap_hwmod_lookup(oh_name);
818 if (IS_ERR_OR_NULL(oh)) { 818 if (!oh) {
819 WARN(1, "%s: no hwmod for %s\n", __func__, 819 WARN(1, "%s: no hwmod for %s\n", __func__,
820 oh_name); 820 oh_name);
821 return ERR_PTR(oh ? PTR_ERR(oh) : -ENODEV); 821 return ERR_PTR(-ENODEV);
822 } 822 }
823 if (IS_ERR_OR_NULL(oh->od)) { 823 if (!oh->od) {
824 WARN(1, "%s: no omap_device for %s\n", __func__, 824 WARN(1, "%s: no omap_device for %s\n", __func__,
825 oh_name); 825 oh_name);
826 return ERR_PTR(oh->od ? PTR_ERR(oh->od) : -ENODEV); 826 return ERR_PTR(-ENODEV);
827 } 827 }
828 828
829 if (IS_ERR_OR_NULL(oh->od->pdev))
830 return ERR_PTR(oh->od->pdev ? PTR_ERR(oh->od->pdev) : -ENODEV);
831
832 return &oh->od->pdev->dev; 829 return &oh->od->pdev->dev;
833} 830}
834 831
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 9553c9907d40..93f213b6a784 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1663,7 +1663,7 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name)
1663 return -ENOSYS; 1663 return -ENOSYS;
1664 1664
1665 ret = _lookup_hardreset(oh, name, &ohri); 1665 ret = _lookup_hardreset(oh, name, &ohri);
1666 if (IS_ERR_VALUE(ret)) 1666 if (ret < 0)
1667 return ret; 1667 return ret;
1668 1668
1669 if (oh->clkdm) { 1669 if (oh->clkdm) {
@@ -2413,7 +2413,7 @@ static int __init _init(struct omap_hwmod *oh, void *data)
2413 _init_mpu_rt_base(oh, NULL); 2413 _init_mpu_rt_base(oh, NULL);
2414 2414
2415 r = _init_clocks(oh, NULL); 2415 r = _init_clocks(oh, NULL);
2416 if (IS_ERR_VALUE(r)) { 2416 if (r < 0) {
2417 WARN(1, "omap_hwmod: %s: couldn't init clocks\n", oh->name); 2417 WARN(1, "omap_hwmod: %s: couldn't init clocks\n", oh->name);
2418 return -EINVAL; 2418 return -EINVAL;
2419 } 2419 }
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 1edd000a8143..0b339861d751 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -217,7 +217,7 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *dir)
217 return 0; 217 return 0;
218 218
219 d = debugfs_create_dir(pwrdm->name, (struct dentry *)dir); 219 d = debugfs_create_dir(pwrdm->name, (struct dentry *)dir);
220 if (!(IS_ERR_OR_NULL(d))) 220 if (d)
221 (void) debugfs_create_file("suspend", S_IRUGO|S_IWUSR, d, 221 (void) debugfs_create_file("suspend", S_IRUGO|S_IWUSR, d,
222 (void *)pwrdm, &pwrdm_suspend_fops); 222 (void *)pwrdm, &pwrdm_suspend_fops);
223 223
@@ -261,8 +261,8 @@ static int __init pm_dbg_init(void)
261 return 0; 261 return 0;
262 262
263 d = debugfs_create_dir("pm_debug", NULL); 263 d = debugfs_create_dir("pm_debug", NULL);
264 if (IS_ERR_OR_NULL(d)) 264 if (!d)
265 return PTR_ERR(d); 265 return -EINVAL;
266 266
267 (void) debugfs_create_file("count", S_IRUGO, 267 (void) debugfs_create_file("count", S_IRUGO,
268 d, (void *)DEBUG_FILE_COUNTERS, &debug_fops); 268 d, (void *)DEBUG_FILE_COUNTERS, &debug_fops);
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 89cad4a605dd..86babd740d41 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -1180,7 +1180,7 @@ bool pwrdm_can_ever_lose_context(struct powerdomain *pwrdm)
1180{ 1180{
1181 int i; 1181 int i;
1182 1182
1183 if (IS_ERR_OR_NULL(pwrdm)) { 1183 if (!pwrdm) {
1184 pr_debug("powerdomain: %s: invalid powerdomain pointer\n", 1184 pr_debug("powerdomain: %s: invalid powerdomain pointer\n",
1185 __func__); 1185 __func__);
1186 return 1; 1186 return 1;
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index ea6ea9aab092..63e6384fa72e 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -288,7 +288,7 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
288 r = -EINVAL; 288 r = -EINVAL;
289 } else { 289 } else {
290 r = clk_set_parent(timer->fclk, src); 290 r = clk_set_parent(timer->fclk, src);
291 if (IS_ERR_VALUE(r)) 291 if (r < 0)
292 pr_warn("%s: %s cannot set source\n", 292 pr_warn("%s: %s cannot set source\n",
293 __func__, oh->name); 293 __func__, oh->name);
294 clk_put(src); 294 clk_put(src);
diff --git a/arch/arm/mach-prima2/hotplug.c b/arch/arm/mach-prima2/hotplug.c
index f4b17cbababd..0ab2f8bae28e 100644
--- a/arch/arm/mach-prima2/hotplug.c
+++ b/arch/arm/mach-prima2/hotplug.c
@@ -10,13 +10,10 @@
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12 12
13#include <asm/cacheflush.h>
14#include <asm/smp_plat.h> 13#include <asm/smp_plat.h>
15 14
16static inline void platform_do_lowpower(unsigned int cpu) 15static inline void platform_do_lowpower(unsigned int cpu)
17{ 16{
18 flush_cache_all();
19
20 /* we put the platform to just WFI */ 17 /* we put the platform to just WFI */
21 for (;;) { 18 for (;;) {
22 __asm__ __volatile__("dsb\n\t" "wfi\n\t" 19 __asm__ __volatile__("dsb\n\t" "wfi\n\t"
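The low-power loop kept here (and in the ux500 hunk below) issues DSB immediately before WFI: the barrier drains every outstanding memory access before the core halts, and the enclosing loop absorbs spurious wake-ups. The idiom in isolation (ARM GCC inline assembly, matching the surrounding code):

/* Park the calling CPU: complete outstanding accesses, then wait for
 * an interrupt; loop in case the core wakes with nothing to do. */
static inline void cpu_park_loop(void)
{
	for (;;)
		__asm__ __volatile__("dsb\n\t"
				     "wfi\n\t"
				     : : : "memory");
}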
diff --git a/arch/arm/mach-realview/hotplug.c b/arch/arm/mach-realview/hotplug.c
index 53818e5cd3ad..ac22dd41b135 100644
--- a/arch/arm/mach-realview/hotplug.c
+++ b/arch/arm/mach-realview/hotplug.c
@@ -12,7 +12,6 @@
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/smp.h> 13#include <linux/smp.h>
14 14
15#include <asm/cacheflush.h>
16#include <asm/cp15.h> 15#include <asm/cp15.h>
17#include <asm/smp_plat.h> 16#include <asm/smp_plat.h>
18 17
@@ -20,7 +19,6 @@ static inline void cpu_enter_lowpower(void)
20{ 19{
21 unsigned int v; 20 unsigned int v;
22 21
23 flush_cache_all();
24 asm volatile( 22 asm volatile(
25 " mcr p15, 0, %1, c7, c5, 0\n" 23 " mcr p15, 0, %1, c7, c5, 0\n"
26 " mcr p15, 0, %1, c7, c10, 4\n" 24 " mcr p15, 0, %1, c7, c10, 4\n"
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index bf79626ee5a4..496592b6c763 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -104,14 +104,6 @@ static int sh73a0_cpu_kill(unsigned int cpu)
104 104
105static void sh73a0_cpu_die(unsigned int cpu) 105static void sh73a0_cpu_die(unsigned int cpu)
106{ 106{
107 /*
108 * The ARM MPcore does not issue a cache coherency request for the L1
109 * cache when powering off single CPUs. We must take care of this and
110 * further caches.
111 */
112 dsb();
113 flush_cache_all();
114
115 /* Set power off mode. This takes the CPU out of the MP cluster */ 107 /* Set power off mode. This takes the CPU out of the MP cluster */
116 scu_power_mode(shmobile_scu_base, SCU_PM_POWEROFF); 108 scu_power_mode(shmobile_scu_base, SCU_PM_POWEROFF);
117 109
diff --git a/arch/arm/mach-spear/hotplug.c b/arch/arm/mach-spear/hotplug.c
index a7d2dd11a4f2..d97749c642ce 100644
--- a/arch/arm/mach-spear/hotplug.c
+++ b/arch/arm/mach-spear/hotplug.c
@@ -13,7 +13,6 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/smp.h> 15#include <linux/smp.h>
16#include <asm/cacheflush.h>
17#include <asm/cp15.h> 16#include <asm/cp15.h>
18#include <asm/smp_plat.h> 17#include <asm/smp_plat.h>
19 18
@@ -21,7 +20,6 @@ static inline void cpu_enter_lowpower(void)
21{ 20{
22 unsigned int v; 21 unsigned int v;
23 22
24 flush_cache_all();
25 asm volatile( 23 asm volatile(
26 " mcr p15, 0, %1, c7, c5, 0\n" 24 " mcr p15, 0, %1, c7, c5, 0\n"
27 " dsb\n" 25 " dsb\n"
diff --git a/arch/arm/mach-tegra/board-harmony-pcie.c b/arch/arm/mach-tegra/board-harmony-pcie.c
index d195db09ea32..035b240b9e15 100644
--- a/arch/arm/mach-tegra/board-harmony-pcie.c
+++ b/arch/arm/mach-tegra/board-harmony-pcie.c
@@ -56,9 +56,9 @@ int __init harmony_pcie_init(void)
56 gpio_direction_output(en_vdd_1v05, 1); 56 gpio_direction_output(en_vdd_1v05, 1);
57 57
58 regulator = regulator_get(NULL, "vdd_ldo0,vddio_pex_clk"); 58 regulator = regulator_get(NULL, "vdd_ldo0,vddio_pex_clk");
59 if (IS_ERR_OR_NULL(regulator)) { 59 if (IS_ERR(regulator)) {
60 pr_err("%s: regulator_get failed: %d\n", __func__, 60 err = PTR_ERR(regulator);
61 (int)PTR_ERR(regulator)); 61 pr_err("%s: regulator_get failed: %d\n", __func__, err);
62 goto err_reg; 62 goto err_reg;
63 } 63 }
64 64
diff --git a/arch/arm/mach-tegra/common.h b/arch/arm/mach-tegra/common.h
index 32f8eb3fe344..5900cc44f780 100644
--- a/arch/arm/mach-tegra/common.h
+++ b/arch/arm/mach-tegra/common.h
@@ -2,4 +2,3 @@ extern struct smp_operations tegra_smp_ops;
2 2
3extern int tegra_cpu_kill(unsigned int cpu); 3extern int tegra_cpu_kill(unsigned int cpu);
4extern void tegra_cpu_die(unsigned int cpu); 4extern void tegra_cpu_die(unsigned int cpu);
5extern int tegra_cpu_disable(unsigned int cpu);
diff --git a/arch/arm/mach-tegra/hotplug.c b/arch/arm/mach-tegra/hotplug.c
index 8da9f78475da..184914a68d73 100644
--- a/arch/arm/mach-tegra/hotplug.c
+++ b/arch/arm/mach-tegra/hotplug.c
@@ -11,7 +11,6 @@
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/clk/tegra.h> 12#include <linux/clk/tegra.h>
13 13
14#include <asm/cacheflush.h>
15#include <asm/smp_plat.h> 14#include <asm/smp_plat.h>
16 15
17#include "fuse.h" 16#include "fuse.h"
@@ -47,15 +46,6 @@ void __ref tegra_cpu_die(unsigned int cpu)
47 BUG(); 46 BUG();
48} 47}
49 48
50int tegra_cpu_disable(unsigned int cpu)
51{
52 /*
53 * we don't allow CPU 0 to be shutdown (it is still too special
54 * e.g. clock tick interrupts)
55 */
56 return cpu == 0 ? -EPERM : 0;
57}
58
59void __init tegra_hotplug_init(void) 49void __init tegra_hotplug_init(void)
60{ 50{
61 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 51 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index 0c4963bd4b44..fad4226ef710 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -173,6 +173,5 @@ struct smp_operations tegra_smp_ops __initdata = {
173#ifdef CONFIG_HOTPLUG_CPU 173#ifdef CONFIG_HOTPLUG_CPU
174 .cpu_kill = tegra_cpu_kill, 174 .cpu_kill = tegra_cpu_kill,
175 .cpu_die = tegra_cpu_die, 175 .cpu_die = tegra_cpu_die,
176 .cpu_disable = tegra_cpu_disable,
177#endif 176#endif
178}; 177};
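With the .cpu_disable hook gone, Tegra falls back to whatever default the common ARM SMP code applies; the policy the removed hook enforced was simply that CPU 0, which still owns boot-time duties such as the clock tick, may not be unplugged. Assuming the common default matches (an assumption here, suggested by the smp.c change in the diffstat), the removed logic amounted to:

#include <errno.h>

/* The policy the deleted tegra_cpu_disable() implemented: only
 * secondary CPUs may be taken offline. */
static int cpu_disable_default(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}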
diff --git a/arch/arm/mach-tegra/tegra2_emc.c b/arch/arm/mach-tegra/tegra2_emc.c
index ce7ce42a1ac9..9e8bdfa2b369 100644
--- a/arch/arm/mach-tegra/tegra2_emc.c
+++ b/arch/arm/mach-tegra/tegra2_emc.c
@@ -276,7 +276,7 @@ static struct tegra_emc_pdata *tegra_emc_fill_pdata(struct platform_device *pdev
276 int i; 276 int i;
277 277
278 WARN_ON(pdev->dev.platform_data); 278 WARN_ON(pdev->dev.platform_data);
279 BUG_ON(IS_ERR_OR_NULL(c)); 279 BUG_ON(IS_ERR(c));
280 280
281 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 281 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
282 pdata->tables = devm_kzalloc(&pdev->dev, sizeof(*pdata->tables), 282 pdata->tables = devm_kzalloc(&pdev->dev, sizeof(*pdata->tables),
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index 915e2636cbaa..b6145ea51641 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -149,14 +149,13 @@ struct device * __init ux500_soc_device_init(const char *soc_id)
149 soc_info_populate(soc_dev_attr, soc_id); 149 soc_info_populate(soc_dev_attr, soc_id);
150 150
151 soc_dev = soc_device_register(soc_dev_attr); 151 soc_dev = soc_device_register(soc_dev_attr);
152 if (IS_ERR_OR_NULL(soc_dev)) { 152 if (IS_ERR(soc_dev)) {
153 kfree(soc_dev_attr); 153 kfree(soc_dev_attr);
154 return NULL; 154 return NULL;
155 } 155 }
156 156
157 parent = soc_device_to_device(soc_dev); 157 parent = soc_device_to_device(soc_dev);
158 if (!IS_ERR_OR_NULL(parent)) 158 device_create_file(parent, &ux500_soc_attr);
159 device_create_file(parent, &ux500_soc_attr);
160 159
161 return parent; 160 return parent;
162} 161}
diff --git a/arch/arm/mach-ux500/hotplug.c b/arch/arm/mach-ux500/hotplug.c
index 87abcf278432..2bc00b085e38 100644
--- a/arch/arm/mach-ux500/hotplug.c
+++ b/arch/arm/mach-ux500/hotplug.c
@@ -12,7 +12,6 @@
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/smp.h> 13#include <linux/smp.h>
14 14
15#include <asm/cacheflush.h>
16#include <asm/smp_plat.h> 15#include <asm/smp_plat.h>
17 16
18#include "setup.h" 17#include "setup.h"
@@ -24,8 +23,6 @@
24 */ 23 */
25void __ref ux500_cpu_die(unsigned int cpu) 24void __ref ux500_cpu_die(unsigned int cpu)
26{ 25{
27 flush_cache_all();
28
29 /* directly enter low power state, skipping secure registers */ 26 /* directly enter low power state, skipping secure registers */
30 for (;;) { 27 for (;;) {
31 __asm__ __volatile__("dsb\n\t" "wfi\n\t" 28 __asm__ __volatile__("dsb\n\t" "wfi\n\t"
diff --git a/arch/arm/mach-vexpress/hotplug.c b/arch/arm/mach-vexpress/hotplug.c
index a141b98d84fe..f0ce6b8f5e71 100644
--- a/arch/arm/mach-vexpress/hotplug.c
+++ b/arch/arm/mach-vexpress/hotplug.c
@@ -12,7 +12,6 @@
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/smp.h> 13#include <linux/smp.h>
14 14
15#include <asm/cacheflush.h>
16#include <asm/smp_plat.h> 15#include <asm/smp_plat.h>
17#include <asm/cp15.h> 16#include <asm/cp15.h>
18 17
@@ -20,7 +19,6 @@ static inline void cpu_enter_lowpower(void)
20{ 19{
21 unsigned int v; 20 unsigned int v;
22 21
23 flush_cache_all();
24 asm volatile( 22 asm volatile(
25 "mcr p15, 0, %1, c7, c5, 0\n" 23 "mcr p15, 0, %1, c7, c5, 0\n"
26 " mcr p15, 0, %1, c7, c10, 4\n" 24 " mcr p15, 0, %1, c7, c10, 4\n"
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 4045c4931a30..35955b54944c 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -397,6 +397,13 @@ config CPU_V7
397 select CPU_PABRT_V7 397 select CPU_PABRT_V7
398 select CPU_TLB_V7 if MMU 398 select CPU_TLB_V7 if MMU
399 399
400config CPU_THUMBONLY
401 bool
402 # There are no CPUs available with MMU that don't implement an ARM ISA:
403 depends on !MMU
404 help
405 Select this if your CPU doesn't support the 32 bit ARM instructions.
406
400# Figure out what processor architecture version we should be using. 407# Figure out what processor architecture version we should be using.
401# This defines the compiler instruction set which depends on the machine type. 408# This defines the compiler instruction set which depends on the machine type.
402config CPU_32v3 409config CPU_32v3
@@ -605,7 +612,7 @@ config ARCH_DMA_ADDR_T_64BIT
605 bool 612 bool
606 613
607config ARM_THUMB 614config ARM_THUMB
608 bool "Support Thumb user binaries" 615 bool "Support Thumb user binaries" if !CPU_THUMBONLY
609 depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON 616 depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
610 default y 617 default y
611 help 618 help
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index db26e2e543f4..6f4585b89078 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -961,12 +961,14 @@ static int __init alignment_init(void)
961 return -ENOMEM; 961 return -ENOMEM;
962#endif 962#endif
963 963
964#ifdef CONFIG_CPU_CP15
964 if (cpu_is_v6_unaligned()) { 965 if (cpu_is_v6_unaligned()) {
965 cr_alignment &= ~CR_A; 966 cr_alignment &= ~CR_A;
966 cr_no_alignment &= ~CR_A; 967 cr_no_alignment &= ~CR_A;
967 set_cr(cr_alignment); 968 set_cr(cr_alignment);
968 ai_usermode = safe_usermode(ai_usermode, false); 969 ai_usermode = safe_usermode(ai_usermode, false);
969 } 970 }
971#endif
970 972
971 hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN, 973 hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
972 "alignment exception"); 974 "alignment exception");
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e9db6b4bf65a..ef3e0f3aac96 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -823,16 +823,17 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	if (PageHighMem(page)) {
 		if (len + offset > PAGE_SIZE)
 			len = PAGE_SIZE - offset;
-		vaddr = kmap_high_get(page);
-		if (vaddr) {
-			vaddr += offset;
-			op(vaddr, len, dir);
-			kunmap_high(page);
-		} else if (cache_is_vipt()) {
-			/* unmapped pages might still be cached */
+
+		if (cache_is_vipt_nonaliasing()) {
 			vaddr = kmap_atomic(page);
 			op(vaddr + offset, len, dir);
 			kunmap_atomic(vaddr);
+		} else {
+			vaddr = kmap_high_get(page);
+			if (vaddr) {
+				op(vaddr + offset, len, dir);
+				kunmap_high(page);
+			}
 		}
 	} else {
 		vaddr = page_address(page) + offset;
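(Both this hunk and the flush.c hunk that follows replace "try the permanent kmap, else kmap_atomic if VIPT" with a split keyed on cache_is_vipt_nonaliasing(). A hedged sketch of the resulting choice — the helper is made up; cache_is_vipt_nonaliasing(), kmap_atomic() and kmap_high_get() are the kernel APIs the patch uses:

	/* On a non-aliasing VIPT cache every kernel alias hits the same
	 * cache lines, so a cheap temporary mapping always suffices; on an
	 * aliasing cache the maintenance has to go through the page's
	 * existing permanent kmap, and an unmapped page is left alone. */
	static void *map_page_for_cache_maint(struct page *page)
	{
		if (cache_is_vipt_nonaliasing())
			return kmap_atomic(page);
		return kmap_high_get(page);	/* may be NULL: nothing mapped */
	}
)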
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 1c8f7f564175..0d473cce501c 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -170,15 +170,18 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	if (!PageHighMem(page)) {
 		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 	} else {
-		void *addr = kmap_high_get(page);
-		if (addr) {
-			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
-			kunmap_high(page);
-		} else if (cache_is_vipt()) {
-			/* unmapped pages might still be cached */
+		void *addr;
+
+		if (cache_is_vipt_nonaliasing()) {
 			addr = kmap_atomic(page);
 			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 			kunmap_atomic(addr);
+		} else {
+			addr = kmap_high_get(page);
+			if (addr) {
+				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+				kunmap_high(page);
+			}
 		}
 	}
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index a84ff763ac39..e0d8565671a6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -113,6 +113,7 @@ static struct cachepolicy cache_policies[] __initdata = {
 	}
 };
 
+#ifdef CONFIG_CPU_CP15
 /*
  * These are useful for identifying cache coherency
  * problems by allowing the cache or the cache and
@@ -211,6 +212,22 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif
 
+#else	/* ifdef CONFIG_CPU_CP15 */
+
+static int __init early_cachepolicy(char *p)
+{
+	pr_warning("cachepolicy kernel parameter not supported without cp15\n");
+}
+early_param("cachepolicy", early_cachepolicy);
+
+static int __init noalign_setup(char *__unused)
+{
+	pr_warning("noalign kernel parameter not supported without cp15\n");
+}
+__setup("noalign", noalign_setup);
+
+#endif	/* ifdef CONFIG_CPU_CP15 / else */
+
 #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
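(Note that the two stubs just added are declared to return int yet fall off the end of the function. A conforming sketch — the return values are the kernel's documented conventions; adding them is my correction, not part of the patch:

	static int __init early_cachepolicy(char *p)
	{
		pr_warning("cachepolicy kernel parameter not supported without cp15\n");
		return 0;	/* early_param() handlers return 0 on success */
	}
	early_param("cachepolicy", early_cachepolicy);

	static int __init noalign_setup(char *__unused)
	{
		pr_warning("noalign kernel parameter not supported without cp15\n");
		return 1;	/* __setup() handlers return 1 when consumed */
	}
	__setup("noalign", noalign_setup);
)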
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 5c07ee4fe3eb..919405e20b80 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -80,12 +80,10 @@ ENTRY(cpu_v6_do_idle)
 	mov	pc, lr
 
 ENTRY(cpu_v6_dcache_clean_area)
-#ifndef TLB_CAN_READ_FROM_L1_CACHE
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, #D_CACHE_LINE_SIZE
 	subs	r1, r1, #D_CACHE_LINE_SIZE
 	bhi	1b
-#endif
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 78f520bc0e99..9704097c450e 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,8 @@ ENTRY(cpu_v7_set_pte_ext)
 ARM(	str	r3, [r0, #2048]! )
 THUMB(	add	r0, r0, #2048 )
 THUMB(	str	r3, [r0] )
-	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
+	ALT_SMP(mov	pc,lr)
+	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)	@ flush_pte
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_set_pte_ext)
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 6ffd78c0f9ab..363027e811d6 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -73,7 +73,8 @@ ENTRY(cpu_v7_set_pte_ext)
 	tst	r3, #1 << (55 - 32)		@ L_PTE_DIRTY
 	orreq	r2, #L_PTE_RDONLY
 1:	strd	r2, r3, [r0]
-	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
+	ALT_SMP(mov	pc, lr)
+	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)	@ flush_pte
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_set_pte_ext)
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index f584d3f5b37c..2c73a7301ff7 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,14 +75,14 @@ ENTRY(cpu_v7_do_idle)
 ENDPROC(cpu_v7_do_idle)
 
 ENTRY(cpu_v7_dcache_clean_area)
-#ifndef TLB_CAN_READ_FROM_L1_CACHE
+	ALT_SMP(mov	pc, lr)			@ MP extensions imply L1 PTW
+	ALT_UP(W(nop))
 	dcache_line_size r2, r3
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, r2
 	subs	r1, r1, r2
 	bhi	1b
 	dsb
-#endif
 	mov	pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)
 
@@ -402,6 +402,8 @@ __v7_ca9mp_proc_info:
 	__v7_proc __v7_ca9mp_setup
 	.size	__v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
 
+#endif	/* CONFIG_ARM_LPAE */
+
 	/*
 	 * Marvell PJ4B processor.
 	 */
@@ -411,7 +413,6 @@ __v7_pj4b_proc_info:
 	.long	0xfffffff0
 	__v7_proc __v7_pj4b_setup
 	.size	__v7_pj4b_proc_info, . - __v7_pj4b_proc_info
-#endif	/* CONFIG_ARM_LPAE */
 
 	/*
 	 * ARM Ltd. Cortex A7 processor.
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index a0daa2fb5de6..e6dbc8dbe6a6 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -140,8 +140,7 @@ static int omap_dm_timer_prepare(struct omap_dm_timer *timer)
 	 */
 	if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
 		timer->fclk = clk_get(&timer->pdev->dev, "fck");
-		if (WARN_ON_ONCE(IS_ERR_OR_NULL(timer->fclk))) {
-			timer->fclk = NULL;
+		if (WARN_ON_ONCE(IS_ERR(timer->fclk))) {
 			dev_err(&timer->pdev->dev, ": No fclk handle.\n");
 			return -EINVAL;
 		}
@@ -373,7 +372,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask);
 
 struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
 {
-	if (timer)
+	if (timer && !IS_ERR(timer->fclk))
 		return timer->fclk;
 	return NULL;
 }
@@ -482,7 +481,7 @@ int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
 	if (pdata && pdata->set_timer_src)
 		return pdata->set_timer_src(timer->pdev, source);
 
-	if (!timer->fclk)
+	if (IS_ERR(timer->fclk))
 		return -EINVAL;
 
 	switch (source) {
@@ -500,13 +499,13 @@ int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
 	}
 
 	parent = clk_get(&timer->pdev->dev, parent_name);
-	if (IS_ERR_OR_NULL(parent)) {
+	if (IS_ERR(parent)) {
 		pr_err("%s: %s not found\n", __func__, parent_name);
 		return -EINVAL;
 	}
 
 	ret = clk_set_parent(timer->fclk, parent);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		pr_err("%s: failed to set %s as parent\n", __func__,
 			parent_name);
 
@@ -808,6 +807,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
+	timer->fclk = ERR_PTR(-ENODEV);
 	timer->io_base = devm_ioremap_resource(dev, mem);
 	if (IS_ERR(timer->io_base))
 		return PTR_ERR(timer->io_base);
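(The dmtimer hunks move timer->fclk from "NULL means no clock" to the clk API's ERR_PTR convention: probe seeds the field with ERR_PTR(-ENODEV) and every test becomes IS_ERR() rather than a NULL check, since NULL can be a valid clk cookie. A minimal sketch of the pattern — the struct name is hypothetical; ERR_PTR/IS_ERR/PTR_ERR come from <linux/err.h>:

	#include <linux/err.h>
	#include <linux/clk.h>

	struct mytimer {			/* hypothetical container */
		struct clk *fclk;
	};

	static void mytimer_init(struct mytimer *t)
	{
		t->fclk = ERR_PTR(-ENODEV);	/* "not acquired yet" */
	}

	static int mytimer_start(struct mytimer *t)
	{
		if (IS_ERR(t->fclk))		/* don't test against NULL */
			return PTR_ERR(t->fclk);
		return clk_enable(t->fclk);
	}
)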
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 831e1fdfdb2f..a10297da122b 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -16,7 +16,7 @@
 # are merged into mainline or have been edited in the machine database
 # within the last 12 months. References to machine_is_NAME() do not count!
 #
-# Last update: Thu Apr 26 08:44:23 2012
+# Last update: Fri Mar 22 17:24:50 2013
 #
 # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
 #
@@ -64,8 +64,8 @@ h7201 ARCH_H7201 H7201 161
 h7202 ARCH_H7202 H7202 162
 iq80321 ARCH_IQ80321 IQ80321 169
 ks8695 ARCH_KS8695 KS8695 180
-karo ARCH_KARO KARO 190
 smdk2410 ARCH_SMDK2410 SMDK2410 193
+ceiva ARCH_CEIVA CEIVA 200
 voiceblue MACH_VOICEBLUE VOICEBLUE 218
 h5400 ARCH_H5400 H5400 220
 omap_innovator MACH_OMAP_INNOVATOR OMAP_INNOVATOR 234
@@ -95,6 +95,7 @@ lpd7a400 MACH_LPD7A400 LPD7A400 389
 lpd7a404 MACH_LPD7A404 LPD7A404 390
 csb337 MACH_CSB337 CSB337 399
 mainstone MACH_MAINSTONE MAINSTONE 406
+lite300 MACH_LITE300 LITE300 408
 xcep MACH_XCEP XCEP 413
 arcom_vulcan MACH_ARCOM_VULCAN ARCOM_VULCAN 414
 nomadik MACH_NOMADIK NOMADIK 420
@@ -131,12 +132,14 @@ kb9200 MACH_KB9200 KB9200 612
 sx1 MACH_SX1 SX1 613
 ixdp465 MACH_IXDP465 IXDP465 618
 ixdp2351 MACH_IXDP2351 IXDP2351 619
+cm4008 MACH_CM4008 CM4008 624
 iq80332 MACH_IQ80332 IQ80332 629
 gtwx5715 MACH_GTWX5715 GTWX5715 641
 csb637 MACH_CSB637 CSB637 648
 n30 MACH_N30 N30 656
 nec_mp900 MACH_NEC_MP900 NEC_MP900 659
 kafa MACH_KAFA KAFA 662
+cm41xx MACH_CM41XX CM41XX 672
 ts72xx MACH_TS72XX TS72XX 673
 otom MACH_OTOM OTOM 680
 nexcoder_2440 MACH_NEXCODER_2440 NEXCODER_2440 681
@@ -149,6 +152,7 @@ colibri MACH_COLIBRI COLIBRI 729
 gateway7001 MACH_GATEWAY7001 GATEWAY7001 731
 pcm027 MACH_PCM027 PCM027 732
 anubis MACH_ANUBIS ANUBIS 734
+xboardgp8 MACH_XBOARDGP8 XBOARDGP8 742
 akita MACH_AKITA AKITA 744
 e330 MACH_E330 E330 753
 nokia770 MACH_NOKIA770 NOKIA770 755
@@ -157,9 +161,11 @@ edb9315a MACH_EDB9315A EDB9315A 772
 stargate2 MACH_STARGATE2 STARGATE2 774
 intelmote2 MACH_INTELMOTE2 INTELMOTE2 775
 trizeps4 MACH_TRIZEPS4 TRIZEPS4 776
+pnx4008 MACH_PNX4008 PNX4008 782
 cpuat91 MACH_CPUAT91 CPUAT91 787
 iq81340sc MACH_IQ81340SC IQ81340SC 799
 iq81340mc MACH_IQ81340MC IQ81340MC 801
+se4200 MACH_SE4200 SE4200 809
 micro9 MACH_MICRO9 MICRO9 811
 micro9l MACH_MICRO9L MICRO9L 812
 omap_palmte MACH_OMAP_PALMTE OMAP_PALMTE 817
@@ -178,6 +184,7 @@ mx21ads MACH_MX21ADS MX21ADS 851
 ams_delta MACH_AMS_DELTA AMS_DELTA 862
 nas100d MACH_NAS100D NAS100D 865
 magician MACH_MAGICIAN MAGICIAN 875
+cm4002 MACH_CM4002 CM4002 876
 nxdkn MACH_NXDKN NXDKN 880
 palmtx MACH_PALMTX PALMTX 885
 s3c2413 MACH_S3C2413 S3C2413 887
@@ -203,7 +210,6 @@ omap_fsample MACH_OMAP_FSAMPLE OMAP_FSAMPLE 970
 snapper_cl15 MACH_SNAPPER_CL15 SNAPPER_CL15 986
 omap_palmz71 MACH_OMAP_PALMZ71 OMAP_PALMZ71 993
 smdk2412 MACH_SMDK2412 SMDK2412 1009
-bkde303 MACH_BKDE303 BKDE303 1021
 smdk2413 MACH_SMDK2413 SMDK2413 1022
 aml_m5900 MACH_AML_M5900 AML_M5900 1024
 balloon3 MACH_BALLOON3 BALLOON3 1029
@@ -214,6 +220,7 @@ fsg MACH_FSG FSG 1091
 at91sam9260ek MACH_AT91SAM9260EK AT91SAM9260EK 1099
 glantank MACH_GLANTANK GLANTANK 1100
 n2100 MACH_N2100 N2100 1101
+im42xx MACH_IM42XX IM42XX 1105
 qt2410 MACH_QT2410 QT2410 1108
 kixrp435 MACH_KIXRP435 KIXRP435 1109
 cc9p9360dev MACH_CC9P9360DEV CC9P9360DEV 1114
@@ -247,6 +254,7 @@ csb726 MACH_CSB726 CSB726 1359
 davinci_dm6467_evm MACH_DAVINCI_DM6467_EVM DAVINCI_DM6467_EVM 1380
 davinci_dm355_evm MACH_DAVINCI_DM355_EVM DAVINCI_DM355_EVM 1381
 littleton MACH_LITTLETON LITTLETON 1388
+im4004 MACH_IM4004 IM4004 1400
 realview_pb11mp MACH_REALVIEW_PB11MP REALVIEW_PB11MP 1407
 mx27_3ds MACH_MX27_3DS MX27_3DS 1430
 halibut MACH_HALIBUT HALIBUT 1439
@@ -268,6 +276,7 @@ dns323 MACH_DNS323 DNS323 1542
 omap3_beagle MACH_OMAP3_BEAGLE OMAP3_BEAGLE 1546
 nokia_n810 MACH_NOKIA_N810 NOKIA_N810 1548
 pcm038 MACH_PCM038 PCM038 1551
+sg310 MACH_SG310 SG310 1564
 ts209 MACH_TS209 TS209 1565
 at91cap9adk MACH_AT91CAP9ADK AT91CAP9ADK 1566
 mx31moboard MACH_MX31MOBOARD MX31MOBOARD 1574
@@ -371,7 +380,6 @@ pcm043 MACH_PCM043 PCM043 2072
 sheevaplug MACH_SHEEVAPLUG SHEEVAPLUG 2097
 avengers_lite MACH_AVENGERS_LITE AVENGERS_LITE 2104
 mx51_babbage MACH_MX51_BABBAGE MX51_BABBAGE 2125
-tx37 MACH_TX37 TX37 2127
 rd78x00_masa MACH_RD78X00_MASA RD78X00_MASA 2135
 dm355_leopard MACH_DM355_LEOPARD DM355_LEOPARD 2138
 ts219 MACH_TS219 TS219 2139
@@ -380,12 +388,12 @@ davinci_da850_evm MACH_DAVINCI_DA850_EVM DAVINCI_DA850_EVM 2157
 at91sam9g10ek MACH_AT91SAM9G10EK AT91SAM9G10EK 2159
 omap_4430sdp MACH_OMAP_4430SDP OMAP_4430SDP 2160
 magx_zn5 MACH_MAGX_ZN5 MAGX_ZN5 2162
-tx25 MACH_TX25 TX25 2177
 omap3_torpedo MACH_OMAP3_TORPEDO OMAP3_TORPEDO 2178
 anw6410 MACH_ANW6410 ANW6410 2183
 imx27_visstrim_m10 MACH_IMX27_VISSTRIM_M10 IMX27_VISSTRIM_M10 2187
 portuxg20 MACH_PORTUXG20 PORTUXG20 2191
 smdkc110 MACH_SMDKC110 SMDKC110 2193
+cabespresso MACH_CABESPRESSO CABESPRESSO 2194
 omap3517evm MACH_OMAP3517EVM OMAP3517EVM 2200
 netspace_v2 MACH_NETSPACE_V2 NETSPACE_V2 2201
 netspace_max_v2 MACH_NETSPACE_MAX_V2 NETSPACE_MAX_V2 2202
@@ -404,6 +412,7 @@ bigdisk MACH_BIGDISK BIGDISK 2283
 at91sam9g20ek_2mmc MACH_AT91SAM9G20EK_2MMC AT91SAM9G20EK_2MMC 2288
 bcmring MACH_BCMRING BCMRING 2289
 mahimahi MACH_MAHIMAHI MAHIMAHI 2304
+cerebric MACH_CEREBRIC CEREBRIC 2311
 smdk6442 MACH_SMDK6442 SMDK6442 2324
 openrd_base MACH_OPENRD_BASE OPENRD_BASE 2325
 devkit8000 MACH_DEVKIT8000 DEVKIT8000 2330
@@ -423,10 +432,10 @@ raumfeld_rc MACH_RAUMFELD_RC RAUMFELD_RC 2413
 raumfeld_connector MACH_RAUMFELD_CONNECTOR RAUMFELD_CONNECTOR 2414
 raumfeld_speaker MACH_RAUMFELD_SPEAKER RAUMFELD_SPEAKER 2415
 tnetv107x MACH_TNETV107X TNETV107X 2418
-mx51_m2id MACH_MX51_M2ID MX51_M2ID 2428
 smdkv210 MACH_SMDKV210 SMDKV210 2456
 omap_zoom3 MACH_OMAP_ZOOM3 OMAP_ZOOM3 2464
 omap_3630sdp MACH_OMAP_3630SDP OMAP_3630SDP 2465
+cybook2440 MACH_CYBOOK2440 CYBOOK2440 2466
 smartq7 MACH_SMARTQ7 SMARTQ7 2479
 watson_efm_plugin MACH_WATSON_EFM_PLUGIN WATSON_EFM_PLUGIN 2491
 g4evm MACH_G4EVM G4EVM 2493
@@ -434,12 +443,10 @@ omapl138_hawkboard MACH_OMAPL138_HAWKBOARD OMAPL138_HAWKBOARD 2495
 ts41x MACH_TS41X TS41X 2502
 phy3250 MACH_PHY3250 PHY3250 2511
 mini6410 MACH_MINI6410 MINI6410 2520
-tx51 MACH_TX51 TX51 2529
 mx28evk MACH_MX28EVK MX28EVK 2531
 smartq5 MACH_SMARTQ5 SMARTQ5 2534
 davinci_dm6467tevm MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM 2548
 mxt_td60 MACH_MXT_TD60 MXT_TD60 2550
-pca101 MACH_PCA101 PCA101 2595
 capc7117 MACH_CAPC7117 CAPC7117 2612
 icontrol MACH_ICONTROL ICONTROL 2624
 gplugd MACH_GPLUGD GPLUGD 2625
@@ -465,6 +472,7 @@ igep0030 MACH_IGEP0030 IGEP0030 2717
 sbc3530 MACH_SBC3530 SBC3530 2722
 saarb MACH_SAARB SAARB 2727
 harmony MACH_HARMONY HARMONY 2731
+cybook_orizon MACH_CYBOOK_ORIZON CYBOOK_ORIZON 2733
 msm7x30_fluid MACH_MSM7X30_FLUID MSM7X30_FLUID 2741
 cm_t3517 MACH_CM_T3517 CM_T3517 2750
 wbd222 MACH_WBD222 WBD222 2753
@@ -480,10 +488,8 @@ eukrea_cpuimx35sd MACH_EUKREA_CPUIMX35SD EUKREA_CPUIMX35SD 2821
 eukrea_cpuimx51sd MACH_EUKREA_CPUIMX51SD EUKREA_CPUIMX51SD 2822
 eukrea_cpuimx51 MACH_EUKREA_CPUIMX51 EUKREA_CPUIMX51 2823
 smdkc210 MACH_SMDKC210 SMDKC210 2838
-pcaal1 MACH_PCAAL1 PCAAL1 2843
 t5325 MACH_T5325 T5325 2846
 income MACH_INCOME INCOME 2849
-mx257sx MACH_MX257SX MX257SX 2861
 goni MACH_GONI GONI 2862
 bv07 MACH_BV07 BV07 2882
 openrd_ultimate MACH_OPENRD_ULTIMATE OPENRD_ULTIMATE 2884
@@ -491,7 +497,6 @@ devixp MACH_DEVIXP DEVIXP 2885
 miccpt MACH_MICCPT MICCPT 2886
 mic256 MACH_MIC256 MIC256 2887
 u5500 MACH_U5500 U5500 2890
-pov15hd MACH_POV15HD POV15HD 2910
 linkstation_lschl MACH_LINKSTATION_LSCHL LINKSTATION_LSCHL 2913
 smdkv310 MACH_SMDKV310 SMDKV310 2925
 wm8505_7in_netbook MACH_WM8505_7IN_NETBOOK WM8505_7IN_NETBOOK 2928
@@ -518,7 +523,6 @@ prima2_evb MACH_PRIMA2_EVB PRIMA2_EVB 3103
 paz00 MACH_PAZ00 PAZ00 3128
 acmenetusfoxg20 MACH_ACMENETUSFOXG20 ACMENETUSFOXG20 3129
 ag5evm MACH_AG5EVM AG5EVM 3189
-tsunagi MACH_TSUNAGI TSUNAGI 3197
 ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206
 wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207
 trimslice MACH_TRIMSLICE TRIMSLICE 3209
@@ -529,8 +533,6 @@ msm8960_sim MACH_MSM8960_SIM MSM8960_SIM 3230
 msm8960_rumi3 MACH_MSM8960_RUMI3 MSM8960_RUMI3 3231
 gsia18s MACH_GSIA18S GSIA18S 3234
 mx53_loco MACH_MX53_LOCO MX53_LOCO 3273
-tx53 MACH_TX53 TX53 3279
-encore MACH_ENCORE ENCORE 3284
 wario MACH_WARIO WARIO 3288
 cm_t3730 MACH_CM_T3730 CM_T3730 3290
 hrefv60 MACH_HREFV60 HREFV60 3293
@@ -538,603 +540,24 @@ armlex4210 MACH_ARMLEX4210 ARMLEX4210 3361
 snowball MACH_SNOWBALL SNOWBALL 3363
 xilinx_ep107 MACH_XILINX_EP107 XILINX_EP107 3378
 nuri MACH_NURI NURI 3379
-wtplug MACH_WTPLUG WTPLUG 3412
-veridis_a300 MACH_VERIDIS_A300 VERIDIS_A300 3448
 origen MACH_ORIGEN ORIGEN 3455
-wm8650refboard MACH_WM8650REFBOARD WM8650REFBOARD 3472
-xarina MACH_XARINA XARINA 3476
-sdvr MACH_SDVR SDVR 3478
-acer_maya MACH_ACER_MAYA ACER_MAYA 3479
-pico MACH_PICO PICO 3480
-cwmx233 MACH_CWMX233 CWMX233 3481
-cwam1808 MACH_CWAM1808 CWAM1808 3482
-cwdm365 MACH_CWDM365 CWDM365 3483
-mx51_moray MACH_MX51_MORAY MX51_MORAY 3484
-thales_cbc MACH_THALES_CBC THALES_CBC 3485
-bluepoint MACH_BLUEPOINT BLUEPOINT 3486
-dir665 MACH_DIR665 DIR665 3487
-acmerover1 MACH_ACMEROVER1 ACMEROVER1 3488
-shooter_ct MACH_SHOOTER_CT SHOOTER_CT 3489
-bliss MACH_BLISS BLISS 3490
-blissc MACH_BLISSC BLISSC 3491
-thales_adc MACH_THALES_ADC THALES_ADC 3492
-ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493
-atdgp318 MACH_ATDGP318 ATDGP318 3494
-dma210u MACH_DMA210U DMA210U 3495
-em_t3 MACH_EM_T3 EM_T3 3496
-htx3250 MACH_HTX3250 HTX3250 3497
-g50 MACH_G50 G50 3498
-eco5 MACH_ECO5 ECO5 3499
-wintergrasp MACH_WINTERGRASP WINTERGRASP 3500
-puro MACH_PURO PURO 3501
-shooter_k MACH_SHOOTER_K SHOOTER_K 3502
 nspire MACH_NSPIRE NSPIRE 3503
-mickxx MACH_MICKXX MICKXX 3504
-lxmb MACH_LXMB LXMB 3505
-adam MACH_ADAM ADAM 3507
-b1004 MACH_B1004 B1004 3508
-oboea MACH_OBOEA OBOEA 3509
-a1015 MACH_A1015 A1015 3510
-robin_vbdt30 MACH_ROBIN_VBDT30 ROBIN_VBDT30 3511
-tegra_enterprise MACH_TEGRA_ENTERPRISE TEGRA_ENTERPRISE 3512
-rfl108200_mk10 MACH_RFL108200_MK10 RFL108200_MK10 3513
-rfl108300_mk16 MACH_RFL108300_MK16 RFL108300_MK16 3514
-rover_v7 MACH_ROVER_V7 ROVER_V7 3515
-miphone MACH_MIPHONE MIPHONE 3516
-femtobts MACH_FEMTOBTS FEMTOBTS 3517
-monopoli MACH_MONOPOLI MONOPOLI 3518
-boss MACH_BOSS BOSS 3519
-davinci_dm368_vtam MACH_DAVINCI_DM368_VTAM DAVINCI_DM368_VTAM 3520
-clcon MACH_CLCON CLCON 3521
 nokia_rm696 MACH_NOKIA_RM696 NOKIA_RM696 3522
-tahiti MACH_TAHITI TAHITI 3523
-fighter MACH_FIGHTER FIGHTER 3524
-sgh_i710 MACH_SGH_I710 SGH_I710 3525
-integreproscb MACH_INTEGREPROSCB INTEGREPROSCB 3526
-monza MACH_MONZA MONZA 3527
-calimain MACH_CALIMAIN CALIMAIN 3528
-mx6q_sabreauto MACH_MX6Q_SABREAUTO MX6Q_SABREAUTO 3529
-gma01x MACH_GMA01X GMA01X 3530
-sbc51 MACH_SBC51 SBC51 3531
-fit MACH_FIT FIT 3532
-steelhead MACH_STEELHEAD STEELHEAD 3533
-panther MACH_PANTHER PANTHER 3534
-msm8960_liquid MACH_MSM8960_LIQUID MSM8960_LIQUID 3535
-lexikonct MACH_LEXIKONCT LEXIKONCT 3536
-ns2816_stb MACH_NS2816_STB NS2816_STB 3537
-sei_mm2_lpc3250 MACH_SEI_MM2_LPC3250 SEI_MM2_LPC3250 3538
-cmimx53 MACH_CMIMX53 CMIMX53 3539
-sandwich MACH_SANDWICH SANDWICH 3540
-chief MACH_CHIEF CHIEF 3541
-pogo_e02 MACH_POGO_E02 POGO_E02 3542
 mikrap_x168 MACH_MIKRAP_X168 MIKRAP_X168 3543
-htcmozart MACH_HTCMOZART HTCMOZART 3544
-htcgold MACH_HTCGOLD HTCGOLD 3545
-mt72xx MACH_MT72XX MT72XX 3546
-mx51_ivy MACH_MX51_IVY MX51_IVY 3547
-mx51_lvd MACH_MX51_LVD MX51_LVD 3548
-omap3_wiser2 MACH_OMAP3_WISER2 OMAP3_WISER2 3549
-dreamplug MACH_DREAMPLUG DREAMPLUG 3550
-cobas_c_111 MACH_COBAS_C_111 COBAS_C_111 3551
-cobas_u_411 MACH_COBAS_U_411 COBAS_U_411 3552
-hssd MACH_HSSD HSSD 3553
-iom35x MACH_IOM35X IOM35X 3554
-psom_omap MACH_PSOM_OMAP PSOM_OMAP 3555
-iphone_2g MACH_IPHONE_2G IPHONE_2G 3556
-iphone_3g MACH_IPHONE_3G IPHONE_3G 3557
-ipod_touch_1g MACH_IPOD_TOUCH_1G IPOD_TOUCH_1G 3558
-pharos_tpc MACH_PHAROS_TPC PHAROS_TPC 3559
-mx53_hydra MACH_MX53_HYDRA MX53_HYDRA 3560
-ns2816_dev_board MACH_NS2816_DEV_BOARD NS2816_DEV_BOARD 3561
-iphone_3gs MACH_IPHONE_3GS IPHONE_3GS 3562
-iphone_4 MACH_IPHONE_4 IPHONE_4 3563
-ipod_touch_4g MACH_IPOD_TOUCH_4G IPOD_TOUCH_4G 3564
-dragon_e1100 MACH_DRAGON_E1100 DRAGON_E1100 3565
-topside MACH_TOPSIDE TOPSIDE 3566
-irisiii MACH_IRISIII IRISIII 3567
 deto_macarm9 MACH_DETO_MACARM9 DETO_MACARM9 3568
-eti_d1 MACH_ETI_D1 ETI_D1 3569
-som3530sdk MACH_SOM3530SDK SOM3530SDK 3570
-oc_engine MACH_OC_ENGINE OC_ENGINE 3571
-apq8064_sim MACH_APQ8064_SIM APQ8064_SIM 3572
-alps MACH_ALPS ALPS 3575
-tny_t3730 MACH_TNY_T3730 TNY_T3730 3576
-geryon_nfe MACH_GERYON_NFE GERYON_NFE 3577
-ns2816_ref_board MACH_NS2816_REF_BOARD NS2816_REF_BOARD 3578
-silverstone MACH_SILVERSTONE SILVERSTONE 3579
-mtt2440 MACH_MTT2440 MTT2440 3580
-ynicdb MACH_YNICDB YNICDB 3581
-bct MACH_BCT BCT 3582
-tuscan MACH_TUSCAN TUSCAN 3583
-xbt_sam9g45 MACH_XBT_SAM9G45 XBT_SAM9G45 3584
-enbw_cmc MACH_ENBW_CMC ENBW_CMC 3585
-ch104mx257 MACH_CH104MX257 CH104MX257 3587
-openpri MACH_OPENPRI OPENPRI 3588
-am335xevm MACH_AM335XEVM AM335XEVM 3589
-picodmb MACH_PICODMB PICODMB 3590
-waluigi MACH_WALUIGI WALUIGI 3591
-punicag7 MACH_PUNICAG7 PUNICAG7 3592
-ipad_1g MACH_IPAD_1G IPAD_1G 3593
-appletv_2g MACH_APPLETV_2G APPLETV_2G 3594
-mach_ecog45 MACH_MACH_ECOG45 MACH_ECOG45 3595
-ait_cam_enc_4xx MACH_AIT_CAM_ENC_4XX AIT_CAM_ENC_4XX 3596
-runnymede MACH_RUNNYMEDE RUNNYMEDE 3597
-play MACH_PLAY PLAY 3598
-hw90260 MACH_HW90260 HW90260 3599
-tagh MACH_TAGH TAGH 3600
-filbert MACH_FILBERT FILBERT 3601
-getinge_netcomv3 MACH_GETINGE_NETCOMV3 GETINGE_NETCOMV3 3602
-cw20 MACH_CW20 CW20 3603
-cinema MACH_CINEMA CINEMA 3604
-cinema_tea MACH_CINEMA_TEA CINEMA_TEA 3605
-cinema_coffee MACH_CINEMA_COFFEE CINEMA_COFFEE 3606
-cinema_juice MACH_CINEMA_JUICE CINEMA_JUICE 3607
-mx53_mirage2 MACH_MX53_MIRAGE2 MX53_MIRAGE2 3609
-mx53_efikasb MACH_MX53_EFIKASB MX53_EFIKASB 3610
-stm_b2000 MACH_STM_B2000 STM_B2000 3612
 m28evk MACH_M28EVK M28EVK 3613
-pda MACH_PDA PDA 3614
-meraki_mr58 MACH_MERAKI_MR58 MERAKI_MR58 3615
 kota2 MACH_KOTA2 KOTA2 3616
-letcool MACH_LETCOOL LETCOOL 3617
-mx27iat MACH_MX27IAT MX27IAT 3618
-apollo_td MACH_APOLLO_TD APOLLO_TD 3619
-arena MACH_ARENA ARENA 3620
-gsngateway MACH_GSNGATEWAY GSNGATEWAY 3621
-lf2000 MACH_LF2000 LF2000 3622
 bonito MACH_BONITO BONITO 3623
-asymptote MACH_ASYMPTOTE ASYMPTOTE 3624
-bst2brd MACH_BST2BRD BST2BRD 3625
-tx335s MACH_TX335S TX335S 3626
-pelco_tesla MACH_PELCO_TESLA PELCO_TESLA 3627
-rrhtestplat MACH_RRHTESTPLAT RRHTESTPLAT 3628
-vidtonic_pro MACH_VIDTONIC_PRO VIDTONIC_PRO 3629
-pl_apollo MACH_PL_APOLLO PL_APOLLO 3630
-pl_phoenix MACH_PL_PHOENIX PL_PHOENIX 3631
-m28cu3 MACH_M28CU3 M28CU3 3632
-vvbox_hd MACH_VVBOX_HD VVBOX_HD 3633
-coreware_sam9260_ MACH_COREWARE_SAM9260_ COREWARE_SAM9260_ 3634
-marmaduke MACH_MARMADUKE MARMADUKE 3635
-amg_xlcore_camera MACH_AMG_XLCORE_CAMERA AMG_XLCORE_CAMERA 3636
 omap3_egf MACH_OMAP3_EGF OMAP3_EGF 3637
 smdk4212 MACH_SMDK4212 SMDK4212 3638
-dnp9200 MACH_DNP9200 DNP9200 3639
-tf101 MACH_TF101 TF101 3640
-omap3silvio MACH_OMAP3SILVIO OMAP3SILVIO 3641
-picasso2 MACH_PICASSO2 PICASSO2 3642
-vangogh2 MACH_VANGOGH2 VANGOGH2 3643
-olpc_xo_1_75 MACH_OLPC_XO_1_75 OLPC_XO_1_75 3644
-gx400 MACH_GX400 GX400 3645
-gs300 MACH_GS300 GS300 3646
-acer_a9 MACH_ACER_A9 ACER_A9 3647
-vivow_evm MACH_VIVOW_EVM VIVOW_EVM 3648
-veloce_cxq MACH_VELOCE_CXQ VELOCE_CXQ 3649
-veloce_cxm MACH_VELOCE_CXM VELOCE_CXM 3650
-p1852 MACH_P1852 P1852 3651
-naxy100 MACH_NAXY100 NAXY100 3652
-taishan MACH_TAISHAN TAISHAN 3653
-touchlink MACH_TOUCHLINK TOUCHLINK 3654
-stm32f103ze MACH_STM32F103ZE STM32F103ZE 3655
-mcx MACH_MCX MCX 3656
-stm_nmhdk_fli7610 MACH_STM_NMHDK_FLI7610 STM_NMHDK_FLI7610 3657
-top28x MACH_TOP28X TOP28X 3658
-okl4vp_microvisor MACH_OKL4VP_MICROVISOR OKL4VP_MICROVISOR 3659
-pop MACH_POP POP 3660
-layer MACH_LAYER LAYER 3661
-trondheim MACH_TRONDHEIM TRONDHEIM 3662
-eva MACH_EVA EVA 3663
-trust_taurus MACH_TRUST_TAURUS TRUST_TAURUS 3664
-ns2816_huashan MACH_NS2816_HUASHAN NS2816_HUASHAN 3665
-ns2816_yangcheng MACH_NS2816_YANGCHENG NS2816_YANGCHENG 3666
-p852 MACH_P852 P852 3667
-flea3 MACH_FLEA3 FLEA3 3668
-bowfin MACH_BOWFIN BOWFIN 3669
-mv88de3100 MACH_MV88DE3100 MV88DE3100 3670
-pia_am35x MACH_PIA_AM35X PIA_AM35X 3671
-cedar MACH_CEDAR CEDAR 3672
-picasso_e MACH_PICASSO_E PICASSO_E 3673
-samsung_e60 MACH_SAMSUNG_E60 SAMSUNG_E60 3674
-sdvr_mini MACH_SDVR_MINI SDVR_MINI 3676
-omap3_ij3k MACH_OMAP3_IJ3K OMAP3_IJ3K 3677
-modasmc1 MACH_MODASMC1 MODASMC1 3678
-apq8064_rumi3 MACH_APQ8064_RUMI3 APQ8064_RUMI3 3679
-matrix506 MACH_MATRIX506 MATRIX506 3680
-msm9615_mtp MACH_MSM9615_MTP MSM9615_MTP 3681
-dm36x_spawndc MACH_DM36X_SPAWNDC DM36X_SPAWNDC 3682
-sff792 MACH_SFF792 SFF792 3683
-am335xiaevm MACH_AM335XIAEVM AM335XIAEVM 3684
-g3c2440 MACH_G3C2440 G3C2440 3685
-tion270 MACH_TION270 TION270 3686
-w22q7arm02 MACH_W22Q7ARM02 W22Q7ARM02 3687
-omap_cat MACH_OMAP_CAT OMAP_CAT 3688
-at91sam9n12ek MACH_AT91SAM9N12EK AT91SAM9N12EK 3689
-morrison MACH_MORRISON MORRISON 3690
-svdu MACH_SVDU SVDU 3691
-lpp01 MACH_LPP01 LPP01 3692
-ubc283 MACH_UBC283 UBC283 3693
-zeppelin MACH_ZEPPELIN ZEPPELIN 3694
-motus MACH_MOTUS MOTUS 3695
-neomainboard MACH_NEOMAINBOARD NEOMAINBOARD 3696
-devkit3250 MACH_DEVKIT3250 DEVKIT3250 3697
-devkit7000 MACH_DEVKIT7000 DEVKIT7000 3698
-fmc_uic MACH_FMC_UIC FMC_UIC 3699
-fmc_dcm MACH_FMC_DCM FMC_DCM 3700
-batwm MACH_BATWM BATWM 3701
-atlas6cb MACH_ATLAS6CB ATLAS6CB 3702
-blue MACH_BLUE BLUE 3705
-colorado MACH_COLORADO COLORADO 3706
-popc MACH_POPC POPC 3707
-promwad_jade MACH_PROMWAD_JADE PROMWAD_JADE 3708
-amp MACH_AMP AMP 3709
-gnet_amp MACH_GNET_AMP GNET_AMP 3710
-toques MACH_TOQUES TOQUES 3711
 apx4devkit MACH_APX4DEVKIT APX4DEVKIT 3712
-dct_storm MACH_DCT_STORM DCT_STORM 3713
-owl MACH_OWL OWL 3715
-cogent_csb1741 MACH_COGENT_CSB1741 COGENT_CSB1741 3716
-adillustra610 MACH_ADILLUSTRA610 ADILLUSTRA610 3718
-ecafe_na04 MACH_ECAFE_NA04 ECAFE_NA04 3719
-popct MACH_POPCT POPCT 3720
-omap3_helena MACH_OMAP3_HELENA OMAP3_HELENA 3721
-ach MACH_ACH ACH 3722
-module_dtb MACH_MODULE_DTB MODULE_DTB 3723
-oslo_elisabeth MACH_OSLO_ELISABETH OSLO_ELISABETH 3725
-tt01 MACH_TT01 TT01 3726
-msm8930_cdp MACH_MSM8930_CDP MSM8930_CDP 3727
-msm8930_mtp MACH_MSM8930_MTP MSM8930_MTP 3728
-msm8930_fluid MACH_MSM8930_FLUID MSM8930_FLUID 3729
-ltu11 MACH_LTU11 LTU11 3730
-am1808_spawnco MACH_AM1808_SPAWNCO AM1808_SPAWNCO 3731
-flx6410 MACH_FLX6410 FLX6410 3732
-mx6q_qsb MACH_MX6Q_QSB MX6Q_QSB 3733
-mx53_plt424 MACH_MX53_PLT424 MX53_PLT424 3734
-jasmine MACH_JASMINE JASMINE 3735
-l138_owlboard_plus MACH_L138_OWLBOARD_PLUS L138_OWLBOARD_PLUS 3736
-wr21 MACH_WR21 WR21 3737
-peaboy MACH_PEABOY PEABOY 3739
-mx28_plato MACH_MX28_PLATO MX28_PLATO 3740
-kacom2 MACH_KACOM2 KACOM2 3741
-slco MACH_SLCO SLCO 3742
-imx51pico MACH_IMX51PICO IMX51PICO 3743
-glink1 MACH_GLINK1 GLINK1 3744
-diamond MACH_DIAMOND DIAMOND 3745
-d9000 MACH_D9000 D9000 3746
-w5300e01 MACH_W5300E01 W5300E01 3747
-im6000 MACH_IM6000 IM6000 3748
-mx51_fred51 MACH_MX51_FRED51 MX51_FRED51 3749
-stm32f2 MACH_STM32F2 STM32F2 3750
-ville MACH_VILLE VILLE 3751
-ptip_murnau MACH_PTIP_MURNAU PTIP_MURNAU 3752
-ptip_classic MACH_PTIP_CLASSIC PTIP_CLASSIC 3753
-mx53grb MACH_MX53GRB MX53GRB 3754
-gagarin MACH_GAGARIN GAGARIN 3755
-nas2big MACH_NAS2BIG NAS2BIG 3757
-superfemto MACH_SUPERFEMTO SUPERFEMTO 3758
-teufel MACH_TEUFEL TEUFEL 3759
-dinara MACH_DINARA DINARA 3760
-vanquish MACH_VANQUISH VANQUISH 3761
-zipabox1 MACH_ZIPABOX1 ZIPABOX1 3762
-u9540 MACH_U9540 U9540 3763
-jet MACH_JET JET 3764
 smdk4412 MACH_SMDK4412 SMDK4412 3765
-elite MACH_ELITE ELITE 3766
-spear320_hmi MACH_SPEAR320_HMI SPEAR320_HMI 3767
-ontario MACH_ONTARIO ONTARIO 3768
-mx6q_sabrelite MACH_MX6Q_SABRELITE MX6Q_SABRELITE 3769
-vc200 MACH_VC200 VC200 3770
-msm7625a_ffa MACH_MSM7625A_FFA MSM7625A_FFA 3771
-msm7625a_surf MACH_MSM7625A_SURF MSM7625A_SURF 3772
-benthossbp MACH_BENTHOSSBP BENTHOSSBP 3773
-smdk5210 MACH_SMDK5210 SMDK5210 3774
-empq2300 MACH_EMPQ2300 EMPQ2300 3775
-minipos MACH_MINIPOS MINIPOS 3776
-omap5_sevm MACH_OMAP5_SEVM OMAP5_SEVM 3777
-shelter MACH_SHELTER SHELTER 3778
-omap3_devkit8500 MACH_OMAP3_DEVKIT8500 OMAP3_DEVKIT8500 3779
-edgetd MACH_EDGETD EDGETD 3780
-copperyard MACH_COPPERYARD COPPERYARD 3781
-edge_u MACH_EDGE_U EDGE_U 3783
-edge_td MACH_EDGE_TD EDGE_TD 3784
-wdss MACH_WDSS WDSS 3785
-dl_pb25 MACH_DL_PB25 DL_PB25 3786
-dss11 MACH_DSS11 DSS11 3787
-cpa MACH_CPA CPA 3788
-aptp2000 MACH_APTP2000 APTP2000 3789
 marzen MACH_MARZEN MARZEN 3790
-st_turbine MACH_ST_TURBINE ST_TURBINE 3791
-gtl_it3300 MACH_GTL_IT3300 GTL_IT3300 3792
-mx6_mule MACH_MX6_MULE MX6_MULE 3793
-v7pxa_dt MACH_V7PXA_DT V7PXA_DT 3794
-v7mmp_dt MACH_V7MMP_DT V7MMP_DT 3795
-dragon7 MACH_DRAGON7 DRAGON7 3796
 krome MACH_KROME KROME 3797
-oratisdante MACH_ORATISDANTE ORATISDANTE 3798
-fathom MACH_FATHOM FATHOM 3799
-dns325 MACH_DNS325 DNS325 3800
-sarnen MACH_SARNEN SARNEN 3801
-ubisys_g1 MACH_UBISYS_G1 UBISYS_G1 3802
-mx53_pf1 MACH_MX53_PF1 MX53_PF1 3803
-asanti MACH_ASANTI ASANTI 3804
-volta MACH_VOLTA VOLTA 3805
-knight MACH_KNIGHT KNIGHT 3807
-beaglebone MACH_BEAGLEBONE BEAGLEBONE 3808
-becker MACH_BECKER BECKER 3809
-fc360 MACH_FC360 FC360 3810
-pmi2_xls MACH_PMI2_XLS PMI2_XLS 3811
-taranto MACH_TARANTO TARANTO 3812
-plutux MACH_PLUTUX PLUTUX 3813
-ipmp_medcom MACH_IPMP_MEDCOM IPMP_MEDCOM 3814
-absolut MACH_ABSOLUT ABSOLUT 3815
-awpb3 MACH_AWPB3 AWPB3 3816
-nfp32xx_dt MACH_NFP32XX_DT NFP32XX_DT 3817
-dl_pb53 MACH_DL_PB53 DL_PB53 3818
-acu_ii MACH_ACU_II ACU_II 3819
-avalon MACH_AVALON AVALON 3820
-sphinx MACH_SPHINX SPHINX 3821
-titan_t MACH_TITAN_T TITAN_T 3822
-harvest_boris MACH_HARVEST_BORIS HARVEST_BORIS 3823
-mach_msm7x30_m3s MACH_MACH_MSM7X30_M3S MACH_MSM7X30_M3S 3824
-smdk5250 MACH_SMDK5250 SMDK5250 3825
-imxt_lite MACH_IMXT_LITE IMXT_LITE 3826
-imxt_std MACH_IMXT_STD IMXT_STD 3827
-imxt_log MACH_IMXT_LOG IMXT_LOG 3828
-imxt_nav MACH_IMXT_NAV IMXT_NAV 3829
-imxt_full MACH_IMXT_FULL IMXT_FULL 3830
-ag09015 MACH_AG09015 AG09015 3831
-am3517_mt_ventoux MACH_AM3517_MT_VENTOUX AM3517_MT_VENTOUX 3832
-dp1arm9 MACH_DP1ARM9 DP1ARM9 3833
-picasso_m MACH_PICASSO_M PICASSO_M 3834
-video_gadget MACH_VIDEO_GADGET VIDEO_GADGET 3835
-mtt_om3x MACH_MTT_OM3X MTT_OM3X 3836
-mx6q_arm2 MACH_MX6Q_ARM2 MX6Q_ARM2 3837
-picosam9g45 MACH_PICOSAM9G45 PICOSAM9G45 3838
-vpm_dm365 MACH_VPM_DM365 VPM_DM365 3839
-bonfire MACH_BONFIRE BONFIRE 3840
-mt2p2d MACH_MT2P2D MT2P2D 3841
-sigpda01 MACH_SIGPDA01 SIGPDA01 3842
-cn27 MACH_CN27 CN27 3843
-mx25_cwtap MACH_MX25_CWTAP MX25_CWTAP 3844
-apf28 MACH_APF28 APF28 3845
-pelco_maxwell MACH_PELCO_MAXWELL PELCO_MAXWELL 3846
-ge_phoenix MACH_GE_PHOENIX GE_PHOENIX 3847
-empc_a500 MACH_EMPC_A500 EMPC_A500 3848
-ims_arm9 MACH_IMS_ARM9 IMS_ARM9 3849
-mini2416 MACH_MINI2416 MINI2416 3850
-mini2450 MACH_MINI2450 MINI2450 3851
-mini310 MACH_MINI310 MINI310 3852
-spear_hurricane MACH_SPEAR_HURRICANE SPEAR_HURRICANE 3853
-mt7208 MACH_MT7208 MT7208 3854
-lpc178x MACH_LPC178X LPC178X 3855
-farleys MACH_FARLEYS FARLEYS 3856
-efm32gg_dk3750 MACH_EFM32GG_DK3750 EFM32GG_DK3750 3857
-zeus_board MACH_ZEUS_BOARD ZEUS_BOARD 3858
-cc51 MACH_CC51 CC51 3859
-fxi_c210 MACH_FXI_C210 FXI_C210 3860
-msm8627_cdp MACH_MSM8627_CDP MSM8627_CDP 3861
-msm8627_mtp MACH_MSM8627_MTP MSM8627_MTP 3862
 armadillo800eva MACH_ARMADILLO800EVA ARMADILLO800EVA 3863
-primou MACH_PRIMOU PRIMOU 3864
-primoc MACH_PRIMOC PRIMOC 3865
-primoct MACH_PRIMOCT PRIMOCT 3866
-a9500 MACH_A9500 A9500 3867
-pluto MACH_PLUTO PLUTO 3869
-acfx100 MACH_ACFX100 ACFX100 3870
-msm8625_rumi3 MACH_MSM8625_RUMI3 MSM8625_RUMI3 3871
-valente MACH_VALENTE VALENTE 3872
-crfs_rfeye MACH_CRFS_RFEYE CRFS_RFEYE 3873
-rfeye MACH_RFEYE RFEYE 3874
-phidget_sbc3 MACH_PHIDGET_SBC3 PHIDGET_SBC3 3875
-tcw_mika MACH_TCW_MIKA TCW_MIKA 3876
-imx28_egf MACH_IMX28_EGF IMX28_EGF 3877
-valente_wx MACH_VALENTE_WX VALENTE_WX 3878
-huangshans MACH_HUANGSHANS HUANGSHANS 3879
-bosphorus1 MACH_BOSPHORUS1 BOSPHORUS1 3880
-prima MACH_PRIMA PRIMA 3881
-evita_ulk MACH_EVITA_ULK EVITA_ULK 3884
-merisc600 MACH_MERISC600 MERISC600 3885
-dolak MACH_DOLAK DOLAK 3886
-sbc53 MACH_SBC53 SBC53 3887
-elite_ulk MACH_ELITE_ULK ELITE_ULK 3888
-pov2 MACH_POV2 POV2 3889
-ipod_touch_2g MACH_IPOD_TOUCH_2G IPOD_TOUCH_2G 3890
-da850_pqab MACH_DA850_PQAB DA850_PQAB 3891
-fermi MACH_FERMI FERMI 3892
-ccardwmx28 MACH_CCARDWMX28 CCARDWMX28 3893
-ccardmx28 MACH_CCARDMX28 CCARDMX28 3894
-fs20_fcm2050 MACH_FS20_FCM2050 FS20_FCM2050 3895
-kinetis MACH_KINETIS KINETIS 3896
-kai MACH_KAI KAI 3897
-bcthb2 MACH_BCTHB2 BCTHB2 3898
-inels3_cu MACH_INELS3_CU INELS3_CU 3899
-da850_apollo MACH_DA850_APOLLO DA850_APOLLO 3901
-tracnas MACH_TRACNAS TRACNAS 3902
-mityarm335x MACH_MITYARM335X MITYARM335X 3903
-xcgz7x MACH_XCGZ7X XCGZ7X 3904
-cubox MACH_CUBOX CUBOX 3905
-terminator MACH_TERMINATOR TERMINATOR 3906
-eye03 MACH_EYE03 EYE03 3907
-kota3 MACH_KOTA3 KOTA3 3908
-pscpe MACH_PSCPE PSCPE 3910
-akt1100 MACH_AKT1100 AKT1100 3911
-pcaaxl2 MACH_PCAAXL2 PCAAXL2 3912
-primodd_ct MACH_PRIMODD_CT PRIMODD_CT 3913
-nsbc MACH_NSBC NSBC 3914
-meson2_skt MACH_MESON2_SKT MESON2_SKT 3915
-meson2_ref MACH_MESON2_REF MESON2_REF 3916
-ccardwmx28js MACH_CCARDWMX28JS CCARDWMX28JS 3917
-ccardmx28js MACH_CCARDMX28JS CCARDMX28JS 3918
-indico MACH_INDICO INDICO 3919
-msm8960dt MACH_MSM8960DT MSM8960DT 3920
-primods MACH_PRIMODS PRIMODS 3921
-beluga_m1388 MACH_BELUGA_M1388 BELUGA_M1388 3922
-primotd MACH_PRIMOTD PRIMOTD 3923
-varan_master MACH_VARAN_MASTER VARAN_MASTER 3924
-primodd MACH_PRIMODD PRIMODD 3925
-jetduo MACH_JETDUO JETDUO 3926
 mx53_umobo MACH_MX53_UMOBO MX53_UMOBO 3927
-trats MACH_TRATS TRATS 3928
-starcraft MACH_STARCRAFT STARCRAFT 3929
-qseven_tegra2 MACH_QSEVEN_TEGRA2 QSEVEN_TEGRA2 3930
-lichee_sun4i_devbd MACH_LICHEE_SUN4I_DEVBD LICHEE_SUN4I_DEVBD 3931
-movenow MACH_MOVENOW MOVENOW 3932
-golf_u MACH_GOLF_U GOLF_U 3933
-msm7627a_evb MACH_MSM7627A_EVB MSM7627A_EVB 3934
-rambo MACH_RAMBO RAMBO 3935
-golfu MACH_GOLFU GOLFU 3936
-mango310 MACH_MANGO310 MANGO310 3937
-dns343 MACH_DNS343 DNS343 3938
-var_som_om44 MACH_VAR_SOM_OM44 VAR_SOM_OM44 3939
-naon MACH_NAON NAON 3940
-vp4000 MACH_VP4000 VP4000 3941
-impcard MACH_IMPCARD IMPCARD 3942
-smoovcam MACH_SMOOVCAM SMOOVCAM 3943
-cobham3725 MACH_COBHAM3725 COBHAM3725 3944
-cobham3730 MACH_COBHAM3730 COBHAM3730 3945
-cobham3703 MACH_COBHAM3703 COBHAM3703 3946
-quetzal MACH_QUETZAL QUETZAL 3947
-apq8064_cdp MACH_APQ8064_CDP APQ8064_CDP 3948
-apq8064_mtp MACH_APQ8064_MTP APQ8064_MTP 3949
-apq8064_fluid MACH_APQ8064_FLUID APQ8064_FLUID 3950
-apq8064_liquid MACH_APQ8064_LIQUID APQ8064_LIQUID 3951
-mango210 MACH_MANGO210 MANGO210 3952
-mango100 MACH_MANGO100 MANGO100 3953
-mango24 MACH_MANGO24 MANGO24 3954
-mango64 MACH_MANGO64 MANGO64 3955
-nsa320 MACH_NSA320 NSA320 3956
-elv_ccu2 MACH_ELV_CCU2 ELV_CCU2 3957
-triton_x00 MACH_TRITON_X00 TRITON_X00 3958
-triton_1500_2000 MACH_TRITON_1500_2000 TRITON_1500_2000 3959
-pogoplugv4 MACH_POGOPLUGV4 POGOPLUGV4 3960
-venus_cl MACH_VENUS_CL VENUS_CL 3961
-vulcano_g20 MACH_VULCANO_G20 VULCANO_G20 3962
-sgs_i9100 MACH_SGS_I9100 SGS_I9100 3963
-stsv2 MACH_STSV2 STSV2 3964
-csb1724 MACH_CSB1724 CSB1724 3965
-omapl138_lcdk MACH_OMAPL138_LCDK OMAPL138_LCDK 3966
-pvd_mx25 MACH_PVD_MX25 PVD_MX25 3968
-meson6_skt MACH_MESON6_SKT MESON6_SKT 3969
-meson6_ref MACH_MESON6_REF MESON6_REF 3970
-pxm MACH_PXM PXM 3971
-pogoplugv3 MACH_POGOPLUGV3 POGOPLUGV3 3973
-mlp89626 MACH_MLP89626 MLP89626 3974
-iomegahmndce MACH_IOMEGAHMNDCE IOMEGAHMNDCE 3975
-pogoplugv3pci MACH_POGOPLUGV3PCI POGOPLUGV3PCI 3976
-bntv250 MACH_BNTV250 BNTV250 3977
-mx53_qseven MACH_MX53_QSEVEN MX53_QSEVEN 3978
-gtl_it1100 MACH_GTL_IT1100 GTL_IT1100 3979
-mx6q_sabresd MACH_MX6Q_SABRESD MX6Q_SABRESD 3980
 mt4 MACH_MT4 MT4 3981
-jumbo_d MACH_JUMBO_D JUMBO_D 3982
-jumbo_i MACH_JUMBO_I JUMBO_I 3983
-fs20_dmp MACH_FS20_DMP FS20_DMP 3984
-dns320 MACH_DNS320 DNS320 3985
-mx28bacos MACH_MX28BACOS MX28BACOS 3986
-tl80 MACH_TL80 TL80 3987
-polatis_nic_1001 MACH_POLATIS_NIC_1001 POLATIS_NIC_1001 3988
-tely MACH_TELY TELY 3989
 u8520 MACH_U8520 U8520 3990
-manta MACH_MANTA MANTA 3991
-mpq8064_cdp MACH_MPQ8064_CDP MPQ8064_CDP 3993
-mpq8064_dtv MACH_MPQ8064_DTV MPQ8064_DTV 3995
-dm368som MACH_DM368SOM DM368SOM 3996
-gprisb2 MACH_GPRISB2 GPRISB2 3997
-chammid MACH_CHAMMID CHAMMID 3998
-seoul2 MACH_SEOUL2 SEOUL2 3999
-omap4_nooktablet MACH_OMAP4_NOOKTABLET OMAP4_NOOKTABLET 4000
-aalto MACH_AALTO AALTO 4001
-metro MACH_METRO METRO 4002
-cydm3730 MACH_CYDM3730 CYDM3730 4003
-tqma53 MACH_TQMA53 TQMA53 4004
-msm7627a_qrd3 MACH_MSM7627A_QRD3 MSM7627A_QRD3 4005
-mx28_canby MACH_MX28_CANBY MX28_CANBY 4006
-tiger MACH_TIGER TIGER 4007
-pcats_9307_type_a MACH_PCATS_9307_TYPE_A PCATS_9307_TYPE_A 4008
-pcats_9307_type_o MACH_PCATS_9307_TYPE_O PCATS_9307_TYPE_O 4009
-pcats_9307_type_r MACH_PCATS_9307_TYPE_R PCATS_9307_TYPE_R 4010
-streamplug MACH_STREAMPLUG STREAMPLUG 4011
-icechicken_dev MACH_ICECHICKEN_DEV ICECHICKEN_DEV 4012
-hedgehog MACH_HEDGEHOG HEDGEHOG 4013
-yusend_obc MACH_YUSEND_OBC YUSEND_OBC 4014
-imxninja MACH_IMXNINJA IMXNINJA 4015
-omap4_jarod MACH_OMAP4_JAROD OMAP4_JAROD 4016
-eco5_pk MACH_ECO5_PK ECO5_PK 4017
-qj2440 MACH_QJ2440 QJ2440 4018
-mx6q_mercury MACH_MX6Q_MERCURY MX6Q_MERCURY 4019
-cm6810 MACH_CM6810 CM6810 4020
-omap4_torpedo MACH_OMAP4_TORPEDO OMAP4_TORPEDO 4021
-nsa310 MACH_NSA310 NSA310 4022
-tmx536 MACH_TMX536 TMX536 4023
-ktt20 MACH_KTT20 KTT20 4024
-dragonix MACH_DRAGONIX DRAGONIX 4025
-lungching MACH_LUNGCHING LUNGCHING 4026
-bulogics MACH_BULOGICS BULOGICS 4027
-mx535_sx MACH_MX535_SX MX535_SX 4028
-ngui3250 MACH_NGUI3250 NGUI3250 4029
-salutec_dac MACH_SALUTEC_DAC SALUTEC_DAC 4030
-loco MACH_LOCO LOCO 4031
-ctera_plug_usi MACH_CTERA_PLUG_USI CTERA_PLUG_USI 4032
-scepter MACH_SCEPTER SCEPTER 4033
-sga MACH_SGA SGA 4034
-p_81_j5 MACH_P_81_J5 P_81_J5 4035
-p_81_o4 MACH_P_81_O4 P_81_O4 4036
-msm8625_surf MACH_MSM8625_SURF MSM8625_SURF 4037
-carallon_shark MACH_CARALLON_SHARK CARALLON_SHARK 4038
-ordog MACH_ORDOG ORDOG 4040
-puente_io MACH_PUENTE_IO PUENTE_IO 4041
-msm8625_evb MACH_MSM8625_EVB MSM8625_EVB 4042
-ev_am1707 MACH_EV_AM1707 EV_AM1707 4043
-ev_am1707e2 MACH_EV_AM1707E2 EV_AM1707E2 4044
-ev_am3517e2 MACH_EV_AM3517E2 EV_AM3517E2 4045
-calabria MACH_CALABRIA CALABRIA 4046
-ev_imx287 MACH_EV_IMX287 EV_IMX287 4047
-erau MACH_ERAU ERAU 4048
-sichuan MACH_SICHUAN SICHUAN 4049
-davinci_da850 MACH_DAVINCI_DA850 DAVINCI_DA850 4051
-omap138_trunarc MACH_OMAP138_TRUNARC OMAP138_TRUNARC 4052
-bcm4761 MACH_BCM4761 BCM4761 4053
-picasso_e2 MACH_PICASSO_E2 PICASSO_E2 4054
-picasso_mf MACH_PICASSO_MF PICASSO_MF 4055
-miro MACH_MIRO MIRO 4056
-at91sam9g20ewon3 MACH_AT91SAM9G20EWON3 AT91SAM9G20EWON3 4057
-yoyo MACH_YOYO YOYO 4058
-windjkl MACH_WINDJKL WINDJKL 4059
-monarudo MACH_MONARUDO MONARUDO 4060
-batan MACH_BATAN BATAN 4061
-tadao MACH_TADAO TADAO 4062
-baso MACH_BASO BASO 4063
-mahon MACH_MAHON MAHON 4064
-villec2 MACH_VILLEC2 VILLEC2 4065
-asi1230 MACH_ASI1230 ASI1230 4066
-alaska MACH_ALASKA ALASKA 4067
-swarco_shdsl2 MACH_SWARCO_SHDSL2 SWARCO_SHDSL2 4068
-oxrtu MACH_OXRTU OXRTU 4069
-omap5_panda MACH_OMAP5_PANDA OMAP5_PANDA 4070
-c8000 MACH_C8000 C8000 4072
-bje_display3_5 MACH_BJE_DISPLAY3_5 BJE_DISPLAY3_5 4073
-picomod7 MACH_PICOMOD7 PICOMOD7 4074
-picocom5 MACH_PICOCOM5 PICOCOM5 4075
-qblissa8 MACH_QBLISSA8 QBLISSA8 4076
-armstonea8 MACH_ARMSTONEA8 ARMSTONEA8 4077
-netdcu14 MACH_NETDCU14 NETDCU14 4078
-at91sam9x5_epiphan MACH_AT91SAM9X5_EPIPHAN AT91SAM9X5_EPIPHAN 4079
-p2u MACH_P2U P2U 4080
-doris MACH_DORIS DORIS 4081
-j49 MACH_J49 J49 4082
-vdss2e MACH_VDSS2E VDSS2E 4083
-vc300 MACH_VC300 VC300 4084
-ns115_pad_test MACH_NS115_PAD_TEST NS115_PAD_TEST 4085
-ns115_pad_ref MACH_NS115_PAD_REF NS115_PAD_REF 4086
-ns115_phone_test MACH_NS115_PHONE_TEST NS115_PHONE_TEST 4087
-ns115_phone_ref MACH_NS115_PHONE_REF NS115_PHONE_REF 4088
-golfc MACH_GOLFC GOLFC 4089
-xerox_olympus MACH_XEROX_OLYMPUS XEROX_OLYMPUS 4090
-mx6sl_arm2 MACH_MX6SL_ARM2 MX6SL_ARM2 4091
-csb1701_csb1726 MACH_CSB1701_CSB1726 CSB1701_CSB1726 4092
-at91sam9xeek MACH_AT91SAM9XEEK AT91SAM9XEEK 4093
-ebv210 MACH_EBV210 EBV210 4094
-msm7627a_qrd7 MACH_MSM7627A_QRD7 MSM7627A_QRD7 4095
-svthin MACH_SVTHIN SVTHIN 4096
-duovero MACH_DUOVERO DUOVERO 4097
 chupacabra MACH_CHUPACABRA CHUPACABRA 4098
 scorpion MACH_SCORPION SCORPION 4099
 davinci_he_hmi10 MACH_DAVINCI_HE_HMI10 DAVINCI_HE_HMI10 4100
@@ -1157,7 +580,6 @@ tam335x MACH_TAM335X TAM335X 4116
 grouper MACH_GROUPER GROUPER 4117
 mpcsa21_9g20 MACH_MPCSA21_9G20 MPCSA21_9G20 4118
 m6u_cpu MACH_M6U_CPU M6U_CPU 4119
-davinci_dp10 MACH_DAVINCI_DP10 DAVINCI_DP10 4120
 ginkgo MACH_GINKGO GINKGO 4121
 cgt_qmx6 MACH_CGT_QMX6 CGT_QMX6 4122
 profpga MACH_PROFPGA PROFPGA 4123
@@ -1204,3 +626,384 @@ baileys MACH_BAILEYS BAILEYS 4169
 familybox MACH_FAMILYBOX FAMILYBOX 4170
 ensemble_mx35 MACH_ENSEMBLE_MX35 ENSEMBLE_MX35 4171
 sc_sps_1 MACH_SC_SPS_1 SC_SPS_1 4172
+ucsimply_sam9260 MACH_UCSIMPLY_SAM9260 UCSIMPLY_SAM9260 4173
+unicorn MACH_UNICORN UNICORN 4174
+m9g45a MACH_M9G45A M9G45A 4175
+mtwebif MACH_MTWEBIF MTWEBIF 4176
+playstone MACH_PLAYSTONE PLAYSTONE 4177
+chelsea MACH_CHELSEA CHELSEA 4178
+bayern MACH_BAYERN BAYERN 4179
+mitwo MACH_MITWO MITWO 4180
+mx25_noah MACH_MX25_NOAH MX25_NOAH 4181
+stm_b2020 MACH_STM_B2020 STM_B2020 4182
+annax_src MACH_ANNAX_SRC ANNAX_SRC 4183
+ionics_stratus MACH_IONICS_STRATUS IONICS_STRATUS 4184
+hugo MACH_HUGO HUGO 4185
+em300 MACH_EM300 EM300 4186
+mmp3_qseven MACH_MMP3_QSEVEN MMP3_QSEVEN 4187
+bosphorus2 MACH_BOSPHORUS2 BOSPHORUS2 4188
+tt2200 MACH_TT2200 TT2200 4189
+ocelot3 MACH_OCELOT3 OCELOT3 4190
+tek_cobra MACH_TEK_COBRA TEK_COBRA 4191
+protou MACH_PROTOU PROTOU 4192
+msm8625_evt MACH_MSM8625_EVT MSM8625_EVT 4193
+mx53_sellwood MACH_MX53_SELLWOOD MX53_SELLWOOD 4194
+somiq_am35 MACH_SOMIQ_AM35 SOMIQ_AM35 4195
+somiq_am37 MACH_SOMIQ_AM37 SOMIQ_AM37 4196
+k2_plc_cl MACH_K2_PLC_CL K2_PLC_CL 4197
+tc2 MACH_TC2 TC2 4198
+dulex_j MACH_DULEX_J DULEX_J 4199
+stm_b2044 MACH_STM_B2044 STM_B2044 4200
+deluxe_j MACH_DELUXE_J DELUXE_J 4201
+mango2443 MACH_MANGO2443 MANGO2443 4202
+cp2dcg MACH_CP2DCG CP2DCG 4203
+cp2dtg MACH_CP2DTG CP2DTG 4204
+cp2dug MACH_CP2DUG CP2DUG 4205
+var_som_am33 MACH_VAR_SOM_AM33 VAR_SOM_AM33 4206
+pepper MACH_PEPPER PEPPER 4207
+mango2450 MACH_MANGO2450 MANGO2450 4208
+valente_wx_c9 MACH_VALENTE_WX_C9 VALENTE_WX_C9 4209
+minitv MACH_MINITV MINITV 4210
+u8540 MACH_U8540 U8540 4211
+iv_atlas_i_z7e MACH_IV_ATLAS_I_Z7E IV_ATLAS_I_Z7E 4212
+mach_type_sky MACH_MACH_TYPE_SKY MACH_TYPE_SKY 4214
+bluesky MACH_BLUESKY BLUESKY 4215
+ngrouter MACH_NGROUTER NGROUTER 4216
+mx53_denetim MACH_MX53_DENETIM MX53_DENETIM 4217
+opal MACH_OPAL OPAL 4218
+gnet_us3gref MACH_GNET_US3GREF GNET_US3GREF 4219
+gnet_nc3g MACH_GNET_NC3G GNET_NC3G 4220
+gnet_ge3g MACH_GNET_GE3G GNET_GE3G 4221
+adp2 MACH_ADP2 ADP2 4222
+tqma28 MACH_TQMA28 TQMA28 4223
+kacom3 MACH_KACOM3 KACOM3 4224
+rrhdemo MACH_RRHDEMO RRHDEMO 4225
+protodug MACH_PROTODUG PROTODUG 4226
+lago MACH_LAGO LAGO 4227
+ktt30 MACH_KTT30 KTT30 4228
+ts43xx MACH_TS43XX TS43XX 4229
+mx6q_denso MACH_MX6Q_DENSO MX6Q_DENSO 4230
+comsat_gsmumts8 MACH_COMSAT_GSMUMTS8 COMSAT_GSMUMTS8 4231
+dreamx MACH_DREAMX DREAMX 4232
+thunderstonem MACH_THUNDERSTONEM THUNDERSTONEM 4233
+yoyopad MACH_YOYOPAD YOYOPAD 4234
+yoyopatient MACH_YOYOPATIENT YOYOPATIENT 4235
+a10l MACH_A10L A10L 4236
+mq60 MACH_MQ60 MQ60 4237
+linkstation_lsql MACH_LINKSTATION_LSQL LINKSTATION_LSQL 4238
+am3703gateway MACH_AM3703GATEWAY AM3703GATEWAY 4239
+accipiter MACH_ACCIPITER ACCIPITER 4240
+magnidug MACH_MAGNIDUG MAGNIDUG 4242
+hydra MACH_HYDRA HYDRA 4243
+sun3i MACH_SUN3I SUN3I 4244
+stm_b2078 MACH_STM_B2078 STM_B2078 4245
+at91sam9263deskv2 MACH_AT91SAM9263DESKV2 AT91SAM9263DESKV2 4246
+deluxe_r MACH_DELUXE_R DELUXE_R 4247
+p_98_v MACH_P_98_V P_98_V 4248
+p_98_c MACH_P_98_C P_98_C 4249
+davinci_am18xx_omn MACH_DAVINCI_AM18XX_OMN DAVINCI_AM18XX_OMN 4250
+socfpga_cyclone5 MACH_SOCFPGA_CYCLONE5 SOCFPGA_CYCLONE5 4251
+cabatuin MACH_CABATUIN CABATUIN 4252
+yoyopad_ft MACH_YOYOPAD_FT YOYOPAD_FT 4253
+dan2400evb MACH_DAN2400EVB DAN2400EVB 4254
+dan3400evb MACH_DAN3400EVB DAN3400EVB 4255
+edm_sf_imx6 MACH_EDM_SF_IMX6 EDM_SF_IMX6 4256
+edm_cf_imx6 MACH_EDM_CF_IMX6 EDM_CF_IMX6 4257
+vpos3xx MACH_VPOS3XX VPOS3XX 4258
+vulcano_9x5 MACH_VULCANO_9X5 VULCANO_9X5 4259
+spmp8000 MACH_SPMP8000 SPMP8000 4260
+catalina MACH_CATALINA CATALINA 4261
+rd88f5181l_fe MACH_RD88F5181L_FE RD88F5181L_FE 4262
+mx535_mx MACH_MX535_MX MX535_MX 4263
+armadillo840 MACH_ARMADILLO840 ARMADILLO840 4264
+spc9000baseboard MACH_SPC9000BASEBOARD SPC9000BASEBOARD 4265
+iris MACH_IRIS IRIS 4266
+protodcg MACH_PROTODCG PROTODCG 4267
+palmtree MACH_PALMTREE PALMTREE 4268
+novena MACH_NOVENA NOVENA 4269
+ma_um MACH_MA_UM MA_UM 4270
+ma_am MACH_MA_AM MA_AM 4271
+ems348 MACH_EMS348 EMS348 4272
+cm_fx6 MACH_CM_FX6 CM_FX6 4273
+arndale MACH_ARNDALE ARNDALE 4274
+q5xr5 MACH_Q5XR5 Q5XR5 4275
+willow MACH_WILLOW WILLOW 4276
+omap3621_odyv3 MACH_OMAP3621_ODYV3 OMAP3621_ODYV3 4277
+omapl138_presonus MACH_OMAPL138_PRESONUS OMAPL138_PRESONUS 4278
+dvf99 MACH_DVF99 DVF99 4279
+impression_j MACH_IMPRESSION_J IMPRESSION_J 4280
+qblissa9 MACH_QBLISSA9 QBLISSA9 4281
+robin_heliview10 MACH_ROBIN_HELIVIEW10 ROBIN_HELIVIEW10 4282
+sun7i MACH_SUN7I SUN7I 4283
+mx6q_hdmidongle MACH_MX6Q_HDMIDONGLE MX6Q_HDMIDONGLE 4284
+mx6_sid2 MACH_MX6_SID2 MX6_SID2 4285
+helios_v3 MACH_HELIOS_V3 HELIOS_V3 4286
+helios_v4 MACH_HELIOS_V4 HELIOS_V4 4287
+q7_imx6 MACH_Q7_IMX6 Q7_IMX6 4288
+odroidx MACH_ODROIDX ODROIDX 4289
+robpro MACH_ROBPRO ROBPRO 4290
+research59if_mk1 MACH_RESEARCH59IF_MK1 RESEARCH59IF_MK1 4291
+bobsleigh MACH_BOBSLEIGH BOBSLEIGH 4292
+dcshgwt3 MACH_DCSHGWT3 DCSHGWT3 4293
+gld1018 MACH_GLD1018 GLD1018 4294
+ev10 MACH_EV10 EV10 4295
+nitrogen6x MACH_NITROGEN6X NITROGEN6X 4296
+p_107_bb MACH_P_107_BB P_107_BB 4297
+evita_utl MACH_EVITA_UTL EVITA_UTL 4298
+falconwing MACH_FALCONWING FALCONWING 4299
+dct3 MACH_DCT3 DCT3 4300
+cpx2e_cell MACH_CPX2E_CELL CPX2E_CELL 4301
+amiro MACH_AMIRO AMIRO 4302
+mx6q_brassboard MACH_MX6Q_BRASSBOARD MX6Q_BRASSBOARD 4303
+dalmore MACH_DALMORE DALMORE 4304
+omap3_portal7cp MACH_OMAP3_PORTAL7CP OMAP3_PORTAL7CP 4305
+tegra_pluto MACH_TEGRA_PLUTO TEGRA_PLUTO 4306
+mx6sl_evk MACH_MX6SL_EVK MX6SL_EVK 4307
+m7 MACH_M7 M7 4308
+pxm2 MACH_PXM2 PXM2 4309
+haba_knx_lite MACH_HABA_KNX_LITE HABA_KNX_LITE 4310
+tai MACH_TAI TAI 4311
+prototd MACH_PROTOTD PROTOTD 4312
+dst_tonto MACH_DST_TONTO DST_TONTO 4313
+draco MACH_DRACO DRACO 4314
+dxr2 MACH_DXR2 DXR2 4315
+rut MACH_RUT RUT 4316
+am180x_wsc MACH_AM180X_WSC AM180X_WSC 4317
+deluxe_u MACH_DELUXE_U DELUXE_U 4318
+deluxe_ul MACH_DELUXE_UL DELUXE_UL 4319
+at91sam9260medths MACH_AT91SAM9260MEDTHS AT91SAM9260MEDTHS 4320
+matrix516 MACH_MATRIX516 MATRIX516 4321
+vid401x MACH_VID401X VID401X 4322
+helios_v5 MACH_HELIOS_V5 HELIOS_V5 4323
+playpaq2 MACH_PLAYPAQ2 PLAYPAQ2 4324
+igam MACH_IGAM IGAM 4325
+amico_i MACH_AMICO_I AMICO_I 4326
+amico_e MACH_AMICO_E AMICO_E 4327
+sentient_mm3_ck MACH_SENTIENT_MM3_CK SENTIENT_MM3_CK 4328
+smx6 MACH_SMX6 SMX6 4329
+pango MACH_PANGO PANGO 4330
+ns115_stick MACH_NS115_STICK NS115_STICK 4331
+bctrm3 MACH_BCTRM3 BCTRM3 4332
+doctorws MACH_DOCTORWS DOCTORWS 4333
+m2601 MACH_M2601 M2601 4334
+vgg1111 MACH_VGG1111 VGG1111 4337
+countach MACH_COUNTACH COUNTACH 4338
+visstrim_sm20 MACH_VISSTRIM_SM20 VISSTRIM_SM20 4339
+a639 MACH_A639 A639 4340
+spacemonkey MACH_SPACEMONKEY SPACEMONKEY 4341
+zpdu_stamp MACH_ZPDU_STAMP ZPDU_STAMP 4342
+htc_g7_clone MACH_HTC_G7_CLONE HTC_G7_CLONE 4343
+ft2080_corvus MACH_FT2080_CORVUS FT2080_CORVUS 4344
+fisland MACH_FISLAND FISLAND 4345
+zpdu MACH_ZPDU ZPDU 4346
+urt MACH_URT URT 4347
+conti_ovip MACH_CONTI_OVIP CONTI_OVIP 4348
+omapl138_nagra MACH_OMAPL138_NAGRA OMAPL138_NAGRA 4349
+da850_at3kp1 MACH_DA850_AT3KP1 DA850_AT3KP1 4350
+da850_at3kp2 MACH_DA850_AT3KP2 DA850_AT3KP2 4351
+surma MACH_SURMA SURMA 4352
+stm_b2092 MACH_STM_B2092 STM_B2092 4353
+mx535_ycr MACH_MX535_YCR MX535_YCR 4354
+m7_wl MACH_M7_WL M7_WL 4355
+m7_u MACH_M7_U M7_U 4356
+omap3_stndt_evm MACH_OMAP3_STNDT_EVM OMAP3_STNDT_EVM 4357
+m7_wlv MACH_M7_WLV M7_WLV 4358
+xam3517 MACH_XAM3517 XAM3517 4359
+a220 MACH_A220 A220 4360
+aclima_odie MACH_ACLIMA_ODIE ACLIMA_ODIE 4361
+vibble MACH_VIBBLE VIBBLE 4362
+k2_u MACH_K2_U K2_U 4363
+mx53_egf MACH_MX53_EGF MX53_EGF 4364
+novpek_imx53 MACH_NOVPEK_IMX53 NOVPEK_IMX53 4365
+novpek_imx6x MACH_NOVPEK_IMX6X NOVPEK_IMX6X 4366
+mx25_smartbox MACH_MX25_SMARTBOX MX25_SMARTBOX 4367
+eicg6410 MACH_EICG6410 EICG6410 4368
+picasso_e3 MACH_PICASSO_E3 PICASSO_E3 4369
+motonavigator MACH_MOTONAVIGATOR MOTONAVIGATOR 4370
+varioconnect2 MACH_VARIOCONNECT2 VARIOCONNECT2 4371
+deluxe_tw MACH_DELUXE_TW DELUXE_TW 4372
+kore3 MACH_KORE3 KORE3 4374
+mx6s_drs MACH_MX6S_DRS MX6S_DRS 4375
+cmimx6 MACH_CMIMX6 CMIMX6 4376
+roth MACH_ROTH ROTH 4377
+eq4ux MACH_EQ4UX EQ4UX 4378
+x1plus MACH_X1PLUS X1PLUS 4379
+modimx27 MACH_MODIMX27 MODIMX27 4380
832videon_hduac MACH_VIDEON_HDUAC VIDEON_HDUAC 4381
833blackbird MACH_BLACKBIRD BLACKBIRD 4382
834runmaster MACH_RUNMASTER RUNMASTER 4383
835ceres MACH_CERES CERES 4384
836nad435 MACH_NAD435 NAD435 4385
837ns115_proto_type MACH_NS115_PROTO_TYPE NS115_PROTO_TYPE 4386
838fs20_vcc MACH_FS20_VCC FS20_VCC 4387
839meson6tv_skt MACH_MESON6TV_SKT MESON6TV_SKT 4389
840keystone MACH_KEYSTONE KEYSTONE 4390
841pcm052 MACH_PCM052 PCM052 4391
842qrd_skud_prime MACH_QRD_SKUD_PRIME QRD_SKUD_PRIME 4393
843guf_santaro MACH_GUF_SANTARO GUF_SANTARO 4395
844sheepshead MACH_SHEEPSHEAD SHEEPSHEAD 4396
845mx6_iwg15m_mxm MACH_MX6_IWG15M_MXM MX6_IWG15M_MXM 4397
846mx6_iwg15m_q7 MACH_MX6_IWG15M_Q7 MX6_IWG15M_Q7 4398
847at91sam9263if8mic MACH_AT91SAM9263IF8MIC AT91SAM9263IF8MIC 4399
848marcopolo MACH_MARCOPOLO MARCOPOLO 4401
849mx535_sdcr MACH_MX535_SDCR MX535_SDCR 4402
850mx53_csb2733 MACH_MX53_CSB2733 MX53_CSB2733 4403
851diva MACH_DIVA DIVA 4404
852ncr_7744 MACH_NCR_7744 NCR_7744 4405
853macallan MACH_MACALLAN MACALLAN 4406
854wnr3500 MACH_WNR3500 WNR3500 4407
855pgavrf MACH_PGAVRF PGAVRF 4408
856helios_v6 MACH_HELIOS_V6 HELIOS_V6 4409
857lcct MACH_LCCT LCCT 4410
858csndug MACH_CSNDUG CSNDUG 4411
859wandboard_imx6 MACH_WANDBOARD_IMX6 WANDBOARD_IMX6 4412
860omap4_jet MACH_OMAP4_JET OMAP4_JET 4413
861tegra_roth MACH_TEGRA_ROTH TEGRA_ROTH 4414
862m7dcg MACH_M7DCG M7DCG 4415
863m7dug MACH_M7DUG M7DUG 4416
864m7dtg MACH_M7DTG M7DTG 4417
865ap42x MACH_AP42X AP42X 4418
866var_som_mx6 MACH_VAR_SOM_MX6 VAR_SOM_MX6 4419
867pdlu MACH_PDLU PDLU 4420
868hydrogen MACH_HYDROGEN HYDROGEN 4421
869npa211e MACH_NPA211E NPA211E 4422
870arcadia MACH_ARCADIA ARCADIA 4423
871arcadia_l MACH_ARCADIA_L ARCADIA_L 4424
872msm8930dt MACH_MSM8930DT MSM8930DT 4425
873ktam3874 MACH_KTAM3874 KTAM3874 4426
874cec4 MACH_CEC4 CEC4 4427
875ape6evm MACH_APE6EVM APE6EVM 4428
876tx6 MACH_TX6 TX6 4429
877cfa10037 MACH_CFA10037 CFA10037 4431
878ezp1000 MACH_EZP1000 EZP1000 4433
879wgr826v MACH_WGR826V WGR826V 4434
880exuma MACH_EXUMA EXUMA 4435
881fregate MACH_FREGATE FREGATE 4436
882osirisimx508 MACH_OSIRISIMX508 OSIRISIMX508 4437
883st_exigo MACH_ST_EXIGO ST_EXIGO 4438
884pismo MACH_PISMO PISMO 4439
885atc7 MACH_ATC7 ATC7 4440
886nspireclp MACH_NSPIRECLP NSPIRECLP 4441
887nspiretp MACH_NSPIRETP NSPIRETP 4442
888nspirecx MACH_NSPIRECX NSPIRECX 4443
889maya MACH_MAYA MAYA 4444
890wecct MACH_WECCT WECCT 4445
891m2s MACH_M2S M2S 4446
892msm8625q_evbd MACH_MSM8625Q_EVBD MSM8625Q_EVBD 4447
893tiny210 MACH_TINY210 TINY210 4448
894g3 MACH_G3 G3 4449
895hurricane MACH_HURRICANE HURRICANE 4450
896mx6_pod MACH_MX6_POD MX6_POD 4451
897elondcn MACH_ELONDCN ELONDCN 4452
898cwmx535 MACH_CWMX535 CWMX535 4453
899m7_wlj MACH_M7_WLJ M7_WLJ 4454
900qsp_arm MACH_QSP_ARM QSP_ARM 4455
901msm8625q_skud MACH_MSM8625Q_SKUD MSM8625Q_SKUD 4456
902htcmondrian MACH_HTCMONDRIAN HTCMONDRIAN 4457
903watson_ead MACH_WATSON_EAD WATSON_EAD 4458
904mitwoa MACH_MITWOA MITWOA 4459
905omap3_wolverine MACH_OMAP3_WOLVERINE OMAP3_WOLVERINE 4460
906mapletree MACH_MAPLETREE MAPLETREE 4461
907msm8625_fih_sae MACH_MSM8625_FIH_SAE MSM8625_FIH_SAE 4462
908epc35 MACH_EPC35 EPC35 4463
909smartrtu MACH_SMARTRTU SMARTRTU 4464
910rcm101 MACH_RCM101 RCM101 4465
911amx_imx53_mxx MACH_AMX_IMX53_MXX AMX_IMX53_MXX 4466
912acer_a12 MACH_ACER_A12 ACER_A12 4470
913sbc6x MACH_SBC6X SBC6X 4471
914u2 MACH_U2 U2 4472
915smdk4270 MACH_SMDK4270 SMDK4270 4473
916priscillag MACH_PRISCILLAG PRISCILLAG 4474
917priscillac MACH_PRISCILLAC PRISCILLAC 4475
918priscilla MACH_PRISCILLA PRISCILLA 4476
919innova_shpu_v2 MACH_INNOVA_SHPU_V2 INNOVA_SHPU_V2 4477
920mach_type_dep2410 MACH_MACH_TYPE_DEP2410 MACH_TYPE_DEP2410 4479
921bctre3 MACH_BCTRE3 BCTRE3 4480
922omap_m100 MACH_OMAP_M100 OMAP_M100 4481
923flo MACH_FLO FLO 4482
924nanobone MACH_NANOBONE NANOBONE 4483
925stm_b2105 MACH_STM_B2105 STM_B2105 4484
926omap4_bsc_bap_v3 MACH_OMAP4_BSC_BAP_V3 OMAP4_BSC_BAP_V3 4485
927ss1pam MACH_SS1PAM SS1PAM 4486
928primominiu MACH_PRIMOMINIU PRIMOMINIU 4488
929mrt_35hd_dualnas_e MACH_MRT_35HD_DUALNAS_E MRT_35HD_DUALNAS_E 4489
930kiwi MACH_KIWI KIWI 4490
931hw90496 MACH_HW90496 HW90496 4491
932mep2440 MACH_MEP2440 MEP2440 4492
933colibri_t30 MACH_COLIBRI_T30 COLIBRI_T30 4493
934cwv1 MACH_CWV1 CWV1 4494
935nsa325 MACH_NSA325 NSA325 4495
936dpxmtc MACH_DPXMTC DPXMTC 4497
937tt_stuttgart MACH_TT_STUTTGART TT_STUTTGART 4498
938miranda_apcii MACH_MIRANDA_APCII MIRANDA_APCII 4499
939mx6q_moderox MACH_MX6Q_MODEROX MX6Q_MODEROX 4500
940mudskipper MACH_MUDSKIPPER MUDSKIPPER 4501
941urania MACH_URANIA URANIA 4502
942stm_b2112 MACH_STM_B2112 STM_B2112 4503
943mx6q_ats_phoenix MACH_MX6Q_ATS_PHOENIX MX6Q_ATS_PHOENIX 4505
944stm_b2116 MACH_STM_B2116 STM_B2116 4506
945mythology MACH_MYTHOLOGY MYTHOLOGY 4507
946fc360v1 MACH_FC360V1 FC360V1 4508
947gps_sensor MACH_GPS_SENSOR GPS_SENSOR 4509
948gazelle MACH_GAZELLE GAZELLE 4510
949mpq8064_dma MACH_MPQ8064_DMA MPQ8064_DMA 4511
950wems_asd01 MACH_WEMS_ASD01 WEMS_ASD01 4512
951apalis_t30 MACH_APALIS_T30 APALIS_T30 4513
952armstonea9 MACH_ARMSTONEA9 ARMSTONEA9 4515
953omap_blazetablet MACH_OMAP_BLAZETABLET OMAP_BLAZETABLET 4516
954ar6mxq MACH_AR6MXQ AR6MXQ 4517
955ar6mxs MACH_AR6MXS AR6MXS 4518
956gwventana MACH_GWVENTANA GWVENTANA 4520
957igep0033 MACH_IGEP0033 IGEP0033 4521
958h52c1_concerto MACH_H52C1_CONCERTO H52C1_CONCERTO 4524
959fcmbrd MACH_FCMBRD FCMBRD 4525
960pcaaxs1 MACH_PCAAXS1 PCAAXS1 4526
961ls_orca MACH_LS_ORCA LS_ORCA 4527
962pcm051lb MACH_PCM051LB PCM051LB 4528
963mx6s_lp507_gvci MACH_MX6S_LP507_GVCI MX6S_LP507_GVCI 4529
964dido MACH_DIDO DIDO 4530
965swarco_itc3_9g20 MACH_SWARCO_ITC3_9G20 SWARCO_ITC3_9G20 4531
966robo_roady MACH_ROBO_ROADY ROBO_ROADY 4532
967rskrza1 MACH_RSKRZA1 RSKRZA1 4533
968swarco_sid MACH_SWARCO_SID SWARCO_SID 4534
969mx6_iwg15s_sbc MACH_MX6_IWG15S_SBC MX6_IWG15S_SBC 4535
970mx6q_camaro MACH_MX6Q_CAMARO MX6Q_CAMARO 4536
971hb6mxs MACH_HB6MXS HB6MXS 4537
972lager MACH_LAGER LAGER 4538
973lp8x4x MACH_LP8X4X LP8X4X 4539
974tegratab7 MACH_TEGRATAB7 TEGRATAB7 4540
975andromeda MACH_ANDROMEDA ANDROMEDA 4541
976bootes MACH_BOOTES BOOTES 4542
977nethmi MACH_NETHMI NETHMI 4543
978tegratab MACH_TEGRATAB TEGRATAB 4544
979som5_evb MACH_SOM5_EVB SOM5_EVB 4545
980venaticorum MACH_VENATICORUM VENATICORUM 4546
981stm_b2110 MACH_STM_B2110 STM_B2110 4547
982elux_hathor MACH_ELUX_HATHOR ELUX_HATHOR 4548
983helios_v7 MACH_HELIOS_V7 HELIOS_V7 4549
984xc10v1 MACH_XC10V1 XC10V1 4550
985cp2u MACH_CP2U CP2U 4551
986iap_f MACH_IAP_F IAP_F 4552
987iap_g MACH_IAP_G IAP_G 4553
988aae MACH_AAE AAE 4554
989pegasus MACH_PEGASUS PEGASUS 4555
990cygnus MACH_CYGNUS CYGNUS 4556
991centaurus MACH_CENTAURUS CENTAURUS 4557
992msm8930_qrd8930 MACH_MSM8930_QRD8930 MSM8930_QRD8930 4558
993quby_tim MACH_QUBY_TIM QUBY_TIM 4559
994zedi3250a MACH_ZEDI3250A ZEDI3250A 4560
995grus MACH_GRUS GRUS 4561
996apollo3 MACH_APOLLO3 APOLLO3 4562
997cowon_r7 MACH_COWON_R7 COWON_R7 4563
998tonga3 MACH_TONGA3 TONGA3 4564
999p535 MACH_P535 P535 4565
1000sa3874i MACH_SA3874I SA3874I 4566
1001mx6_navico_com MACH_MX6_NAVICO_COM MX6_NAVICO_COM 4567
1002proxmobil2 MACH_PROXMOBIL2 PROXMOBIL2 4568
1003ubinux1 MACH_UBINUX1 UBINUX1 4569
1004istos MACH_ISTOS ISTOS 4570
1005benvolio4 MACH_BENVOLIO4 BENVOLIO4 4571
1006eco5_bx2 MACH_ECO5_BX2 ECO5_BX2 4572
1007eukrea_cpuimx28sd MACH_EUKREA_CPUIMX28SD EUKREA_CPUIMX28SD 4573
1008domotab MACH_DOMOTAB DOMOTAB 4574
1009pfla03 MACH_PFLA03 PFLA03 4575
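
Each row above is one entry in the arch/arm/tools/mach-types registry, with four columns: the machine_is_xxx() name, the Kconfig symbol, the MACH_TYPE_xxx suffix, and the registered machine number. For context, below is a minimal sketch of the C preprocessor output that the gen-mach-types script derives from one row (using the cp2dtg / 4204 entry above as the example); the real generated include/generated/mach-types.h additionally manages the machine_arch_type fallback definitions, so treat this as illustrative rather than the exact generated text:

/* Sketch of generated output for the row:
 *   cp2dtg  MACH_CP2DTG  CP2DTG  4204
 * Column 4 becomes the MACH_TYPE_xxx constant (column 3 gives its
 * suffix); column 2 is the Kconfig symbol that gates the helper;
 * column 1 names the machine_is_xxx() predicate used by board code.
 */
#define MACH_TYPE_CP2DTG 4204

#ifdef CONFIG_MACH_CP2DTG
# define machine_is_cp2dtg()	(machine_arch_type == MACH_TYPE_CP2DTG)
#else
# define machine_is_cp2dtg()	(0)
#endif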