 Documentation/devicetree/bindings/arm/l2cc.txt | 6
 Documentation/devicetree/bindings/arm/pmu.txt | 12
 MAINTAINERS | 15
 arch/arm/Kconfig | 25
 arch/arm/common/mcpm_platsmp.c | 12
 arch/arm/include/asm/Kbuild | 1
 arch/arm/include/asm/assembler.h | 69
 arch/arm/include/asm/barrier.h | 13
 arch/arm/include/asm/bitops.h | 24
 arch/arm/include/asm/cacheflush.h | 21
 arch/arm/include/asm/dma-mapping.h | 2
 arch/arm/include/asm/domain.h | 53
 arch/arm/include/asm/fixmap.h | 15
 arch/arm/include/asm/futex.h | 19
 arch/arm/include/asm/glue-cache.h | 2
 arch/arm/include/asm/outercache.h | 17
 arch/arm/include/asm/pgtable-2level-hwdef.h | 1
 arch/arm/include/asm/psci.h | 23
 arch/arm/include/asm/smp.h | 2
 arch/arm/include/asm/smp_plat.h | 9
 arch/arm/include/asm/thread_info.h | 23
 arch/arm/include/asm/uaccess.h | 132
 arch/arm/kernel/Makefile | 5
 arch/arm/kernel/armksyms.c | 6
 arch/arm/kernel/entry-armv.S | 32
 arch/arm/kernel/entry-common.S | 63
 arch/arm/kernel/entry-header.S | 112
 arch/arm/kernel/head.S | 5
 arch/arm/kernel/irq.c | 1
 arch/arm/kernel/perf_event_v6.c | 2
 arch/arm/kernel/perf_event_v7.c | 2
 arch/arm/kernel/perf_event_xscale.c | 2
 arch/arm/kernel/process.c | 56
 arch/arm/kernel/psci.c | 299
 arch/arm/kernel/psci_smp.c | 31
 arch/arm/kernel/setup.c | 9
 arch/arm/kernel/signal.c | 6
 arch/arm/kernel/smp.c | 17
 arch/arm/kernel/swp_emulate.c | 3
 arch/arm/kernel/traps.c | 1
 arch/arm/lib/clear_user.S | 6
 arch/arm/lib/copy_from_user.S | 6
 arch/arm/lib/copy_to_user.S | 6
 arch/arm/lib/csumpartialcopyuser.S | 14
 arch/arm/lib/uaccess_with_memcpy.c | 4
 arch/arm/mach-highbank/highbank.c | 2
 arch/arm/mach-highbank/pm.c | 16
 arch/arm/mach-mmp/pm-pxa910.c | 1
 arch/arm/mach-omap2/Kconfig | 7
 arch/arm/mach-omap2/common.c | 1
 arch/arm/mach-omap2/common.h | 9
 arch/arm/mach-omap2/include/mach/barriers.h | 33
 arch/arm/mach-omap2/io.c | 2
 arch/arm/mach-omap2/omap4-common.c | 121
 arch/arm/mach-omap2/sleep44xx.S | 8
 arch/arm/mach-prima2/pm.c | 1
 arch/arm/mach-shmobile/common.h | 2
 arch/arm/mach-shmobile/platsmp.c | 4
 arch/arm/mach-shmobile/smp-r8a7790.c | 2
 arch/arm/mach-shmobile/smp-r8a7791.c | 2
 arch/arm/mach-shmobile/smp-sh73a0.c | 2
 arch/arm/mach-ux500/cache-l2x0.c | 1
 arch/arm/mach-ux500/cpu-db8500.c | 2
 arch/arm/mm/Kconfig | 4
 arch/arm/mm/abort-ev4.S | 1
 arch/arm/mm/abort-ev5t.S | 4
 arch/arm/mm/abort-ev5tj.S | 4
 arch/arm/mm/abort-ev6.S | 8
 arch/arm/mm/abort-ev7.S | 1
 arch/arm/mm/abort-lv4t.S | 2
 arch/arm/mm/abort-macro.S | 14
 arch/arm/mm/cache-feroceon-l2.c | 6
 arch/arm/mm/cache-l2x0.c | 5
 arch/arm/mm/dma-mapping.c | 22
 arch/arm/mm/dma.h | 32
 arch/arm/mm/flush.c | 15
 arch/arm/mm/highmem.c | 6
 arch/arm/mm/mmu.c | 92
 arch/arm/mm/pgd.c | 10
 arch/arm64/Kconfig | 1
 arch/arm64/include/asm/acpi.h | 4
 arch/arm64/include/asm/psci.h | 28
 arch/arm64/kernel/psci.c | 361
 arch/arm64/kernel/setup.c | 2
 drivers/Kconfig | 2
 drivers/Makefile | 1
 drivers/cpuidle/cpuidle-calxeda.c | 15
 drivers/firmware/Kconfig | 3
 drivers/firmware/Makefile | 1
 drivers/firmware/psci.c | 382
 drivers/firmware/qcom_scm-32.c | 4
 drivers/perf/Kconfig | 15
 drivers/perf/Makefile | 1
 arch/arm/kernel/perf_event.c => drivers/perf/arm_pmu.c | 69
 arch/arm/include/asm/pmu.h => include/linux/perf/arm_pmu.h | 4
 include/linux/psci.h | 52
 96 files changed, 1457 insertions(+), 1114 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt
index 2251dccb141e..06c88a4d28ac 100644
--- a/Documentation/devicetree/bindings/arm/l2cc.txt
+++ b/Documentation/devicetree/bindings/arm/l2cc.txt
@@ -67,6 +67,12 @@ Optional properties:
   disable if zero.
 - arm,prefetch-offset : Override prefetch offset value. Valid values are
   0-7, 15, 23, and 31.
+- arm,shared-override : The default behavior of the pl310 cache controller with
+  respect to the shareable attribute is to transform "normal memory
+  non-cacheable transactions" into "cacheable no allocate" (for reads) or
+  "write through no write allocate" (for writes).
+  On systems where this may cause DMA buffer corruption, this property must be
+  specified to indicate that such transforms are precluded.
 - prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1>
   (forcibly enable), property absent (retain settings set by firmware)
 - prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 3b5f5d1088c6..435251fa9ce0 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -26,13 +26,19 @@ Required properties:
 
 Optional properties:
 
-- interrupt-affinity : Valid only when using SPIs, specifies a list of phandles
-                       to CPU nodes corresponding directly to the affinity of
+- interrupt-affinity : When using SPIs, specifies a list of phandles to CPU
+                       nodes corresponding directly to the affinity of
                        the SPIs listed in the interrupts property.
 
-                       This property should be present when there is more than
+                       When using a PPI, specifies a list of phandles to CPU
+                       nodes corresponding to the set of CPUs which have
+                       a PMU of this type signalling the PPI listed in the
+                       interrupts property.
+
+                       This property should be present when there is more than
                        a single SPI.
 
+
 - qcom,no-pc-write : Indicates that this PMU doesn't support the 0xc and 0xd
                      events.
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 205cd5d687e4..b4c6754f3144 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -806,11 +806,13 @@ F: arch/arm/include/asm/floppy.h
 ARM PMU PROFILING AND DEBUGGING
 M:	Will Deacon <will.deacon@arm.com>
 S:	Maintained
-F:	arch/arm/kernel/perf_event*
+F:	arch/arm/kernel/perf_*
 F:	arch/arm/oprofile/common.c
-F:	arch/arm/include/asm/pmu.h
 F:	arch/arm/kernel/hw_breakpoint.c
 F:	arch/arm/include/asm/hw_breakpoint.h
+F:	arch/arm/include/asm/perf_event.h
+F:	drivers/perf/arm_pmu.c
+F:	include/linux/perf/arm_pmu.h
 
 ARM PORT
 M:	Russell King <linux@arm.linux.org.uk>
@@ -8120,6 +8122,15 @@ F: include/linux/power_supply.h
 F:	drivers/power/
 X:	drivers/power/avs/
 
+POWER STATE COORDINATION INTERFACE (PSCI)
+M:	Mark Rutland <mark.rutland@arm.com>
+M:	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+L:	linux-arm-kernel@lists.infradead.org
+S:	Maintained
+F:	drivers/firmware/psci.c
+F:	include/linux/psci.h
+F:	include/uapi/linux/psci.h
+
 PNP SUPPORT
 M:	"Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
 S:	Maintained
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 41cbb4a53066..0d1b717e1eca 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -188,6 +188,9 @@ config ARCH_HAS_ILOG2_U64
 config ARCH_HAS_BANDGAP
 	bool
 
+config FIX_EARLYCON_MEM
+	def_bool y if MMU
+
 config GENERIC_HWEIGHT
 	bool
 	default y
@@ -1496,6 +1499,7 @@ config HOTPLUG_CPU
 config ARM_PSCI
 	bool "Support for the ARM Power State Coordination Interface (PSCI)"
 	depends on CPU_V7
+	select ARM_PSCI_FW
 	help
 	  Say Y here if you want Linux to communicate with system firmware
 	  implementing the PSCI specification for CPU-centric power
@@ -1700,13 +1704,24 @@ config HIGHPTE
 	  consumed by page tables.  Setting this option will allow
 	  user-space 2nd level page tables to reside in high memory.
 
-config HW_PERF_EVENTS
-	bool "Enable hardware performance counter support for perf events"
-	depends on PERF_EVENTS
+config CPU_SW_DOMAIN_PAN
+	bool "Enable use of CPU domains to implement privileged no-access"
+	depends on MMU && !ARM_LPAE
 	default y
 	help
-	  Enable hardware performance counter support for perf events. If
-	  disabled, perf events will use software events only.
+	  Increase kernel security by ensuring that normal kernel accesses
+	  are unable to access userspace addresses.  This can help prevent
+	  use-after-free bugs becoming an exploitable privilege escalation
+	  by ensuring that magic values (such as LIST_POISON) will always
+	  fault when dereferenced.
+
+	  CPUs with low-vector mappings use a best-efforts implementation.
+	  Their lower 1MB needs to remain accessible for the vectors, but
+	  the remainder of userspace will become appropriately inaccessible.
+
+config HW_PERF_EVENTS
+	def_bool y
+	depends on ARM_PMU
 
 config SYS_SUPPORTS_HUGETLBFS
 	def_bool y
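
The CPU_SW_DOMAIN_PAN help text above is the heart of this series: the kernel normally runs with the userspace domain set to "no access", and only the uaccess helpers (added to asm/uaccess.h further down) open it temporarily. A minimal sketch, not part of the patch, of why this catches use-after-free bugs via the LIST_POISON values mentioned in the help text:

    #include <linux/list.h>

    /*
     * Illustrative only.  list_del() poisons the unlinked entry with
     * LIST_POISON1/2, which on ARM fall in the low, userspace range of
     * the address space.  With CPU_SW_DOMAIN_PAN the user domain is
     * normally "no access", so this dereference takes a domain fault
     * instead of silently reading whatever happens to be mapped there.
     */
    static struct list_head *use_after_del(struct list_head *entry)
    {
            list_del(entry);          /* entry->next = LIST_POISON1 */
            return entry->next->next; /* BUG: faults under SW PAN */
    }
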
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 92e54d7c6f46..2b25b6038f66 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -65,14 +65,10 @@ static int mcpm_cpu_kill(unsigned int cpu)
 	return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
 }
 
-static int mcpm_cpu_disable(unsigned int cpu)
+static bool mcpm_cpu_can_disable(unsigned int cpu)
 {
-	/*
-	 * We assume all CPUs may be shut down.
-	 * This would be the hook to use for eventual Secure
-	 * OS migration requests as described in the PSCI spec.
-	 */
-	return 0;
+	/* We assume all CPUs may be shut down. */
+	return true;
 }
 
 static void mcpm_cpu_die(unsigned int cpu)
@@ -92,7 +88,7 @@ static struct smp_operations __initdata mcpm_smp_ops = {
 	.smp_secondary_init	= mcpm_secondary_init,
 #ifdef CONFIG_HOTPLUG_CPU
 	.cpu_kill		= mcpm_cpu_kill,
-	.cpu_disable		= mcpm_cpu_disable,
+	.cpu_can_disable	= mcpm_cpu_can_disable,
 	.cpu_die		= mcpm_cpu_die,
 #endif
 };
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 30b3bc1666d2..be648eb47cd9 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -12,7 +12,6 @@ generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += local.h
 generic-y += local64.h
-generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += msgbuf.h
 generic-y += param.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 4abe57279c66..7bbf325a4f31 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -108,33 +108,37 @@
 	.endm
 #endif
 
-	.macro asm_trace_hardirqs_off
+	.macro asm_trace_hardirqs_off, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
+	.if \save
 	stmdb   sp!, {r0-r3, ip, lr}
+	.endif
 	bl	trace_hardirqs_off
+	.if \save
 	ldmia	sp!, {r0-r3, ip, lr}
+	.endif
 #endif
 	.endm
 
-	.macro asm_trace_hardirqs_on_cond, cond
+	.macro asm_trace_hardirqs_on, cond=al, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
 	/*
 	 * actually the registers should be pushed and pop'd conditionally, but
 	 * after bl the flags are certainly clobbered
 	 */
+	.if \save
 	stmdb   sp!, {r0-r3, ip, lr}
+	.endif
 	bl\cond	trace_hardirqs_on
+	.if \save
 	ldmia	sp!, {r0-r3, ip, lr}
+	.endif
 #endif
 	.endm
 
-	.macro asm_trace_hardirqs_on
-	asm_trace_hardirqs_on_cond al
-	.endm
-
-	.macro disable_irq
+	.macro disable_irq, save=1
 	disable_irq_notrace
-	asm_trace_hardirqs_off
+	asm_trace_hardirqs_off \save
 	.endm
 
 	.macro enable_irq
@@ -173,7 +177,7 @@
 
 	.macro restore_irqs, oldcpsr
 	tst	\oldcpsr, #PSR_I_BIT
-	asm_trace_hardirqs_on_cond eq
+	asm_trace_hardirqs_on cond=eq
 	restore_irqs_notrace \oldcpsr
 	.endm
 
@@ -445,6 +449,53 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
 #endif
 	.endm
 
+	.macro	uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_DISABLE
+	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_ENABLE
+	mcr	p15, 0, \tmp, c3, c0, 0
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	mrc	p15, 0, \tmp, c3, c0, 0
+	str	\tmp, [sp, #S_FRAME_SIZE]
+#endif
+	.endm
+
+	.macro	uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	ldr	r0, [sp, #S_FRAME_SIZE]
+	mcr	p15, 0, r0, c3, c0, 0
+#endif
+	.endm
+
+	.macro	uaccess_save_and_disable, tmp
+	uaccess_save \tmp
+	uaccess_disable \tmp
+	.endm
+
 	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
 	.macro	ret\c, reg
 #if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 70393574e0fa..3ff5642d9788 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -2,7 +2,6 @@
 #define __ASM_BARRIER_H
 
 #ifndef __ASSEMBLY__
-#include <asm/outercache.h>
 
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
@@ -37,12 +36,20 @@
 #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #endif
 
+#ifdef CONFIG_ARM_HEAVY_MB
+extern void (*soc_mb)(void);
+extern void arm_heavy_mb(void);
+#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0)
+#else
+#define __arm_heavy_mb(x...) dsb(x)
+#endif
+
 #ifdef CONFIG_ARCH_HAS_BARRIERS
 #include <mach/barriers.h>
 #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
-#define mb()		do { dsb(); outer_sync(); } while (0)
+#define mb()		__arm_heavy_mb()
 #define rmb()		dsb()
-#define wmb()		do { dsb(st); outer_sync(); } while (0)
+#define wmb()		__arm_heavy_mb(st)
 #define dma_rmb()	dmb(osh)
 #define dma_wmb()	dmb(oshst)
 #else
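
With outer_sync() gone from mb()/wmb(), a platform that still needs a heavier sync than dsb() provides it through the new soc_mb hook that arm_heavy_mb() invokes. A hedged sketch of the registration pattern, with made-up my_soc_* names (the real adopters in this series are the OMAP4 and L2 cache changes listed in the diffstat):

    #include <asm/barrier.h>

    static void my_soc_mb(void)
    {
            /* drain the SoC interconnect's posted writes here */
    }

    static void __init my_soc_barrier_init(void)
    {
            /* arm_heavy_mb() will call this hook once it is set */
            soc_mb = my_soc_mb;
    }
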
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 56380995f4c3..e943e6cee254 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -35,9 +35,9 @@
 static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	*p |= mask;
@@ -47,9 +47,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
 static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	*p &= ~mask;
@@ -59,9 +59,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
 static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	*p ^= mask;
@@ -73,9 +73,9 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	res = *p;
@@ -90,9 +90,9 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	res = *p;
@@ -107,9 +107,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	res = *p;
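
All six helpers above make the same two substitutions. BIT_MASK() and BIT_WORD() from linux/bitops.h are defined as 1UL << (nr % BITS_PER_LONG) and nr / BITS_PER_LONG, so with BITS_PER_LONG == 32 they compile to exactly the old open-coded expressions. A small equivalence sketch (illustrative, not part of the patch):

    #include <linux/bitops.h>
    #include <linux/bug.h>

    static void bit_helper_equivalence(unsigned int bit)
    {
            /* identical on 32-bit ARM, where BITS_PER_LONG == 32 */
            BUILD_BUG_ON(BITS_PER_LONG != 32);
            BUG_ON(BIT_MASK(bit) != (1UL << (bit & 31)));
            BUG_ON(BIT_WORD(bit) != (bit >> 5));
    }
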
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4812cda8fd17..d5525bfc7e3e 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -140,8 +140,6 @@ extern struct cpu_cache_fns cpu_cache;
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_map_area			cpu_cache.dma_map_area
-#define dmac_unmap_area			cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -161,8 +159,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
@@ -506,4 +502,21 @@ static inline void set_kernel_text_ro(void) { }
 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
 			     void *kaddr, unsigned long len);
 
+/**
+ * secure_flush_area - ensure coherency across the secure boundary
+ * @addr: virtual address
+ * @size: size of region
+ *
+ * Ensure that the specified area of memory is coherent across the secure
+ * boundary from the non-secure side.  This is used when calling secure
+ * firmware where the secure firmware does not ensure coherency.
+ */
+static inline void secure_flush_area(const void *addr, size_t size)
+{
+	phys_addr_t phys = __pa(addr);
+
+	__cpuc_flush_dcache_area((void *)addr, size);
+	outer_flush_range(phys, phys + size);
+}
+
 #endif
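
secure_flush_area() above closes a gap when calling into secure firmware: the data must be pushed past both cache levels, because the firmware may read it with caching disabled or with different memory attributes. A hedged usage sketch; my_invoke_firmware() is a made-up stand-in for a platform's SMC trampoline:

    #include <linux/types.h>
    #include <asm/cacheflush.h>
    #include <asm/memory.h>

    extern int my_invoke_firmware(phys_addr_t buf, size_t len); /* hypothetical */

    static int pass_buffer_to_firmware(void *buf, size_t len)
    {
            /* flush L1 by virtual address, then L2 by physical range */
            secure_flush_area(buf, len);
            return my_invoke_firmware(virt_to_phys(buf), len);
    }
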
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index b52101d37ec7..a68b9d8a71fe 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -14,7 +14,7 @@
 #include <xen/xen.h>
 #include <asm/xen/hypervisor.h>
 
-#define DMA_ERROR_CODE	(~0)
+#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
 extern struct dma_map_ops arm_dma_ops;
 extern struct dma_map_ops arm_coherent_dma_ops;
 
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe446425e..e878129f2fee 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -34,15 +34,14 @@
  */
 #ifndef CONFIG_IO_36
 #define DOMAIN_KERNEL	0
-#define DOMAIN_TABLE	0
 #define DOMAIN_USER	1
 #define DOMAIN_IO	2
 #else
 #define DOMAIN_KERNEL	2
-#define DOMAIN_TABLE	2
 #define DOMAIN_USER	1
 #define DOMAIN_IO	0
 #endif
+#define DOMAIN_VECTORS	3
 
 /*
  * Domain types
@@ -55,11 +54,46 @@
 #define DOMAIN_MANAGER	1
 #endif
 
-#define domain_val(dom,type)	((type) << (2*(dom)))
+#define domain_mask(dom)	((3) << (2 * (dom)))
+#define domain_val(dom,type)	((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+	domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE \
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE \
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_CPU_USE_DOMAINS
+static inline unsigned int get_domain(void)
+{
+	unsigned int domain;
+
+	asm(
+	"mrc	p15, 0, %0, c3, c0	@ get domain"
+	 : "=r" (domain));
+
+	return domain;
+}
+
 static inline void set_domain(unsigned val)
 {
 	asm volatile(
@@ -68,17 +102,16 @@ static inline void set_domain(unsigned val)
 	isb();
 }
 
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)					\
 	do {							\
-	struct thread_info *thread = current_thread_info();	\
-	unsigned int domain = thread->cpu_domain;		\
-	domain &= ~domain_val(dom, DOMAIN_MANAGER);		\
-	thread->cpu_domain = domain | domain_val(dom, type);	\
-	set_domain(thread->cpu_domain);				\
+	unsigned int domain = get_domain();			\
+	domain &= ~domain_mask(dom);				\
+	domain = domain | domain_val(dom, type);		\
+	set_domain(domain);					\
 	} while (0)
 
 #else
-static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type)	{ }
 #endif
 
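
The new helpers make the domain access control register (DACR) a plain read-modify-write target: domain_mask() isolates one 2-bit field, domain_val() builds its new contents, and get_domain()/set_domain() move the whole register. A sketch of what the rewritten modify_domain() expands to, using only names defined above (illustrative, not part of the patch):

    #include <asm/domain.h>

    static void allow_user_domain(void)
    {
            unsigned int dacr = get_domain();

            dacr &= ~domain_mask(DOMAIN_USER);              /* clear the field */
            dacr |= domain_val(DOMAIN_USER, DOMAIN_CLIENT); /* checked access */
            set_domain(dacr);                               /* write DACR back */
    }
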
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 0415eae1df27..58cfe9f1a687 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -6,9 +6,13 @@
 #define FIXADDR_TOP		(FIXADDR_END - PAGE_SIZE)
 
 #include <asm/kmap_types.h>
+#include <asm/pgtable.h>
 
 enum fixed_addresses {
-	FIX_KMAP_BEGIN,
+	FIX_EARLYCON_MEM_BASE,
+	__end_of_permanent_fixed_addresses,
+
+	FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
 	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
 
 	/* Support writing RO kernel text via kprobes, jump labels, etc. */
@@ -18,7 +22,16 @@ enum fixed_addresses {
 	__end_of_fixed_addresses
 };
 
+#define FIXMAP_PAGE_COMMON	(L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
+
+#define FIXMAP_PAGE_NORMAL	(FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
+
+/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
+#define FIXMAP_PAGE_IO		(FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
+#define FIXMAP_PAGE_NOCACHE	FIXMAP_PAGE_IO
+
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+void __init early_fixmap_init(void);
 
 #include <asm-generic/fixmap.h>
 
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 5eed82809d82..6795368ad023 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -22,8 +22,11 @@
 #ifdef CONFIG_SMP
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags;				\
 	smp_mb();						\
 	prefetchw(uaddr);					\
+	__ua_flags = uaccess_save_and_enable();			\
 	__asm__ __volatile__(					\
 	"1:	ldrex	%1, [%3]\n"				\
 	"	" insn "\n"					\
@@ -34,12 +37,15 @@
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret;
 	u32 val;
 
@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	smp_mb();
 	/* Prefetching cannot fault */
 	prefetchw(uaddr);
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrex	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "=&r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 	smp_mb();
 
 	*uval = val;
@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 #include <asm/domain.h>
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags = uaccess_save_and_enable();	\
 	__asm__ __volatile__(					\
 	"1:	" TUSER(ldr) "	%1, [%3]\n"			\
 	"	" insn "\n"					\
@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret = 0;
 	u32 val;
 
@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	preempt_disable();
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	" TUSER(ldr) "	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "+r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 
 	*uval = val;
 	preempt_enable();
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index a3c24cd5b7c8..cab07f69382d 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -158,8 +158,6 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
 #define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
-#define dmac_map_area			__glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 #endif
 
165 163
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 563b92fc2f41..c2bf24f40177 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -129,21 +129,4 @@ static inline void outer_resume(void) { }
 
 #endif
 
-#ifdef CONFIG_OUTER_CACHE_SYNC
-/**
- * outer_sync - perform a sync point for outer cache
- *
- * Ensure that all outer cache operations are complete and any store
- * buffers are drained.
- */
-static inline void outer_sync(void)
-{
-	if (outer_cache.sync)
-		outer_cache.sync();
-}
-#else
-static inline void outer_sync(void)
-{ }
-#endif
-
 #endif	/* __ASM_OUTERCACHE_H */
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5e68278e953e..d0131ee6f6af 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -23,6 +23,7 @@
 #define PMD_PXNTABLE		(_AT(pmdval_t, 1) << 2)     /* v7 */
 #define PMD_BIT4		(_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK		PMD_DOMAIN(0x0f)
 #define PMD_PROTECTION		(_AT(pmdval_t, 1) << 9)		/* v5 */
 /*
  * - section
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index c25ef3ec6d1f..68ee3ce17b82 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -14,34 +14,11 @@
 #ifndef __ASM_ARM_PSCI_H
 #define __ASM_ARM_PSCI_H
 
-#define PSCI_POWER_STATE_TYPE_STANDBY		0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
-
-struct psci_power_state {
-	u16	id;
-	u8	type;
-	u8	affinity_level;
-};
-
-struct psci_operations {
-	int (*cpu_suspend)(struct psci_power_state state,
-			   unsigned long entry_point);
-	int (*cpu_off)(struct psci_power_state state);
-	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
-	int (*migrate)(unsigned long cpuid);
-	int (*affinity_info)(unsigned long target_affinity,
-			unsigned long lowest_affinity_level);
-	int (*migrate_info_type)(void);
-};
-
-extern struct psci_operations psci_ops;
 extern struct smp_operations psci_smp_ops;
 
 #ifdef CONFIG_ARM_PSCI
-int psci_init(void);
 bool psci_smp_available(void);
 #else
-static inline int psci_init(void) { return 0; }
 static inline bool psci_smp_available(void) { return false; }
 #endif
 
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 2f3ac1ba6fb4..ef356659b4f4 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -74,7 +74,6 @@ extern void secondary_startup_arm(void);
 extern int __cpu_disable(void);
 
 extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -105,6 +104,7 @@ struct smp_operations {
 #ifdef CONFIG_HOTPLUG_CPU
 	int  (*cpu_kill)(unsigned int cpu);
 	void (*cpu_die)(unsigned int cpu);
+	bool (*cpu_can_disable)(unsigned int cpu);
 	int  (*cpu_disable)(unsigned int cpu);
 #endif
 #endif
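
The new cpu_can_disable() hook splits "may this CPU be unplugged?" out of cpu_disable(), which previously had to encode that answer in its return value (as the mcpm conversion above shows). A hedged sketch of a platform adopting it; the my_plat_* names are invented:

    #include <asm/smp.h>

    static bool my_plat_cpu_can_disable(unsigned int cpu)
    {
            /* e.g. only the boot CPU lacks a power-down path */
            return cpu != 0;
    }

    static struct smp_operations my_plat_smp_ops __initdata = {
    #ifdef CONFIG_HOTPLUG_CPU
            .cpu_can_disable = my_plat_cpu_can_disable,
    #endif
    };
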
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 993e5224d8f7..f9080717fc88 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -107,4 +107,13 @@ static inline u32 mpidr_hash_size(void)
 extern int platform_can_secondary_boot(void);
 extern int platform_can_cpu_hotplug(void);
 
+#ifdef CONFIG_HOTPLUG_CPU
+extern int platform_can_hotplug_cpu(unsigned int cpu);
+#else
+static inline int platform_can_hotplug_cpu(unsigned int cpu)
+{
+	return 0;
+}
+#endif
+
 #endif
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index bd32eded3e50..d0a1119dcaf3 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -74,9 +74,6 @@ struct thread_info {
 	.flags		= 0,						\
 	.preempt_count	= INIT_PREEMPT_COUNT,				\
 	.addr_limit	= KERNEL_DS,					\
-	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_IO, DOMAIN_CLIENT),		\
 }
 
 #define init_thread_info	(init_thread_union.thread_info)
@@ -136,22 +133,18 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 
 /*
  * thread information flags:
- * TIF_SYSCALL_TRACE	- syscall trace active
- * TIF_SYSCAL_AUDIT	- syscall auditing active
- * TIF_SIGPENDING	- signal pending
- * TIF_NEED_RESCHED	- rescheduling necessary
- * TIF_NOTIFY_RESUME	- callback before returning to user
  * TIF_USEDFPU		- FPU was used by this task this quantum (SMP)
  * TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
  */
-#define TIF_SIGPENDING		0
-#define TIF_NEED_RESCHED	1
+#define TIF_SIGPENDING		0	/* signal pending */
+#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
-#define TIF_UPROBE		7
-#define TIF_SYSCALL_TRACE	8
-#define TIF_SYSCALL_AUDIT	9
-#define TIF_SYSCALL_TRACEPOINT	10
-#define TIF_SECCOMP		11	/* seccomp syscall filtering active */
+#define TIF_UPROBE		3	/* breakpointed or singlestepping */
+#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
+#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
+#define TIF_SECCOMP		7	/* seccomp syscall filtering active */
+
 #define TIF_NOHZ		12	/* in adaptive nohz mode */
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 74b17d09ef7a..8cc85a4ebec2 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -50,6 +50,35 @@ struct exception_table_entry
 extern int fixup_exception(struct pt_regs *regs);
 
 /*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel can not inadvertantly
+ * perform such accesses (eg, via list poison values) which could then
+ * be exploited for priviledge escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	unsigned int old_domain = get_domain();
+
+	/* Set the current domain access to permit user accesses */
+	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+	return old_domain;
+#else
+	return 0;
+#endif
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/* Restore the user access mask */
+	set_domain(flags);
+#endif
+}
+
+/*
  * These two are intentionally not defined anywhere - if the kernel
  * code generates any references to them, that's a bug.
  */
@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *);
 		register typeof(x) __r2 asm("r2");		\
 		register unsigned long __l asm("r1") = __limit;	\
 		register int __e asm("r0");			\
+		unsigned int __ua_flags = uaccess_save_and_enable(); \
 		switch (sizeof(*(__p))) {			\
 		case 1:						\
 			if (sizeof((x)) >= 8)			\
@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *);
 			break;					\
 		default: __e = __get_user_bad(); break;		\
 		}						\
+		uaccess_restore(__ua_flags);			\
 		x = (typeof(*(p))) __r2;			\
 		__e;						\
 	})
@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long);
 		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register unsigned long __l asm("r1") = __limit;	\
 		register int __e asm("r0");			\
+		unsigned int __ua_flags = uaccess_save_and_enable(); \
 		switch (sizeof(*(__p))) {			\
 		case 1:						\
 			__put_user_x(__r2, __p, __e, __l, 1);	\
@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long);
 			break;					\
 		default: __e = __put_user_bad(); break;		\
 		}						\
+		uaccess_restore(__ua_flags);			\
 		__e;						\
 	})
 
@@ -300,20 +333,23 @@ static inline void set_fs(mm_segment_t fs)
 do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
+	unsigned int __ua_flags;					\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
 	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
 	default: (__gu_val) = __get_user_bad();				\
 	}								\
+	uaccess_restore(__ua_flags);					\
 	(x) = (__typeof__(*(ptr)))__gu_val;				\
 } while (0)
 
-#define __get_user_asm_byte(x, addr, err)			\
+#define __get_user_asm(x, addr, err, instr)			\
 	__asm__ __volatile__(					\
-	"1:	" TUSER(ldrb) "	%1,[%2],#0\n"			\
+	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
 	"2:\n"							\
 	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
@@ -329,6 +365,9 @@ do { \
 	: "r" (addr), "i" (-EFAULT)				\
 	: "cc")
 
+#define __get_user_asm_byte(x, addr, err)			\
+	__get_user_asm(x, addr, err, ldrb)
+
 #ifndef __ARMEB__
 #define __get_user_asm_half(x, __gu_addr, err)			\
 ({								\
@@ -348,22 +387,7 @@ do { \
 #endif
 
 #define __get_user_asm_word(x, addr, err)			\
-	__asm__ __volatile__(					\
-	"1:	" TUSER(ldr) "	%1,[%2],#0\n"			\
-	"2:\n"							\
-	"	.pushsection .text.fixup,\"ax\"\n"		\
-	"	.align	2\n"					\
-	"3:	mov	%0, %3\n"				\
-	"	mov	%1, #0\n"				\
-	"	b	2b\n"					\
-	"	.popsection\n"					\
-	"	.pushsection __ex_table,\"a\"\n"		\
-	"	.align	3\n"					\
-	"	.long	1b, 3b\n"				\
-	"	.popsection"					\
-	: "+r" (err), "=&r" (x)					\
-	: "r" (addr), "i" (-EFAULT)				\
-	: "cc")
+	__get_user_asm(x, addr, err, ldr)
 
 #define __put_user(x, ptr)					\
 ({								\
@@ -381,9 +405,11 @@ do { \
 #define __put_user_err(x, ptr, err)					\
 do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
+	unsigned int __ua_flags;					\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
 	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\
@@ -391,11 +417,12 @@ do { \
 	case 8:	__put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
 	default: __put_user_bad();					\
 	}								\
+	uaccess_restore(__ua_flags);					\
 } while (0)
 
-#define __put_user_asm_byte(x, __pu_addr, err)			\
+#define __put_user_asm(x, __pu_addr, err, instr)		\
 	__asm__ __volatile__(					\
-	"1:	" TUSER(strb) "	%1,[%2],#0\n"			\
+	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
 	"2:\n"							\
 	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
@@ -410,6 +437,9 @@ do { \
 	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
 	: "cc")
 
+#define __put_user_asm_byte(x, __pu_addr, err)			\
+	__put_user_asm(x, __pu_addr, err, strb)
+
 #ifndef __ARMEB__
 #define __put_user_asm_half(x, __pu_addr, err)			\
 ({								\
@@ -427,21 +457,7 @@ do { \
 #endif
 
 #define __put_user_asm_word(x, __pu_addr, err)			\
-	__asm__ __volatile__(					\
-	"1:	" TUSER(str) "	%1,[%2],#0\n"			\
-	"2:\n"							\
-	"	.pushsection .text.fixup,\"ax\"\n"		\
-	"	.align	2\n"					\
-	"3:	mov	%0, %3\n"				\
-	"	b	2b\n"					\
-	"	.popsection\n"					\
-	"	.pushsection __ex_table,\"a\"\n"		\
-	"	.align	3\n"					\
-	"	.long	1b, 3b\n"				\
-	"	.popsection"					\
-	: "+r" (err)						\
-	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
-	: "cc")
+	__put_user_asm(x, __pu_addr, err, str)
 
 #ifndef __ARMEB__
 #define __reg_oper0	"%R2"
@@ -474,11 +490,46 @@ do { \
 
 
 #ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_from_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_to_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_clear_user(addr, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
 #else
 #define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
 #define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
@@ -511,6 +562,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
 	return n;
 }
 
+/* These are from lib/ code, and use __get_user() and friends */
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
 extern __must_check long strlen_user(const char __user *str);
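
Everything in uaccess.h now follows one pattern: save the domain register and grant user access, perform the access, then restore. Because uaccess_save_and_enable() returns the previous DACR value and uaccess_restore() simply writes it back, the pairs nest safely. A hedged sketch of the pattern in isolation (real callers just use get_user()/copy_from_user(), which now do this internally, so the outer pair here is redundant but harmless):

    #include <linux/types.h>
    #include <linux/uaccess.h>

    static int read_user_word(u32 __user *uptr, u32 *val)
    {
            unsigned int ua_flags = uaccess_save_and_enable();
            int ret = __get_user(*val, uptr); /* user domain open here */

            uaccess_restore(ua_flags);        /* back to no-access */
            return ret;
    }
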
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index e69f7a19735d..af9e59bf3831 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -71,8 +71,7 @@ obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
 obj-$(CONFIG_CPU_PJ4B)		+= pj4-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o perf_callchain.o
-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o \
-				   perf_event_xscale.o perf_event_v6.o \
+obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event_xscale.o perf_event_v6.o \
 				   perf_event_v7.o
 CFLAGS_pj4-cp0.o		:= -marm
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
@@ -89,7 +88,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 
 obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
 ifeq ($(CONFIG_ARM_PSCI),y)
-obj-y				+= psci.o psci-call.o
+obj-y				+= psci-call.o
 obj-$(CONFIG_SMP)		+= psci_smp.o
 endif
 
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 5e5a51a99e68..f89811fb9a55 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy);
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index cb4fb1e69778..3e1c26eb32b4 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -149,10 +149,10 @@ ENDPROC(__und_invalid)
149#define SPFIX(code...) 149#define SPFIX(code...)
150#endif 150#endif
151 151
152 .macro svc_entry, stack_hole=0, trace=1 152 .macro svc_entry, stack_hole=0, trace=1, uaccess=1
153 UNWIND(.fnstart ) 153 UNWIND(.fnstart )
154 UNWIND(.save {r0 - pc} ) 154 UNWIND(.save {r0 - pc} )
155 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) 155 sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
156#ifdef CONFIG_THUMB2_KERNEL 156#ifdef CONFIG_THUMB2_KERNEL
157 SPFIX( str r0, [sp] ) @ temporarily saved 157 SPFIX( str r0, [sp] ) @ temporarily saved
158 SPFIX( mov r0, sp ) 158 SPFIX( mov r0, sp )
@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
167 ldmia r0, {r3 - r5} 167 ldmia r0, {r3 - r5}
168 add r7, sp, #S_SP - 4 @ here for interlock avoidance 168 add r7, sp, #S_SP - 4 @ here for interlock avoidance
169 mov r6, #-1 @ "" "" "" "" 169 mov r6, #-1 @ "" "" "" ""
170 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) 170 add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
171 SPFIX( addeq r2, r2, #4 ) 171 SPFIX( addeq r2, r2, #4 )
172 str r3, [sp, #-4]! @ save the "real" r0 copied 172 str r3, [sp, #-4]! @ save the "real" r0 copied
173 @ from the exception stack 173 @ from the exception stack
@@ -185,6 +185,11 @@ ENDPROC(__und_invalid)
185 @ 185 @
186 stmia r7, {r2 - r6} 186 stmia r7, {r2 - r6}
187 187
188 uaccess_save r0
189 .if \uaccess
190 uaccess_disable r0
191 .endif
192
188 .if \trace 193 .if \trace
189#ifdef CONFIG_TRACE_IRQFLAGS 194#ifdef CONFIG_TRACE_IRQFLAGS
190 bl trace_hardirqs_off 195 bl trace_hardirqs_off
@@ -194,7 +199,7 @@ ENDPROC(__und_invalid)
194 199
195 .align 5 200 .align 5
196__dabt_svc: 201__dabt_svc:
197 svc_entry 202 svc_entry uaccess=0
198 mov r2, sp 203 mov r2, sp
199 dabt_helper 204 dabt_helper
200 THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR 205 THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt)
368#error "sizeof(struct pt_regs) must be a multiple of 8" 373#error "sizeof(struct pt_regs) must be a multiple of 8"
369#endif 374#endif
370 375
371 .macro usr_entry, trace=1 376 .macro usr_entry, trace=1, uaccess=1
372 UNWIND(.fnstart ) 377 UNWIND(.fnstart )
373 UNWIND(.cantunwind ) @ don't unwind the user space 378 UNWIND(.cantunwind ) @ don't unwind the user space
374 sub sp, sp, #S_FRAME_SIZE 379 sub sp, sp, #S_FRAME_SIZE
@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt)
400 ARM( stmdb r0, {sp, lr}^ ) 405 ARM( stmdb r0, {sp, lr}^ )
401 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) 406 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
402 407
408 .if \uaccess
409 uaccess_disable ip
410 .endif
411
403 @ Enable the alignment trap while in kernel mode 412 @ Enable the alignment trap while in kernel mode
404 ATRAP( teq r8, r7) 413 ATRAP( teq r8, r7)
405 ATRAP( mcrne p15, 0, r8, c1, c0, 0) 414 ATRAP( mcrne p15, 0, r8, c1, c0, 0)
@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt)
435 444
436 .align 5 445 .align 5
437__dabt_usr: 446__dabt_usr:
438 usr_entry 447 usr_entry uaccess=0
439 kuser_cmpxchg_check 448 kuser_cmpxchg_check
440 mov r2, sp 449 mov r2, sp
441 dabt_helper 450 dabt_helper
@@ -458,7 +467,7 @@ ENDPROC(__irq_usr)
458 467
459 .align 5 468 .align 5
460__und_usr: 469__und_usr:
461 usr_entry 470 usr_entry uaccess=0
462 471
463 mov r2, r4 472 mov r2, r4
464 mov r3, r5 473 mov r3, r5
@@ -484,6 +493,8 @@ __und_usr:
4841: ldrt r0, [r4] 4931: ldrt r0, [r4]
485 ARM_BE8(rev r0, r0) @ little endian instruction 494 ARM_BE8(rev r0, r0) @ little endian instruction
486 495
496 uaccess_disable ip
497
487 @ r0 = 32-bit ARM instruction which caused the exception 498 @ r0 = 32-bit ARM instruction which caused the exception
488 @ r2 = PC value for the following instruction (:= regs->ARM_pc) 499 @ r2 = PC value for the following instruction (:= regs->ARM_pc)
489 @ r4 = PC value for the faulting instruction 500 @ r4 = PC value for the faulting instruction
@@ -518,9 +529,10 @@ __und_usr_thumb:
5182: ldrht r5, [r4] 5292: ldrht r5, [r4]
519ARM_BE8(rev16 r5, r5) @ little endian instruction 530ARM_BE8(rev16 r5, r5) @ little endian instruction
520 cmp r5, #0xe800 @ 32bit instruction if xx != 0 531 cmp r5, #0xe800 @ 32bit instruction if xx != 0
521 blo __und_usr_fault_16 @ 16bit undefined instruction 532 blo __und_usr_fault_16_pan @ 16bit undefined instruction
5223: ldrht r0, [r2] 5333: ldrht r0, [r2]
523ARM_BE8(rev16 r0, r0) @ little endian instruction 534ARM_BE8(rev16 r0, r0) @ little endian instruction
535 uaccess_disable ip
524 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 536 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
525 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update 537 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
526 orr r0, r0, r5, lsl #16 538 orr r0, r0, r5, lsl #16
@@ -715,6 +727,8 @@ ENDPROC(no_fp)
715__und_usr_fault_32: 727__und_usr_fault_32:
716 mov r1, #4 728 mov r1, #4
717 b 1f 729 b 1f
730__und_usr_fault_16_pan:
731 uaccess_disable ip
718__und_usr_fault_16: 732__und_usr_fault_16:
719 mov r1, #2 733 mov r1, #2
7201: mov r0, sp 7341: mov r0, sp
@@ -770,6 +784,8 @@ ENTRY(__switch_to)
770 ldr r4, [r2, #TI_TP_VALUE] 784 ldr r4, [r2, #TI_TP_VALUE]
771 ldr r5, [r2, #TI_TP_VALUE + 4] 785 ldr r5, [r2, #TI_TP_VALUE + 4]
772#ifdef CONFIG_CPU_USE_DOMAINS 786#ifdef CONFIG_CPU_USE_DOMAINS
787 mrc p15, 0, r6, c3, c0, 0 @ Get domain register
788 str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
773 ldr r6, [r2, #TI_CPU_DOMAIN] 789 ldr r6, [r2, #TI_CPU_DOMAIN]
774#endif 790#endif
775 switch_tls r1, r4, r5, r3, r7 791 switch_tls r1, r4, r5, r3, r7
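The uaccess_save / uaccess_disable / uaccess_restore macros threaded through svc_entry, usr_entry and svc_exit implement software PAN via the domain register: each exception entry records the interrupted context's DACR and shuts off kernel access to the userspace domain, and the exit path puts the saved value back. In C terms the bracketing amounts to the following sketch (DACR_UACCESS_DISABLE and the helper names are illustrative; the real macros stash the saved word in the exception stack frame):

	/*
	 * Sketch of the CONFIG_CPU_SW_DOMAIN_PAN entry/exit bracketing.
	 * c3/c0/0 is the Domain Access Control Register, the same
	 * register the __switch_to hunk above reads and writes.
	 */
	static inline unsigned int dacr_read(void)
	{
		unsigned int dacr;
		asm volatile("mrc p15, 0, %0, c3, c0, 0" : "=r" (dacr));
		return dacr;
	}

	static inline void dacr_write(unsigned int dacr)
	{
		asm volatile("mcr p15, 0, %0, c3, c0, 0" : : "r" (dacr));
	}

	void exception(struct pt_regs *regs)
	{
		unsigned int saved = dacr_read();	/* uaccess_save    */

		dacr_write(DACR_UACCESS_DISABLE);	/* uaccess_disable */
		handle_exception(regs);			/* user access now faults */
		dacr_write(saved);			/* uaccess_restore */
	}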
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b48dd4f37f80..30a7228eaceb 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -24,35 +24,55 @@
24 24
25 25
26 .align 5 26 .align 5
27#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
27/* 28/*
28 * This is the fast syscall return path. We do as little as 29 * This is the fast syscall return path. We do as little as possible here,
29 * possible here, and this includes saving r0 back into the SVC 30 * such as avoiding writing r0 to the stack. We only use this path if we
30 * stack. 31 * have tracing and context tracking disabled - the overheads from those
32 * features make this path too inefficient.
31 */ 33 */
32ret_fast_syscall: 34ret_fast_syscall:
33 UNWIND(.fnstart ) 35 UNWIND(.fnstart )
34 UNWIND(.cantunwind ) 36 UNWIND(.cantunwind )
35 disable_irq @ disable interrupts 37 disable_irq_notrace @ disable interrupts
36 ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing 38 ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
37 tst r1, #_TIF_SYSCALL_WORK 39 tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
38 bne __sys_trace_return
39 tst r1, #_TIF_WORK_MASK
40 bne fast_work_pending 40 bne fast_work_pending
41 asm_trace_hardirqs_on
42 41
43 /* perform architecture specific actions before user return */ 42 /* perform architecture specific actions before user return */
44 arch_ret_to_user r1, lr 43 arch_ret_to_user r1, lr
45 ct_user_enter
46 44
47 restore_user_regs fast = 1, offset = S_OFF 45 restore_user_regs fast = 1, offset = S_OFF
48 UNWIND(.fnend ) 46 UNWIND(.fnend )
47ENDPROC(ret_fast_syscall)
49 48
50/* 49 /* Ok, we need to do extra processing, enter the slow path. */
51 * Ok, we need to do extra processing, enter the slow path.
52 */
53fast_work_pending: 50fast_work_pending:
54 str r0, [sp, #S_R0+S_OFF]! @ returned r0 51 str r0, [sp, #S_R0+S_OFF]! @ returned r0
55work_pending: 52 /* fall through to work_pending */
53#else
54/*
55 * The "replacement" ret_fast_syscall for when tracing or context tracking
56 * is enabled. As we will need to call out to some C functions, we save
57 * r0 first to avoid needing to save registers around each C function call.
58 */
59ret_fast_syscall:
60 UNWIND(.fnstart )
61 UNWIND(.cantunwind )
62 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
63 disable_irq_notrace @ disable interrupts
64 ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
65 tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
66 beq no_work_pending
67 UNWIND(.fnend )
68ENDPROC(ret_fast_syscall)
69
70 /* Slower path - fall through to work_pending */
71#endif
72
73 tst r1, #_TIF_SYSCALL_WORK
74 bne __sys_trace_return_nosave
75slow_work_pending:
56 mov r0, sp @ 'regs' 76 mov r0, sp @ 'regs'
57 mov r2, why @ 'syscall' 77 mov r2, why @ 'syscall'
58 bl do_work_pending 78 bl do_work_pending
@@ -65,16 +85,19 @@ ENDPROC(ret_fast_syscall)
65 85
66/* 86/*
67 * "slow" syscall return path. "why" tells us if this was a real syscall. 87 * "slow" syscall return path. "why" tells us if this was a real syscall.
88 * IRQs may be enabled here, so always disable them. Note that we use the
89 * "notrace" version to avoid calling into the tracing code unnecessarily.
90 * do_work_pending() will update this state if necessary.
68 */ 91 */
69ENTRY(ret_to_user) 92ENTRY(ret_to_user)
70ret_slow_syscall: 93ret_slow_syscall:
71 disable_irq @ disable interrupts 94 disable_irq_notrace @ disable interrupts
72ENTRY(ret_to_user_from_irq) 95ENTRY(ret_to_user_from_irq)
73 ldr r1, [tsk, #TI_FLAGS] 96 ldr r1, [tsk, #TI_FLAGS]
74 tst r1, #_TIF_WORK_MASK 97 tst r1, #_TIF_WORK_MASK
75 bne work_pending 98 bne slow_work_pending
76no_work_pending: 99no_work_pending:
77 asm_trace_hardirqs_on 100 asm_trace_hardirqs_on save = 0
78 101
79 /* perform architecture specific actions before user return */ 102 /* perform architecture specific actions before user return */
80 arch_ret_to_user r1, lr 103 arch_ret_to_user r1, lr
@@ -174,6 +197,8 @@ ENTRY(vector_swi)
174 USER( ldr scno, [lr, #-4] ) @ get SWI instruction 197 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
175#endif 198#endif
176 199
200 uaccess_disable tbl
201
177 adr tbl, sys_call_table @ load syscall table pointer 202 adr tbl, sys_call_table @ load syscall table pointer
178 203
179#if defined(CONFIG_OABI_COMPAT) 204#if defined(CONFIG_OABI_COMPAT)
@@ -252,6 +277,12 @@ __sys_trace_return:
252 bl syscall_trace_exit 277 bl syscall_trace_exit
253 b ret_slow_syscall 278 b ret_slow_syscall
254 279
280__sys_trace_return_nosave:
281 enable_irq_notrace
282 mov r0, sp
283 bl syscall_trace_exit
284 b ret_slow_syscall
285
255 .align 5 286 .align 5
256#ifdef CONFIG_ALIGNMENT_TRAP 287#ifdef CONFIG_ALIGNMENT_TRAP
257 .type __cr_alignment, #object 288 .type __cr_alignment, #object
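The rewritten ret_fast_syscall above folds the old pair of thread-flag tests into a single tst against the OR of both masks, taking the slow path only when some work bit is set. As a rough C equivalent (control flow simplified; names as in the patch):

	unsigned long flags = current_thread_info()->flags;

	if (flags & (_TIF_SYSCALL_WORK | _TIF_WORK_MASK)) {
		if (flags & _TIF_SYSCALL_WORK)		/* __sys_trace_return_nosave */
			syscall_trace_exit(regs);
		else					/* slow_work_pending */
			do_work_pending(regs, flags, why);
	}
	/* otherwise: fast path, restore user registers and return */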
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 1a0045abead7..0d22ad206d52 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -196,7 +196,7 @@
196 msr cpsr_c, \rtemp @ switch back to the SVC mode 196 msr cpsr_c, \rtemp @ switch back to the SVC mode
197 .endm 197 .endm
198 198
199#ifndef CONFIG_THUMB2_KERNEL 199
200 .macro svc_exit, rpsr, irq = 0 200 .macro svc_exit, rpsr, irq = 0
201 .if \irq != 0 201 .if \irq != 0
202 @ IRQs already off 202 @ IRQs already off
@@ -215,6 +215,10 @@
215 blne trace_hardirqs_off 215 blne trace_hardirqs_off
216#endif 216#endif
217 .endif 217 .endif
218 uaccess_restore
219
220#ifndef CONFIG_THUMB2_KERNEL
221 @ ARM mode SVC restore
218 msr spsr_cxsf, \rpsr 222 msr spsr_cxsf, \rpsr
219#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) 223#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
220 @ We must avoid clrex due to Cortex-A15 erratum #830321 224 @ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -222,6 +226,20 @@
222 strex r1, r2, [r0] @ clear the exclusive monitor 226 strex r1, r2, [r0] @ clear the exclusive monitor
223#endif 227#endif
224 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr 228 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
229#else
230 @ Thumb mode SVC restore
231 ldr lr, [sp, #S_SP] @ top of the stack
232 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
233
234 @ We must avoid clrex due to Cortex-A15 erratum #830321
235 strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
236
237 stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
238 ldmia sp, {r0 - r12}
239 mov sp, lr
240 ldr lr, [sp], #4
241 rfeia sp!
242#endif
225 .endm 243 .endm
226 244
227 @ 245 @
@@ -241,6 +259,9 @@
241 @ on the stack remains correct). 259 @ on the stack remains correct).
242 @ 260 @
243 .macro svc_exit_via_fiq 261 .macro svc_exit_via_fiq
262 uaccess_restore
263#ifndef CONFIG_THUMB2_KERNEL
264 @ ARM mode restore
244 mov r0, sp 265 mov r0, sp
245 ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will 266 ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will
246 @ clobber state restored below) 267 @ clobber state restored below)
@@ -250,9 +271,27 @@
250 msr spsr_cxsf, r9 271 msr spsr_cxsf, r9
251 ldr r0, [r0, #S_R0] 272 ldr r0, [r0, #S_R0]
252 ldmia r8, {pc}^ 273 ldmia r8, {pc}^
274#else
275 @ Thumb mode restore
276 add r0, sp, #S_R2
277 ldr lr, [sp, #S_LR]
278 ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
279 @ clobber state restored below)
280 ldmia r0, {r2 - r12}
281 mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
282 msr cpsr_c, r1
283 sub r0, #S_R2
284 add r8, r0, #S_PC
285 ldmia r0, {r0 - r1}
286 rfeia r8
287#endif
253 .endm 288 .endm
254 289
290
255 .macro restore_user_regs, fast = 0, offset = 0 291 .macro restore_user_regs, fast = 0, offset = 0
292 uaccess_enable r1, isb=0
293#ifndef CONFIG_THUMB2_KERNEL
294 @ ARM mode restore
256 mov r2, sp 295 mov r2, sp
257 ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr 296 ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
258 ldr lr, [r2, #\offset + S_PC]! @ get pc 297 ldr lr, [r2, #\offset + S_PC]! @ get pc
@@ -270,72 +309,16 @@
270 @ after ldm {}^ 309 @ after ldm {}^
271 add sp, sp, #\offset + S_FRAME_SIZE 310 add sp, sp, #\offset + S_FRAME_SIZE
272 movs pc, lr @ return & move spsr_svc into cpsr 311 movs pc, lr @ return & move spsr_svc into cpsr
273 .endm 312#elif defined(CONFIG_CPU_V7M)
274 313 @ V7M restore.
275#else /* CONFIG_THUMB2_KERNEL */ 314 @ Note that we don't need to do clrex here as clearing the local
276 .macro svc_exit, rpsr, irq = 0 315 @ monitor is part of the exception entry and exit sequence.
277 .if \irq != 0
278 @ IRQs already off
279#ifdef CONFIG_TRACE_IRQFLAGS
280 @ The parent context IRQs must have been enabled to get here in
281 @ the first place, so there's no point checking the PSR I bit.
282 bl trace_hardirqs_on
283#endif
284 .else
285 @ IRQs off again before pulling preserved data off the stack
286 disable_irq_notrace
287#ifdef CONFIG_TRACE_IRQFLAGS
288 tst \rpsr, #PSR_I_BIT
289 bleq trace_hardirqs_on
290 tst \rpsr, #PSR_I_BIT
291 blne trace_hardirqs_off
292#endif
293 .endif
294 ldr lr, [sp, #S_SP] @ top of the stack
295 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
296
297 @ We must avoid clrex due to Cortex-A15 erratum #830321
298 strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
299
300 stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
301 ldmia sp, {r0 - r12}
302 mov sp, lr
303 ldr lr, [sp], #4
304 rfeia sp!
305 .endm
306
307 @
308 @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
309 @
310 @ For full details see non-Thumb implementation above.
311 @
312 .macro svc_exit_via_fiq
313 add r0, sp, #S_R2
314 ldr lr, [sp, #S_LR]
315 ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
316 @ clobber state restored below)
317 ldmia r0, {r2 - r12}
318 mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
319 msr cpsr_c, r1
320 sub r0, #S_R2
321 add r8, r0, #S_PC
322 ldmia r0, {r0 - r1}
323 rfeia r8
324 .endm
325
326#ifdef CONFIG_CPU_V7M
327 /*
328 * Note we don't need to do clrex here as clearing the local monitor is
329 * part of each exception entry and exit sequence.
330 */
331 .macro restore_user_regs, fast = 0, offset = 0
332 .if \offset 316 .if \offset
333 add sp, #\offset 317 add sp, #\offset
334 .endif 318 .endif
335 v7m_exception_slow_exit ret_r0 = \fast 319 v7m_exception_slow_exit ret_r0 = \fast
336 .endm 320#else
337#else /* ifdef CONFIG_CPU_V7M */ 321 @ Thumb mode restore
338 .macro restore_user_regs, fast = 0, offset = 0
339 mov r2, sp 322 mov r2, sp
340 load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr 323 load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
341 ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr 324 ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
@@ -353,9 +336,8 @@
353 .endif 336 .endif
354 add sp, sp, #S_FRAME_SIZE - S_SP 337 add sp, sp, #S_FRAME_SIZE - S_SP
355 movs pc, lr @ return & move spsr_svc into cpsr 338 movs pc, lr @ return & move spsr_svc into cpsr
356 .endm
357#endif /* ifdef CONFIG_CPU_V7M / else */
358#endif /* !CONFIG_THUMB2_KERNEL */ 339#endif /* !CONFIG_THUMB2_KERNEL */
340 .endm
359 341
360/* 342/*
361 * Context tracking subsystem. Used to instrument transitions 343 * Context tracking subsystem. Used to instrument transitions
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 29e2991465cb..04286fd9e09c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -464,10 +464,7 @@ __enable_mmu:
464#ifdef CONFIG_ARM_LPAE 464#ifdef CONFIG_ARM_LPAE
465 mcrr p15, 0, r4, r5, c2 @ load TTBR0 465 mcrr p15, 0, r4, r5, c2 @ load TTBR0
466#else 466#else
467 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ 467 mov r5, #DACR_INIT
468 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
469 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
470 domain_val(DOMAIN_IO, DOMAIN_CLIENT))
471 mcr p15, 0, r5, c3, c0, 0 @ load domain access register 468 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
472 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer 469 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
473#endif 470#endif
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index baf8edebe26f..5ff4826cb154 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -39,6 +39,7 @@
39#include <linux/export.h> 39#include <linux/export.h>
40 40
41#include <asm/hardware/cache-l2x0.h> 41#include <asm/hardware/cache-l2x0.h>
42#include <asm/outercache.h>
42#include <asm/exception.h> 43#include <asm/exception.h>
43#include <asm/mach/arch.h> 44#include <asm/mach/arch.h>
44#include <asm/mach/irq.h> 45#include <asm/mach/irq.h>
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 09f83e414a72..09413e7b49aa 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -34,9 +34,9 @@
34 34
35#include <asm/cputype.h> 35#include <asm/cputype.h>
36#include <asm/irq_regs.h> 36#include <asm/irq_regs.h>
37#include <asm/pmu.h>
38 37
39#include <linux/of.h> 38#include <linux/of.h>
39#include <linux/perf/arm_pmu.h>
40#include <linux/platform_device.h> 40#include <linux/platform_device.h>
41 41
42enum armv6_perf_types { 42enum armv6_perf_types {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index f9b37f876e20..126dc679b230 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -21,11 +21,11 @@
21#include <asm/cp15.h> 21#include <asm/cp15.h>
22#include <asm/cputype.h> 22#include <asm/cputype.h>
23#include <asm/irq_regs.h> 23#include <asm/irq_regs.h>
24#include <asm/pmu.h>
25#include <asm/vfp.h> 24#include <asm/vfp.h>
26#include "../vfp/vfpinstr.h" 25#include "../vfp/vfpinstr.h"
27 26
28#include <linux/of.h> 27#include <linux/of.h>
28#include <linux/perf/arm_pmu.h>
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30 30
31/* 31/*
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 304d056d5b25..aa0499e2eef7 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -16,9 +16,9 @@
16 16
17#include <asm/cputype.h> 17#include <asm/cputype.h>
18#include <asm/irq_regs.h> 18#include <asm/irq_regs.h>
19#include <asm/pmu.h>
20 19
21#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/perf/arm_pmu.h>
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23 23
24enum xscale_perf_types { 24enum xscale_perf_types {
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index f192a2a41719..a3089bacb8d8 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -91,13 +91,6 @@ void arch_cpu_idle_exit(void)
91 ledtrig_cpu(CPU_LED_IDLE_END); 91 ledtrig_cpu(CPU_LED_IDLE_END);
92} 92}
93 93
94#ifdef CONFIG_HOTPLUG_CPU
95void arch_cpu_idle_dead(void)
96{
97 cpu_die();
98}
99#endif
100
101void __show_regs(struct pt_regs *regs) 94void __show_regs(struct pt_regs *regs)
102{ 95{
103 unsigned long flags; 96 unsigned long flags;
@@ -129,12 +122,36 @@ void __show_regs(struct pt_regs *regs)
129 buf[4] = '\0'; 122 buf[4] = '\0';
130 123
131#ifndef CONFIG_CPU_V7M 124#ifndef CONFIG_CPU_V7M
132 printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n", 125 {
133 buf, interrupts_enabled(regs) ? "n" : "ff", 126 unsigned int domain = get_domain();
134 fast_interrupts_enabled(regs) ? "n" : "ff", 127 const char *segment;
135 processor_modes[processor_mode(regs)], 128
136 isa_modes[isa_mode(regs)], 129#ifdef CONFIG_CPU_SW_DOMAIN_PAN
137 get_fs() == get_ds() ? "kernel" : "user"); 130 /*
131 * Get the domain register for the parent context. In user
132 * mode, we don't save the DACR, so let's use what it should
133 * be. For other modes, we place it after the pt_regs struct.
134 */
135 if (user_mode(regs))
136 domain = DACR_UACCESS_ENABLE;
137 else
138 domain = *(unsigned int *)(regs + 1);
139#endif
140
141 if ((domain & domain_mask(DOMAIN_USER)) ==
142 domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
143 segment = "none";
144 else if (get_fs() == get_ds())
145 segment = "kernel";
146 else
147 segment = "user";
148
149 printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
150 buf, interrupts_enabled(regs) ? "n" : "ff",
151 fast_interrupts_enabled(regs) ? "n" : "ff",
152 processor_modes[processor_mode(regs)],
153 isa_modes[isa_mode(regs)], segment);
154 }
138#else 155#else
139 printk("xPSR: %08lx\n", regs->ARM_cpsr); 156 printk("xPSR: %08lx\n", regs->ARM_cpsr);
140#endif 157#endif
@@ -146,10 +163,9 @@ void __show_regs(struct pt_regs *regs)
146 buf[0] = '\0'; 163 buf[0] = '\0';
147#ifdef CONFIG_CPU_CP15_MMU 164#ifdef CONFIG_CPU_CP15_MMU
148 { 165 {
149 unsigned int transbase, dac; 166 unsigned int transbase, dac = get_domain();
150 asm("mrc p15, 0, %0, c2, c0\n\t" 167 asm("mrc p15, 0, %0, c2, c0\n\t"
151 "mrc p15, 0, %1, c3, c0\n" 168 : "=r" (transbase));
152 : "=r" (transbase), "=r" (dac));
153 snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", 169 snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
154 transbase, dac); 170 transbase, dac);
155 } 171 }
@@ -210,6 +226,14 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
210 226
211 memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); 227 memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
212 228
229 /*
230 * Copy the initial value of the domain access control register
231 * from the current thread: thread->addr_limit will have been
232 * copied from the current thread via setup_thread_stack() in
233 * kernel/fork.c
234 */
235 thread->cpu_domain = get_domain();
236
213 if (likely(!(p->flags & PF_KTHREAD))) { 237 if (likely(!(p->flags & PF_KTHREAD))) {
214 *childregs = *current_pt_regs(); 238 *childregs = *current_pt_regs();
215 childregs->ARM_r0 = 0; 239 childregs->ARM_r0 = 0;
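Together with the __switch_to hunk in entry-armv.S above, the copy_thread change makes the domain register per-thread state: the child inherits the parent's DACR at fork, and the context switch path saves the outgoing thread's value before loading the incoming one. A minimal sketch of the pairing (helper names follow the patch):

	/* fork: capture the parent's DACR into the new thread */
	thread->cpu_domain = get_domain();

	/*
	 * context switch, CONFIG_CPU_USE_DOMAINS — the assembly above does:
	 *   mrc p15, 0, rN, c3, c0, 0            @ save old into prev
	 *   ldr rN, [next, #TI_CPU_DOMAIN]; mcr  @ load new
	 */
	prev->cpu_domain = get_domain();
	set_domain(next->cpu_domain);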
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
deleted file mode 100644
index 2e6024334790..000000000000
--- a/arch/arm/kernel/psci.c
+++ /dev/null
@@ -1,299 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2012 ARM Limited
12 *
13 * Author: Will Deacon <will.deacon@arm.com>
14 */
15
16#define pr_fmt(fmt) "psci: " fmt
17
18#include <linux/init.h>
19#include <linux/of.h>
20#include <linux/reboot.h>
21#include <linux/pm.h>
22#include <uapi/linux/psci.h>
23
24#include <asm/compiler.h>
25#include <asm/errno.h>
26#include <asm/psci.h>
27#include <asm/system_misc.h>
28
29struct psci_operations psci_ops;
30
31static int (*invoke_psci_fn)(u32, u32, u32, u32);
32typedef int (*psci_initcall_t)(const struct device_node *);
33
34asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32);
35asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32);
36
37enum psci_function {
38 PSCI_FN_CPU_SUSPEND,
39 PSCI_FN_CPU_ON,
40 PSCI_FN_CPU_OFF,
41 PSCI_FN_MIGRATE,
42 PSCI_FN_AFFINITY_INFO,
43 PSCI_FN_MIGRATE_INFO_TYPE,
44 PSCI_FN_MAX,
45};
46
47static u32 psci_function_id[PSCI_FN_MAX];
48
49static int psci_to_linux_errno(int errno)
50{
51 switch (errno) {
52 case PSCI_RET_SUCCESS:
53 return 0;
54 case PSCI_RET_NOT_SUPPORTED:
55 return -EOPNOTSUPP;
56 case PSCI_RET_INVALID_PARAMS:
57 return -EINVAL;
58 case PSCI_RET_DENIED:
59 return -EPERM;
60 };
61
62 return -EINVAL;
63}
64
65static u32 psci_power_state_pack(struct psci_power_state state)
66{
67 return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
68 & PSCI_0_2_POWER_STATE_ID_MASK) |
69 ((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
70 & PSCI_0_2_POWER_STATE_TYPE_MASK) |
71 ((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
72 & PSCI_0_2_POWER_STATE_AFFL_MASK);
73}
74
75static int psci_get_version(void)
76{
77 int err;
78
79 err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
80 return err;
81}
82
83static int psci_cpu_suspend(struct psci_power_state state,
84 unsigned long entry_point)
85{
86 int err;
87 u32 fn, power_state;
88
89 fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
90 power_state = psci_power_state_pack(state);
91 err = invoke_psci_fn(fn, power_state, entry_point, 0);
92 return psci_to_linux_errno(err);
93}
94
95static int psci_cpu_off(struct psci_power_state state)
96{
97 int err;
98 u32 fn, power_state;
99
100 fn = psci_function_id[PSCI_FN_CPU_OFF];
101 power_state = psci_power_state_pack(state);
102 err = invoke_psci_fn(fn, power_state, 0, 0);
103 return psci_to_linux_errno(err);
104}
105
106static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
107{
108 int err;
109 u32 fn;
110
111 fn = psci_function_id[PSCI_FN_CPU_ON];
112 err = invoke_psci_fn(fn, cpuid, entry_point, 0);
113 return psci_to_linux_errno(err);
114}
115
116static int psci_migrate(unsigned long cpuid)
117{
118 int err;
119 u32 fn;
120
121 fn = psci_function_id[PSCI_FN_MIGRATE];
122 err = invoke_psci_fn(fn, cpuid, 0, 0);
123 return psci_to_linux_errno(err);
124}
125
126static int psci_affinity_info(unsigned long target_affinity,
127 unsigned long lowest_affinity_level)
128{
129 int err;
130 u32 fn;
131
132 fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
133 err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
134 return err;
135}
136
137static int psci_migrate_info_type(void)
138{
139 int err;
140 u32 fn;
141
142 fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE];
143 err = invoke_psci_fn(fn, 0, 0, 0);
144 return err;
145}
146
147static int get_set_conduit_method(struct device_node *np)
148{
149 const char *method;
150
151 pr_info("probing for conduit method from DT.\n");
152
153 if (of_property_read_string(np, "method", &method)) {
154 pr_warn("missing \"method\" property\n");
155 return -ENXIO;
156 }
157
158 if (!strcmp("hvc", method)) {
159 invoke_psci_fn = __invoke_psci_fn_hvc;
160 } else if (!strcmp("smc", method)) {
161 invoke_psci_fn = __invoke_psci_fn_smc;
162 } else {
163 pr_warn("invalid \"method\" property: %s\n", method);
164 return -EINVAL;
165 }
166 return 0;
167}
168
169static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
170{
171 invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
172}
173
174static void psci_sys_poweroff(void)
175{
176 invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
177}
178
179/*
180 * PSCI Function IDs for v0.2+ are well defined so use
181 * standard values.
182 */
183static int psci_0_2_init(struct device_node *np)
184{
185 int err, ver;
186
187 err = get_set_conduit_method(np);
188
189 if (err)
190 goto out_put_node;
191
192 ver = psci_get_version();
193
194 if (ver == PSCI_RET_NOT_SUPPORTED) {
195 /* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */
196 pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
197 err = -EOPNOTSUPP;
198 goto out_put_node;
199 } else {
200 pr_info("PSCIv%d.%d detected in firmware.\n",
201 PSCI_VERSION_MAJOR(ver),
202 PSCI_VERSION_MINOR(ver));
203
204 if (PSCI_VERSION_MAJOR(ver) == 0 &&
205 PSCI_VERSION_MINOR(ver) < 2) {
206 err = -EINVAL;
207 pr_err("Conflicting PSCI version detected.\n");
208 goto out_put_node;
209 }
210 }
211
212 pr_info("Using standard PSCI v0.2 function IDs\n");
213 psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_CPU_SUSPEND;
214 psci_ops.cpu_suspend = psci_cpu_suspend;
215
216 psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
217 psci_ops.cpu_off = psci_cpu_off;
218
219 psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_CPU_ON;
220 psci_ops.cpu_on = psci_cpu_on;
221
222 psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_MIGRATE;
223 psci_ops.migrate = psci_migrate;
224
225 psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN_AFFINITY_INFO;
226 psci_ops.affinity_info = psci_affinity_info;
227
228 psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
229 PSCI_0_2_FN_MIGRATE_INFO_TYPE;
230 psci_ops.migrate_info_type = psci_migrate_info_type;
231
232 arm_pm_restart = psci_sys_reset;
233
234 pm_power_off = psci_sys_poweroff;
235
236out_put_node:
237 of_node_put(np);
238 return err;
239}
240
241/*
242 * PSCI < v0.2 get PSCI Function IDs via DT.
243 */
244static int psci_0_1_init(struct device_node *np)
245{
246 u32 id;
247 int err;
248
249 err = get_set_conduit_method(np);
250
251 if (err)
252 goto out_put_node;
253
254 pr_info("Using PSCI v0.1 Function IDs from DT\n");
255
256 if (!of_property_read_u32(np, "cpu_suspend", &id)) {
257 psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
258 psci_ops.cpu_suspend = psci_cpu_suspend;
259 }
260
261 if (!of_property_read_u32(np, "cpu_off", &id)) {
262 psci_function_id[PSCI_FN_CPU_OFF] = id;
263 psci_ops.cpu_off = psci_cpu_off;
264 }
265
266 if (!of_property_read_u32(np, "cpu_on", &id)) {
267 psci_function_id[PSCI_FN_CPU_ON] = id;
268 psci_ops.cpu_on = psci_cpu_on;
269 }
270
271 if (!of_property_read_u32(np, "migrate", &id)) {
272 psci_function_id[PSCI_FN_MIGRATE] = id;
273 psci_ops.migrate = psci_migrate;
274 }
275
276out_put_node:
277 of_node_put(np);
278 return err;
279}
280
281static const struct of_device_id const psci_of_match[] __initconst = {
282 { .compatible = "arm,psci", .data = psci_0_1_init},
283 { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
284 {},
285};
286
287int __init psci_init(void)
288{
289 struct device_node *np;
290 const struct of_device_id *matched_np;
291 psci_initcall_t init_fn;
292
293 np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
294 if (!np)
295 return -ENODEV;
296
297 init_fn = (psci_initcall_t)matched_np->data;
298 return init_fn(np);
299}
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 28a1db4da704..61c04b02faeb 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -17,6 +17,8 @@
17#include <linux/smp.h> 17#include <linux/smp.h>
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/psci.h>
21
20#include <uapi/linux/psci.h> 22#include <uapi/linux/psci.h>
21 23
22#include <asm/psci.h> 24#include <asm/psci.h>
@@ -51,22 +53,34 @@ static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle)
51{ 53{
52 if (psci_ops.cpu_on) 54 if (psci_ops.cpu_on)
53 return psci_ops.cpu_on(cpu_logical_map(cpu), 55 return psci_ops.cpu_on(cpu_logical_map(cpu),
54 __pa(secondary_startup)); 56 virt_to_idmap(&secondary_startup));
55 return -ENODEV; 57 return -ENODEV;
56} 58}
57 59
58#ifdef CONFIG_HOTPLUG_CPU 60#ifdef CONFIG_HOTPLUG_CPU
61int psci_cpu_disable(unsigned int cpu)
62{
63 /* Fail early if we don't have CPU_OFF support */
64 if (!psci_ops.cpu_off)
65 return -EOPNOTSUPP;
66
67 /* Trusted OS will deny CPU_OFF */
68 if (psci_tos_resident_on(cpu))
69 return -EPERM;
70
71 return 0;
72}
73
59void __ref psci_cpu_die(unsigned int cpu) 74void __ref psci_cpu_die(unsigned int cpu)
60{ 75{
61 const struct psci_power_state ps = { 76 u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
62 .type = PSCI_POWER_STATE_TYPE_POWER_DOWN, 77 PSCI_0_2_POWER_STATE_TYPE_SHIFT;
63 };
64 78
65 if (psci_ops.cpu_off) 79 if (psci_ops.cpu_off)
66 psci_ops.cpu_off(ps); 80 psci_ops.cpu_off(state);
67 81
68 /* We should never return */ 82 /* We should never return */
69 panic("psci: cpu %d failed to shutdown\n", cpu); 83 panic("psci: cpu %d failed to shutdown\n", cpu);
70} 84}
71 85
72int __ref psci_cpu_kill(unsigned int cpu) 86int __ref psci_cpu_kill(unsigned int cpu)
@@ -109,6 +123,7 @@ bool __init psci_smp_available(void)
109struct smp_operations __initdata psci_smp_ops = { 123struct smp_operations __initdata psci_smp_ops = {
110 .smp_boot_secondary = psci_boot_secondary, 124 .smp_boot_secondary = psci_boot_secondary,
111#ifdef CONFIG_HOTPLUG_CPU 125#ifdef CONFIG_HOTPLUG_CPU
126 .cpu_disable = psci_cpu_disable,
112 .cpu_die = psci_cpu_die, 127 .cpu_die = psci_cpu_die,
113 .cpu_kill = psci_cpu_kill, 128 .cpu_kill = psci_cpu_kill,
114#endif 129#endif
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 36c18b73c1f4..20edd349d379 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -31,12 +31,14 @@
31#include <linux/bug.h> 31#include <linux/bug.h>
32#include <linux/compiler.h> 32#include <linux/compiler.h>
33#include <linux/sort.h> 33#include <linux/sort.h>
34#include <linux/psci.h>
34 35
35#include <asm/unified.h> 36#include <asm/unified.h>
36#include <asm/cp15.h> 37#include <asm/cp15.h>
37#include <asm/cpu.h> 38#include <asm/cpu.h>
38#include <asm/cputype.h> 39#include <asm/cputype.h>
39#include <asm/elf.h> 40#include <asm/elf.h>
41#include <asm/fixmap.h>
40#include <asm/procinfo.h> 42#include <asm/procinfo.h>
41#include <asm/psci.h> 43#include <asm/psci.h>
42#include <asm/sections.h> 44#include <asm/sections.h>
@@ -954,6 +956,9 @@ void __init setup_arch(char **cmdline_p)
954 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); 956 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
955 *cmdline_p = cmd_line; 957 *cmdline_p = cmd_line;
956 958
959 if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
960 early_fixmap_init();
961
957 parse_early_param(); 962 parse_early_param();
958 963
959#ifdef CONFIG_MMU 964#ifdef CONFIG_MMU
@@ -972,7 +977,7 @@ void __init setup_arch(char **cmdline_p)
972 unflatten_device_tree(); 977 unflatten_device_tree();
973 978
974 arm_dt_init_cpu_maps(); 979 arm_dt_init_cpu_maps();
975 psci_init(); 980 psci_dt_init();
976 xen_early_init(); 981 xen_early_init();
977#ifdef CONFIG_SMP 982#ifdef CONFIG_SMP
978 if (is_smp()) { 983 if (is_smp()) {
@@ -1015,7 +1020,7 @@ static int __init topology_init(void)
1015 1020
1016 for_each_possible_cpu(cpu) { 1021 for_each_possible_cpu(cpu) {
1017 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu); 1022 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1018 cpuinfo->cpu.hotpluggable = 1; 1023 cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
1019 register_cpu(&cpuinfo->cpu, cpu); 1024 register_cpu(&cpuinfo->cpu, cpu);
1020 } 1025 }
1021 1026
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 423663e23791..b6cda06b455f 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -562,6 +562,12 @@ static int do_signal(struct pt_regs *regs, int syscall)
562asmlinkage int 562asmlinkage int
563do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) 563do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
564{ 564{
565 /*
566 * The assembly code enters us with IRQs off, but it hasn't
567 * informed the tracing code of that for efficiency reasons.
568 * Update the trace code with the current status.
569 */
570 trace_hardirqs_off();
565 do { 571 do {
566 if (likely(thread_flags & _TIF_NEED_RESCHED)) { 572 if (likely(thread_flags & _TIF_NEED_RESCHED)) {
567 schedule(); 573 schedule();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3d6b7821cff8..ba0063c539c3 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -175,13 +175,26 @@ static int platform_cpu_disable(unsigned int cpu)
175 if (smp_ops.cpu_disable) 175 if (smp_ops.cpu_disable)
176 return smp_ops.cpu_disable(cpu); 176 return smp_ops.cpu_disable(cpu);
177 177
178 return 0;
179}
180
181int platform_can_hotplug_cpu(unsigned int cpu)
182{
183 /* cpu_die must be specified to support hotplug */
184 if (!smp_ops.cpu_die)
185 return 0;
186
187 if (smp_ops.cpu_can_disable)
188 return smp_ops.cpu_can_disable(cpu);
189
178 /* 190 /*
179 * By default, allow disabling all CPUs except the first one, 191 * By default, allow disabling all CPUs except the first one,
180 * since this is special on a lot of platforms, e.g. because 192 * since this is special on a lot of platforms, e.g. because
181 * of clock tick interrupts. 193 * of clock tick interrupts.
182 */ 194 */
183 return cpu == 0 ? -EPERM : 0; 195 return cpu != 0;
184} 196}
197
185/* 198/*
186 * __cpu_disable runs on the processor to be shutdown. 199 * __cpu_disable runs on the processor to be shutdown.
187 */ 200 */
@@ -253,7 +266,7 @@ void __cpu_die(unsigned int cpu)
253 * of the other hotplug-cpu capable cores, so presumably coming 266 * of the other hotplug-cpu capable cores, so presumably coming
254 * out of idle fixes this. 267 * out of idle fixes this.
255 */ 268 */
256void __ref cpu_die(void) 269void arch_cpu_idle_dead(void)
257{ 270{
258 unsigned int cpu = smp_processor_id(); 271 unsigned int cpu = smp_processor_id();
259 272
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 1361756782c7..5b26e7efa9ea 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
141 141
142 while (1) { 142 while (1) {
143 unsigned long temp; 143 unsigned long temp;
144 unsigned int __ua_flags;
144 145
146 __ua_flags = uaccess_save_and_enable();
145 if (type == TYPE_SWPB) 147 if (type == TYPE_SWPB)
146 __user_swpb_asm(*data, address, res, temp); 148 __user_swpb_asm(*data, address, res, temp);
147 else 149 else
148 __user_swp_asm(*data, address, res, temp); 150 __user_swp_asm(*data, address, res, temp);
151 uaccess_restore(__ua_flags);
149 152
150 if (likely(res != -EAGAIN) || signal_pending(current)) 153 if (likely(res != -EAGAIN) || signal_pending(current))
151 break; 154 break;
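The emulate_swpX hunk shows the pattern that every open-coded user access needs once PAN is in effect: open the user-access window only around the instructions that touch user memory, then restore the previous (possibly nested) state. As a general template (a sketch; the body in the middle is illustrative):

	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();	/* open the user window */
	/* ... only the user-access instructions go here ... */
	uaccess_restore(__ua_flags);		/* close it; safe to nest */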
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d358226236f2..969f9d9e665f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base)
870 kuser_init(vectors_base); 870 kuser_init(vectors_base);
871 871
872 flush_icache_range(vectors, vectors + PAGE_SIZE * 2); 872 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
873 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
874#else /* ifndef CONFIG_CPU_V7M */ 873#else /* ifndef CONFIG_CPU_V7M */
875 /* 874 /*
876 * on V7-M there is no need to copy the vector table to a dedicated 875 * on V7-M there is no need to copy the vector table to a dedicated
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 1710fd7db2d5..970d6c043774 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -12,14 +12,14 @@
12 12
13 .text 13 .text
14 14
15/* Prototype: int __clear_user(void *addr, size_t sz) 15/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
16 * Purpose : clear some user memory 16 * Purpose : clear some user memory
17 * Params : addr - user memory address to clear 17 * Params : addr - user memory address to clear
18 * : sz - number of bytes to clear 18 * : sz - number of bytes to clear
19 * Returns : number of bytes NOT cleared 19 * Returns : number of bytes NOT cleared
20 */ 20 */
21ENTRY(__clear_user_std) 21ENTRY(__clear_user_std)
22WEAK(__clear_user) 22WEAK(arm_clear_user)
23 stmfd sp!, {r1, lr} 23 stmfd sp!, {r1, lr}
24 mov r2, #0 24 mov r2, #0
25 cmp r1, #4 25 cmp r1, #4
@@ -44,7 +44,7 @@ WEAK(__clear_user)
44USER( strnebt r2, [r0]) 44USER( strnebt r2, [r0])
45 mov r0, #0 45 mov r0, #0
46 ldmfd sp!, {r1, pc} 46 ldmfd sp!, {r1, pc}
47ENDPROC(__clear_user) 47ENDPROC(arm_clear_user)
48ENDPROC(__clear_user_std) 48ENDPROC(__clear_user_std)
49 49
50 .pushsection .text.fixup,"ax" 50 .pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a235b9952be..1512bebfbf1b 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -17,7 +17,7 @@
17/* 17/*
18 * Prototype: 18 * Prototype:
19 * 19 *
20 * size_t __copy_from_user(void *to, const void *from, size_t n) 20 * size_t arm_copy_from_user(void *to, const void *from, size_t n)
21 * 21 *
22 * Purpose: 22 * Purpose:
23 * 23 *
@@ -89,11 +89,11 @@
89 89
90 .text 90 .text
91 91
92ENTRY(__copy_from_user) 92ENTRY(arm_copy_from_user)
93 93
94#include "copy_template.S" 94#include "copy_template.S"
95 95
96ENDPROC(__copy_from_user) 96ENDPROC(arm_copy_from_user)
97 97
98 .pushsection .fixup,"ax" 98 .pushsection .fixup,"ax"
99 .align 0 99 .align 0
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 9648b0675a3e..caf5019d8161 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -17,7 +17,7 @@
17/* 17/*
18 * Prototype: 18 * Prototype:
19 * 19 *
20 * size_t __copy_to_user(void *to, const void *from, size_t n) 20 * size_t arm_copy_to_user(void *to, const void *from, size_t n)
21 * 21 *
22 * Purpose: 22 * Purpose:
23 * 23 *
@@ -93,11 +93,11 @@
93 .text 93 .text
94 94
95ENTRY(__copy_to_user_std) 95ENTRY(__copy_to_user_std)
96WEAK(__copy_to_user) 96WEAK(arm_copy_to_user)
97 97
98#include "copy_template.S" 98#include "copy_template.S"
99 99
100ENDPROC(__copy_to_user) 100ENDPROC(arm_copy_to_user)
101ENDPROC(__copy_to_user_std) 101ENDPROC(__copy_to_user_std)
102 102
103 .pushsection .text.fixup,"ax" 103 .pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1d0957e61f89..1712f132b80d 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -17,6 +17,19 @@
17 17
18 .text 18 .text
19 19
20#ifdef CONFIG_CPU_SW_DOMAIN_PAN
21 .macro save_regs
22 mrc p15, 0, ip, c3, c0, 0
23 stmfd sp!, {r1, r2, r4 - r8, ip, lr}
24 uaccess_enable ip
25 .endm
26
27 .macro load_regs
28 ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
29 mcr p15, 0, ip, c3, c0, 0
30 ret lr
31 .endm
32#else
20 .macro save_regs 33 .macro save_regs
21 stmfd sp!, {r1, r2, r4 - r8, lr} 34 stmfd sp!, {r1, r2, r4 - r8, lr}
22 .endm 35 .endm
@@ -24,6 +37,7 @@
24 .macro load_regs 37 .macro load_regs
25 ldmfd sp!, {r1, r2, r4 - r8, pc} 38 ldmfd sp!, {r1, r2, r4 - r8, pc}
26 .endm 39 .endm
40#endif
27 41
28 .macro load1b, reg1 42 .macro load1b, reg1
29 ldrusr \reg1, r0, 1 43 ldrusr \reg1, r0, 1
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 4b39af2dfda9..d72b90905132 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -136,7 +136,7 @@ out:
136} 136}
137 137
138unsigned long 138unsigned long
139__copy_to_user(void __user *to, const void *from, unsigned long n) 139arm_copy_to_user(void __user *to, const void *from, unsigned long n)
140{ 140{
141 /* 141 /*
142 * This test is stubbed out of the main function above to keep 142 * This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@ out:
190 return n; 190 return n;
191} 191}
192 192
193unsigned long __clear_user(void __user *addr, unsigned long n) 193unsigned long arm_clear_user(void __user *addr, unsigned long n)
194{ 194{
 195 /* See rationale for this in __copy_to_user() above. */ 195 /* See rationale for this in __copy_to_user() above. */
196 if (n < 64) 196 if (n < 64)
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 231fba0d03e5..6050a14faee6 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -28,8 +28,8 @@
28#include <linux/reboot.h> 28#include <linux/reboot.h>
29#include <linux/amba/bus.h> 29#include <linux/amba/bus.h>
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/psci.h>
31 32
32#include <asm/psci.h>
33#include <asm/hardware/cache-l2x0.h> 33#include <asm/hardware/cache-l2x0.h>
34#include <asm/mach/arch.h> 34#include <asm/mach/arch.h>
35#include <asm/mach/map.h> 35#include <asm/mach/map.h>
diff --git a/arch/arm/mach-highbank/pm.c b/arch/arm/mach-highbank/pm.c
index 7f2bd85eb935..400311695548 100644
--- a/arch/arm/mach-highbank/pm.c
+++ b/arch/arm/mach-highbank/pm.c
@@ -16,19 +16,21 @@
16 16
17#include <linux/cpu_pm.h> 17#include <linux/cpu_pm.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/psci.h>
19#include <linux/suspend.h> 20#include <linux/suspend.h>
20 21
21#include <asm/suspend.h> 22#include <asm/suspend.h>
22#include <asm/psci.h> 23
24#include <uapi/linux/psci.h>
25
26#define HIGHBANK_SUSPEND_PARAM \
27 ((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \
28 (1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \
29 (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT))
23 30
24static int highbank_suspend_finish(unsigned long val) 31static int highbank_suspend_finish(unsigned long val)
25{ 32{
26 const struct psci_power_state ps = { 33 return psci_ops.cpu_suspend(HIGHBANK_SUSPEND_PARAM, __pa(cpu_resume));
27 .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
28 .affinity_level = 1,
29 };
30
31 return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
32} 34}
33 35
34static int highbank_pm_enter(suspend_state_t state) 36static int highbank_pm_enter(suspend_state_t state)
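HIGHBANK_SUSPEND_PARAM open-codes the same u32 that the old struct-based psci_power_state_pack() produced. Assuming the PSCI v0.2 field shifts of 0 (state ID), 16 (state type) and 24 (affinity level), the value works out as:

	/* (0 << 0)	state ID 0
	 * (1 << 16)	PSCI_POWER_STATE_TYPE_POWER_DOWN
	 * (1 << 24)	affinity level 1
	 * => 0x01010000
	 */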
diff --git a/arch/arm/mach-mmp/pm-pxa910.c b/arch/arm/mach-mmp/pm-pxa910.c
index 04c9daf9f8d7..7db5870d127f 100644
--- a/arch/arm/mach-mmp/pm-pxa910.c
+++ b/arch/arm/mach-mmp/pm-pxa910.c
@@ -18,6 +18,7 @@
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <asm/mach-types.h> 20#include <asm/mach-types.h>
21#include <asm/outercache.h>
21#include <mach/hardware.h> 22#include <mach/hardware.h>
22#include <mach/cputype.h> 23#include <mach/cputype.h>
23#include <mach/addr-map.h> 24#include <mach/addr-map.h>
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 9e2a68456b81..07d2e100caab 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -29,6 +29,7 @@ config ARCH_OMAP4
29 select HAVE_ARM_SCU if SMP 29 select HAVE_ARM_SCU if SMP
30 select HAVE_ARM_TWD if SMP 30 select HAVE_ARM_TWD if SMP
31 select OMAP_INTERCONNECT 31 select OMAP_INTERCONNECT
32 select OMAP_INTERCONNECT_BARRIER
32 select PL310_ERRATA_588369 if CACHE_L2X0 33 select PL310_ERRATA_588369 if CACHE_L2X0
33 select PL310_ERRATA_727915 if CACHE_L2X0 34 select PL310_ERRATA_727915 if CACHE_L2X0
34 select PM_OPP if PM 35 select PM_OPP if PM
@@ -46,6 +47,7 @@ config SOC_OMAP5
46 select HAVE_ARM_TWD if SMP 47 select HAVE_ARM_TWD if SMP
47 select HAVE_ARM_ARCH_TIMER 48 select HAVE_ARM_ARCH_TIMER
48 select ARM_ERRATA_798181 if SMP 49 select ARM_ERRATA_798181 if SMP
50 select OMAP_INTERCONNECT_BARRIER
49 51
50config SOC_AM33XX 52config SOC_AM33XX
51 bool "TI AM33XX" 53 bool "TI AM33XX"
@@ -71,6 +73,7 @@ config SOC_DRA7XX
71 select HAVE_ARM_ARCH_TIMER 73 select HAVE_ARM_ARCH_TIMER
72 select IRQ_CROSSBAR 74 select IRQ_CROSSBAR
73 select ARM_ERRATA_798181 if SMP 75 select ARM_ERRATA_798181 if SMP
76 select OMAP_INTERCONNECT_BARRIER
74 77
75config ARCH_OMAP2PLUS 78config ARCH_OMAP2PLUS
76 bool 79 bool
@@ -92,6 +95,10 @@ config ARCH_OMAP2PLUS
92 help 95 help
93 Systems based on OMAP2, OMAP3, OMAP4 or OMAP5 96 Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
94 97
98config OMAP_INTERCONNECT_BARRIER
99 bool
100 select ARM_HEAVY_MB
101
95 102
96if ARCH_OMAP2PLUS 103if ARCH_OMAP2PLUS
97 104
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
index eae6a0e87c90..484cdadfb187 100644
--- a/arch/arm/mach-omap2/common.c
+++ b/arch/arm/mach-omap2/common.c
@@ -30,4 +30,5 @@ int __weak omap_secure_ram_reserve_memblock(void)
30void __init omap_reserve(void) 30void __init omap_reserve(void)
31{ 31{
32 omap_secure_ram_reserve_memblock(); 32 omap_secure_ram_reserve_memblock();
33 omap_barrier_reserve_memblock();
33} 34}
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 749d50bb4ca5..92e92cfc2775 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -189,6 +189,15 @@ static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd)
189} 189}
190#endif 190#endif
191 191
192#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
193void omap_barrier_reserve_memblock(void);
194void omap_barriers_init(void);
195#else
196static inline void omap_barrier_reserve_memblock(void)
197{
198}
199#endif
200
192/* This gets called from mach-omap2/io.c, do not call this */ 201/* This gets called from mach-omap2/io.c, do not call this */
193void __init omap2_set_globals_tap(u32 class, void __iomem *tap); 202void __init omap2_set_globals_tap(u32 class, void __iomem *tap);
194 203
diff --git a/arch/arm/mach-omap2/include/mach/barriers.h b/arch/arm/mach-omap2/include/mach/barriers.h
deleted file mode 100644
index 1c582a8592b9..000000000000
--- a/arch/arm/mach-omap2/include/mach/barriers.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * OMAP memory barrier header.
3 *
4 * Copyright (C) 2011 Texas Instruments, Inc.
5 * Santosh Shilimkar <santosh.shilimkar@ti.com>
6 * Richard Woodruff <r-woodruff2@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#ifndef __MACH_BARRIERS_H
23#define __MACH_BARRIERS_H
24
25#include <asm/outercache.h>
26
27extern void omap_bus_sync(void);
28
29#define rmb() dsb()
30#define wmb() do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
31#define mb() wmb()
32
33#endif /* __MACH_BARRIERS_H */
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 6a4822dbb4ea..980c9372e6fd 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -352,6 +352,7 @@ void __init am33xx_map_io(void)
352void __init omap4_map_io(void) 352void __init omap4_map_io(void)
353{ 353{
354 iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc)); 354 iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
355 omap_barriers_init();
355} 356}
356#endif 357#endif
357 358
@@ -359,6 +360,7 @@ void __init omap4_map_io(void)
359void __init omap5_map_io(void) 360void __init omap5_map_io(void)
360{ 361{
361 iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc)); 362 iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
363 omap_barriers_init();
362} 364}
363#endif 365#endif
364 366
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 16350eefa66c..949696b6f17b 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -51,6 +51,127 @@ static void __iomem *twd_base;
51 51
52#define IRQ_LOCALTIMER 29 52#define IRQ_LOCALTIMER 29
53 53
54#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
55
56/* Used to implement memory barrier on DRAM path */
57#define OMAP4_DRAM_BARRIER_VA 0xfe600000
58
59static void __iomem *dram_sync, *sram_sync;
60static phys_addr_t dram_sync_paddr;
61static u32 dram_sync_size;
62
63/*
64 * The OMAP4 bus structure contains asynchronous bridges which can buffer
65 * data writes from the MPU. These asynchronous bridges can be found on
66 * paths between the MPU to EMIF, and the MPU to L3 interconnects.
67 *
68 * We need to be careful about re-ordering which can happen as a result
69 * of different accesses being performed via different paths, and
70 * therefore different asynchronous bridges.
71 */
72
73/*
74 * OMAP4 interconnect barrier which is called for each mb() and wmb().
75 * This is to ensure that normal paths to DRAM (normal memory, cacheable
76 * accesses) are properly synchronised with writes to DMA coherent memory
77 * (normal memory, uncacheable) and device writes.
78 *
79 * The mb() and wmb() barriers operate only on the MPU->MA->EMIF
80 * path, as we need to ensure that data is visible to other system
81 * masters prior to writes to those system masters being seen.
82 *
83 * Note: the SRAM path is not synchronised via mb() and wmb().
84 */
85static void omap4_mb(void)
86{
87 if (dram_sync)
88 writel_relaxed(0, dram_sync);
89}
90
91/*
92 * OMAP4 Errata i688 - asynchronous bridge corruption when entering WFI.
93 *
94 * If data is stalled inside an asynchronous bridge because of back
95 * pressure, it may be accepted multiple times, creating pointer
96 * misalignment that will corrupt the next transfers on that data path
97 * until the next reset of the system. There is no recovery procedure
98 * once the issue is hit; the path remains consistently broken.
99 *
100 * Async bridges can be found on paths between MPU to EMIF and MPU to L3
101 * interconnects.
102 *
103 * This situation can happen only when the idle is initiated by a Master
104 * Request Disconnection (which is triggered by software when executing WFI
105 * on the CPU).
106 *
107 * The work-around for this errata needs all the initiators connected
108 * through an async bridge to ensure that data path is properly drained
109 * before issuing WFI. This condition will be met if one Strongly ordered
110 * access is performed to the target right before executing the WFI.
111 *
112 * In the MPU case, the L3 T2ASYNC FIFO and the DDR T2ASYNC FIFO need
113 * to be drained. An IO barrier ensures there is no synchronisation loss
114 * on initiators operating on both interconnect ports simultaneously.
115 *
116 * This is a stronger version of the OMAP4 memory barrier above: it
117 * operates not only on the MPU->MA->EMIF path but also on the MPU->OCP
118 * path, and is necessary prior to executing a WFI.
119 */
120void omap_interconnect_sync(void)
121{
122 if (dram_sync && sram_sync) {
123 writel_relaxed(readl_relaxed(dram_sync), dram_sync);
124 writel_relaxed(readl_relaxed(sram_sync), sram_sync);
125 isb();
126 }
127}
128
129static int __init omap4_sram_init(void)
130{
131 struct device_node *np;
132 struct gen_pool *sram_pool;
133
134 np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
135 if (!np)
136 pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
137 __func__);
138 sram_pool = of_gen_pool_get(np, "sram", 0);
139 if (!sram_pool)
140 pr_warn("%s:Unable to get sram pool needed to handle errata I688\n",
141 __func__);
142 else
143 sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE);
144
145 return 0;
146}
147omap_arch_initcall(omap4_sram_init);
148
149/* Steal one page of physical memory for the barrier implementation */
150void __init omap_barrier_reserve_memblock(void)
151{
152 dram_sync_size = ALIGN(PAGE_SIZE, SZ_1M);
153 dram_sync_paddr = arm_memblock_steal(dram_sync_size, SZ_1M);
154}
155
156void __init omap_barriers_init(void)
157{
158 struct map_desc dram_io_desc[1];
159
160 dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
161 dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr);
162 dram_io_desc[0].length = dram_sync_size;
163 dram_io_desc[0].type = MT_MEMORY_RW_SO;
164 iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
165 dram_sync = (void __iomem *) dram_io_desc[0].virtual;
166
167 pr_info("OMAP4: Map %pa to %p for dram barrier\n",
168 &dram_sync_paddr, dram_sync);
169
170 soc_mb = omap4_mb;
171}
172
173#endif
174
54void gic_dist_disable(void) 175void gic_dist_disable(void)
55{ 176{
56 if (gic_dist_base_addr) 177 if (gic_dist_base_addr)
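soc_mb, which omap_barriers_init() points at omap4_mb(), is the hook that the new ARM_HEAVY_MB infrastructure (selected via OMAP_INTERCONNECT_BARRIER in the Kconfig hunk above) invokes from the mandatory barriers, replacing the old mach/barriers.h override. The core side looks roughly like this (a sketch of the mechanism, not the exact barrier.h/flush.c code):

	/* arch/arm core side, sketch */
	void (*soc_mb)(void);

	void arm_heavy_mb(void)
	{
		if (outer_cache.sync)	/* drain the outer cache write buffer */
			outer_cache.sync();
		if (soc_mb)		/* e.g. omap4_mb(): write the SO DRAM page */
			soc_mb();
	}

	#define mb()	do { dsb(); arm_heavy_mb(); } while (0)
	#define wmb()	mb()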
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index ad1bb9431e94..9b09d85d811a 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -333,14 +333,12 @@ ENDPROC(omap4_cpu_resume)
333 333
334#endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */ 334#endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */
335 335
336ENTRY(omap_bus_sync)
337 ret lr
338ENDPROC(omap_bus_sync)
339
340ENTRY(omap_do_wfi) 336ENTRY(omap_do_wfi)
341 stmfd sp!, {lr} 337 stmfd sp!, {lr}
338#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
342 /* Drain interconnect write buffers. */ 339 /* Drain interconnect write buffers. */
343 bl omap_bus_sync 340 bl omap_interconnect_sync
341#endif
344 342
345 /* 343 /*
346 * Execute an ISB instruction to ensure that all of the 344 * Execute an ISB instruction to ensure that all of the
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index d99d08eeb966..83e94c95e314 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -16,6 +16,7 @@
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/rtc/sirfsoc_rtciobrg.h> 18#include <linux/rtc/sirfsoc_rtciobrg.h>
19#include <asm/outercache.h>
19#include <asm/suspend.h> 20#include <asm/suspend.h>
20#include <asm/hardware/cache-l2x0.h> 21#include <asm/hardware/cache-l2x0.h>
21 22
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 476092b86c6e..8d27ec546a35 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -13,7 +13,7 @@ extern void shmobile_smp_boot(void);
13extern void shmobile_smp_sleep(void); 13extern void shmobile_smp_sleep(void);
14extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn, 14extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
15 unsigned long arg); 15 unsigned long arg);
16extern int shmobile_smp_cpu_disable(unsigned int cpu); 16extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
17extern void shmobile_boot_scu(void); 17extern void shmobile_boot_scu(void);
18extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus); 18extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
19extern void shmobile_smp_scu_cpu_die(unsigned int cpu); 19extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index 3923e09e966d..b23378f3d7e1 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -31,8 +31,8 @@ void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg)
31} 31}
32 32
33#ifdef CONFIG_HOTPLUG_CPU 33#ifdef CONFIG_HOTPLUG_CPU
34int shmobile_smp_cpu_disable(unsigned int cpu) 34bool shmobile_smp_cpu_can_disable(unsigned int cpu)
35{ 35{
36 return 0; /* Hotplug of any CPU is supported */ 36 return true; /* Hotplug of any CPU is supported */
37} 37}
38#endif 38#endif
diff --git a/arch/arm/mach-shmobile/smp-r8a7790.c b/arch/arm/mach-shmobile/smp-r8a7790.c
index 2ef0054ce934..4b33d432a364 100644
--- a/arch/arm/mach-shmobile/smp-r8a7790.c
+++ b/arch/arm/mach-shmobile/smp-r8a7790.c
@@ -64,7 +64,7 @@ struct smp_operations r8a7790_smp_ops __initdata = {
64 .smp_prepare_cpus = r8a7790_smp_prepare_cpus, 64 .smp_prepare_cpus = r8a7790_smp_prepare_cpus,
65 .smp_boot_secondary = shmobile_smp_apmu_boot_secondary, 65 .smp_boot_secondary = shmobile_smp_apmu_boot_secondary,
66#ifdef CONFIG_HOTPLUG_CPU 66#ifdef CONFIG_HOTPLUG_CPU
67 .cpu_disable = shmobile_smp_cpu_disable, 67 .cpu_can_disable = shmobile_smp_cpu_can_disable,
68 .cpu_die = shmobile_smp_apmu_cpu_die, 68 .cpu_die = shmobile_smp_apmu_cpu_die,
69 .cpu_kill = shmobile_smp_apmu_cpu_kill, 69 .cpu_kill = shmobile_smp_apmu_cpu_kill,
70#endif 70#endif
diff --git a/arch/arm/mach-shmobile/smp-r8a7791.c b/arch/arm/mach-shmobile/smp-r8a7791.c
index 5e2d1db79afa..b2508c0d276b 100644
--- a/arch/arm/mach-shmobile/smp-r8a7791.c
+++ b/arch/arm/mach-shmobile/smp-r8a7791.c
@@ -58,7 +58,7 @@ struct smp_operations r8a7791_smp_ops __initdata = {
 	.smp_prepare_cpus	= r8a7791_smp_prepare_cpus,
 	.smp_boot_secondary	= r8a7791_smp_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_disable		= shmobile_smp_cpu_disable,
+	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
 	.cpu_die		= shmobile_smp_apmu_cpu_die,
 	.cpu_kill		= shmobile_smp_apmu_cpu_kill,
 #endif
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index d03aa11fb46d..bc2824a036e1 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -60,7 +60,7 @@ struct smp_operations sh73a0_smp_ops __initdata = {
 	.smp_prepare_cpus	= sh73a0_smp_prepare_cpus,
 	.smp_boot_secondary	= sh73a0_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_disable		= shmobile_smp_cpu_disable,
+	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
 	.cpu_die		= shmobile_smp_scu_cpu_die,
 	.cpu_kill		= shmobile_smp_scu_cpu_kill,
 #endif
diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c
index 7557bede7ae6..780bd13cd7e3 100644
--- a/arch/arm/mach-ux500/cache-l2x0.c
+++ b/arch/arm/mach-ux500/cache-l2x0.c
@@ -8,6 +8,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 
+#include <asm/outercache.h>
 #include <asm/hardware/cache-l2x0.h>
 
 #include "db8500-regs.h"
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index ba708ce08616..f80560318c58 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -20,10 +20,10 @@
 #include <linux/mfd/dbx500-prcmu.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/regulator/machine.h>
 #include <linux/random.h>
 
-#include <asm/pmu.h>
 #include <asm/mach/map.h>
 
 #include "setup.h"
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7c6b976ab8d3..df7537f12469 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -883,6 +883,7 @@ config OUTER_CACHE
 
 config OUTER_CACHE_SYNC
 	bool
+	select ARM_HEAVY_MB
 	help
 	  The outer cache has a outer_cache_fns.sync function pointer
 	  that can be used to drain the write buffer of the outer cache.
@@ -1031,6 +1032,9 @@ config ARCH_HAS_BARRIERS
 	  This option allows the use of custom mandatory barriers
 	  included via the mach/barriers.h file.
 
+config ARM_HEAVY_MB
+	bool
+
 config ARCH_SUPPORTS_BIG_ENDIAN
 	bool
 	help
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S
index 54473cd4aba9..b3b31e30cadd 100644
--- a/arch/arm/mm/abort-ev4.S
+++ b/arch/arm/mm/abort-ev4.S
@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	ldr	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable userspace access
 	bic	r1, r1, #1 << 11 | 1 << 10	@ clear bits 11 and 10 of FSR
 	tst	r3, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S
index a0908d4653a3..a6a381a6caa5 100644
--- a/arch/arm/mm/abort-ev5t.S
+++ b/arch/arm/mm/abort-ev5t.S
@@ -21,8 +21,10 @@ ENTRY(v5t_early_abort)
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable user access
 	bic	r1, r1, #1 << 11		@ clear bits 11 of FSR
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ check write
 	orreq	r1, r1, #1 << 11
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S
index 4006b7a61264..00ab011bef58 100644
--- a/arch/arm/mm/abort-ev5tj.S
+++ b/arch/arm/mm/abort-ev5tj.S
@@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort)
 	bne	do_DataAbort
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
-	do_ldrd_abort tmp=ip, insn=r3
+	uaccess_disable ip			@ disable userspace access
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8c48c5c22a33..8801a15aa105 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,16 +26,18 @@ ENTRY(v6_early_abort)
 	ldr	ip, =0x4107b36
 	mrc	p15, 0, r3, c0, c0, 0		@ get processor id
 	teq	ip, r3, lsr #4			@ r0 ARM1136?
-	bne	do_DataAbort
+	bne	1f
 	tst	r5, #PSR_J_BIT			@ Java?
 	tsteq	r5, #PSR_T_BIT			@ Thumb?
-	bne	do_DataAbort
+	bne	1f
 	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
 	ldr	r3, [r4]			@ read aborted ARM instruction
  ARM_BE8(rev	r3, r3)
 
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	1f				@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 #endif
+1:	uaccess_disable ip			@ disable userspace access
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 4812ad054214..e8d0e08c227f 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -15,6 +15,7 @@
 ENTRY(v7_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
+	uaccess_disable ip			@ disable userspace access
 
 	/*
 	 * V6 code adjusts the returned DFSR.
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index f3982580c273..6d8e8e3365d1 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
 #endif
 	bne	.data_thumb_abort
 	ldr	r8, [r4]			@ read arm instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
 	and	r7, r8, #15 << 24
@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
 
 .data_thumb_abort:
 	ldrh	r8, [r4]			@ read instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 11			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 8			@ yes
 	and	r7, r8, #15 << 12
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 2cbf68ef0e83..4509bee4e081 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -13,6 +13,7 @@
 	tst	\psr, #PSR_T_BIT
 	beq	not_thumb
 	ldrh	\tmp, [\pc]			@ Read aborted Thumb instruction
+	uaccess_disable ip			@ disable userspace access
 	and	\tmp, \tmp, # 0xfe00		@ Mask opcode field
 	cmp	\tmp, # 0x5600			@ Is it ldrsb?
 	orreq	\tmp, \tmp, #1 << 11		@ Set L-bit if yes
@@ -29,12 +30,9 @@ not_thumb:
  * [7:4] == 1101
  * [20] == 0
  */
-	.macro	do_ldrd_abort, tmp, insn
-	tst	\insn, #0x0e100000		@ [27:25,20] == 0
-	bne	not_ldrd
-	and	\tmp, \insn, #0x000000f0	@ [7:4] == 1101
-	cmp	\tmp, #0x000000d0
-	beq	do_DataAbort
-not_ldrd:
+	.macro	teq_ldrd, tmp, insn
+	mov	\tmp, #0x0e100000
+	orr	\tmp, #0x000000f0
+	and	\tmp, \insn, \tmp
+	teq	\tmp, #0x000000d0
 	.endm
-
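
The rewritten macro computes the same predicate as before but leaves the result in the condition flags instead of branching itself; the callers now do the beq. In C terms the whole test is one mask-and-compare (a standalone sketch; insn_is_ldrd is not a kernel function):

	#include <stdint.h>

	/* ARM LDRD encoding: bits [27:25] == 000, [20] == 0, [7:4] == 1101 */
	static inline int insn_is_ldrd(uint32_t insn)
	{
		return (insn & 0x0e1000f0u) == 0x000000d0u;
	}
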
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 097181e08c25..5c1b7a7b9af6 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -368,7 +368,6 @@ int __init feroceon_of_init(void)
 	struct device_node *node;
 	void __iomem *base;
 	bool l2_wt_override = false;
-	struct resource res;
 
 #if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
 	l2_wt_override = true;
@@ -376,10 +375,7 @@ int __init feroceon_of_init(void)
 
 	node = of_find_matching_node(NULL, feroceon_ids);
 	if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
-		if (of_address_to_resource(node, 0, &res))
-			return -ENODEV;
-
-		base = ioremap(res.start, resource_size(&res));
+		base = of_iomap(node, 0);
 		if (!base)
 			return -ENOMEM;
 
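
of_iomap() folds the two-step resource lookup into one call; roughly this, with error handling elided (a sketch of the helper's behaviour, not its exact source):

	void __iomem *of_iomap_equivalent(struct device_node *np, int index)
	{
		struct resource res;

		if (of_address_to_resource(np, index, &res))
			return NULL;
		return ioremap(res.start, resource_size(&res));
	}

The one visible behaviour change in the hunk above is that an address-translation failure is now reported as -ENOMEM (via the NULL check) rather than -ENODEV.
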
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 71b3d3309024..493692d838c6 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1171,6 +1171,11 @@ static void __init l2c310_of_parse(const struct device_node *np,
 		}
 	}
 
+	if (of_property_read_bool(np, "arm,shared-override")) {
+		*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
+		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
+	}
+
 	prefetch = l2x0_saved_regs.prefetch_ctrl;
 
 	ret = of_property_read_u32(np, "arm,double-linefill", &val);
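
The (aux_val, aux_mask) pair follows the usual l2x0 convention: at init time the driver applies the pair to the hardware AUX_CTRL value, so clearing a bit from the mask while setting it in the value forces that bit on. Schematically (a sketch, not the driver function):

	static unsigned int l2c_apply_aux(unsigned int aux,
					  unsigned int aux_val,
					  unsigned int aux_mask)
	{
		/* a bit cleared in aux_mask and set in aux_val is forced on */
		return (aux & aux_mask) | aux_val;
	}
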
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 3d3d6aa60c87..bf35abcc7d59 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -39,6 +39,7 @@
 #include <asm/system_info.h>
 #include <asm/dma-contiguous.h>
 
+#include "dma.h"
 #include "mm.h"
 
 /*
@@ -648,14 +649,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	size = PAGE_ALIGN(size);
 	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
-	if (is_coherent || nommu())
+	if (nommu())
+		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+	else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
+		addr = __alloc_from_contiguous(dev, size, prot, &page,
+					       caller, want_vaddr);
+	else if (is_coherent)
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
-	else if (!dev_get_cma_area(dev))
-		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
 	else
-		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
+		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
+					    caller, want_vaddr);
 
 	if (page)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -683,13 +688,12 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	void *memory;
 
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
-	return __dma_alloc(dev, size, handle, gfp, prot, true,
+	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
 			   attrs, __builtin_return_address(0));
 }
 
@@ -753,12 +757,12 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 	size = PAGE_ALIGN(size);
 
-	if (is_coherent || nommu()) {
+	if (nommu()) {
 		__dma_free_buffer(page, size);
-	} else if (__free_from_pool(cpu_addr, size)) {
+	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
 		return;
 	} else if (!dev_get_cma_area(dev)) {
-		if (want_vaddr)
+		if (want_vaddr && !is_coherent)
 			__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
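
After the reordering, __dma_alloc() picks its backend in a fixed priority order; the point of the change is that a coherent device with a CMA area now allocates from CMA rather than unconditionally taking the simple-buffer path. The decision list, as a standalone sketch of the logic above:

	#include <stdbool.h>

	static const char *dma_alloc_path(bool nommu, bool has_cma,
					  bool can_sleep, bool is_coherent)
	{
		if (nommu)
			return "simple buffer";	/* no remapping without an MMU */
		if (has_cma && can_sleep)
			return "CMA";		/* now also used by coherent devices */
		if (is_coherent)
			return "simple buffer";	/* no kernel remap needed */
		if (!can_sleep)
			return "atomic pool";	/* pre-mapped, safe in atomic context */
		return "remapped buffer";
	}
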
diff --git a/arch/arm/mm/dma.h b/arch/arm/mm/dma.h
new file mode 100644
index 000000000000..70ea6852f94e
--- /dev/null
+++ b/arch/arm/mm/dma.h
@@ -0,0 +1,32 @@
+#ifndef DMA_H
+#define DMA_H
+
+#include <asm/glue-cache.h>
+
+#ifndef MULTI_CACHE
+#define dmac_map_area		__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area		__glue(_CACHE,_dma_unmap_area)
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
+
+#else
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+#define dmac_map_area		cpu_cache.dma_map_area
+#define dmac_unmap_area		cpu_cache.dma_unmap_area
+
+#endif
+
+#endif
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 34b66af516ea..1ec8e7590fc6 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -21,6 +21,21 @@
 
 #include "mm.h"
 
+#ifdef CONFIG_ARM_HEAVY_MB
+void (*soc_mb)(void);
+
+void arm_heavy_mb(void)
+{
+#ifdef CONFIG_OUTER_CACHE_SYNC
+	if (outer_cache.sync)
+		outer_cache.sync();
+#endif
+	if (soc_mb)
+		soc_mb();
+}
+EXPORT_SYMBOL(arm_heavy_mb);
+#endif
+
 #ifdef CONFIG_CPU_CACHE_VIPT
 
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
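
arm_heavy_mb() gives mandatory barriers a hook beyond outer-cache sync: a platform can install a write-buffer drain through the soc_mb pointer. Presumably the OMAP changes elsewhere in this series wire their interconnect drain up along these lines (a kernel-flavoured sketch; the init function name is hypothetical, omap_interconnect_sync comes from the sleep44xx.S hunk above):

	extern void (*soc_mb)(void);

	static void omap_interconnect_sync(void)
	{
		/* drain the L3 interconnect write buffers here */
	}

	static void __init omap_barrier_install(void)
	{
		soc_mb = omap_interconnect_sync;	/* arm_heavy_mb() will call it */
	}
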
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index ee8dfa793989..9df5f09585ca 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@ void *kmap_atomic(struct page *page)
 
 	type = kmap_atomic_idx_push();
 
-	idx = type + KM_TYPE_NR * smp_processor_id();
+	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
@@ -106,7 +106,7 @@ void __kunmap_atomic(void *kvaddr)
 
 	if (kvaddr >= (void *)FIXADDR_START) {
 		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
+		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
@@ -138,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 		return page_address(page);
 
 	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR * smp_processor_id();
+	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
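
All three hunks fix the same off-by-a-region bug: the kmap_atomic slots live inside the fixmap area, so the slot index must be offset by FIX_KMAP_BEGIN before __fix_to_virt() is applied. A worked example with illustrative constants (the real values come from asm/fixmap.h and asm/kmap_types.h):

	enum { KM_TYPE_NR = 16, FIX_KMAP_BEGIN = 4 };	/* illustrative values */

	static unsigned int kmap_idx(unsigned int type, unsigned int cpu)
	{
		/* e.g. type 3 on CPU 2: 4 + 3 + 16 * 2 = fixmap slot 39 */
		return FIX_KMAP_BEGIN + type + KM_TYPE_NR * cpu;
	}
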
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 870838a46d52..7cd15143a507 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_HIGH_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_USER | L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_MEMORY_RWX] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
@@ -357,6 +357,47 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
+static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
+
+static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+	__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
+
+static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return &bm_pte[pte_index(addr)];
+}
+
+static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return pte_offset_kernel(dir, addr);
+}
+
+static inline pmd_t * __init fixmap_pmd(unsigned long addr)
+{
+	pgd_t *pgd = pgd_offset_k(addr);
+	pud_t *pud = pud_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
+
+	return pmd;
+}
+
+void __init early_fixmap_init(void)
+{
+	pmd_t *pmd;
+
+	/*
+	 * The early fixmap range spans multiple pmds, for which
+	 * we are not prepared:
+	 */
+	BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+		     != FIXADDR_TOP >> PMD_SHIFT);
+
+	pmd = fixmap_pmd(FIXADDR_TOP);
+	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+	pte_offset_fixmap = pte_offset_early_fixmap;
+}
+
 /*
  * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
  * As a result, this can only be called with preemption disabled, as under
@@ -365,7 +406,7 @@ EXPORT_SYMBOL(get_mem_type);
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
-	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+	pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
 
 	/* Make sure fixmap region does not exceed available allocation. */
 	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
@@ -855,7 +896,7 @@ static void __init create_mapping(struct map_desc *md)
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-	    md->virtual >= PAGE_OFFSET &&
+	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
 	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
 		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
 			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -1219,10 +1260,10 @@ void __init arm_mm_memblock_reserve(void)
 
 /*
  * Set up the device mappings.  Since we clear out the page tables for all
- * mappings above VMALLOC_START, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function.  This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+ * mappings above VMALLOC_START, except early fixmap, we might remove debug
+ * device mappings.  This means earlycon can be used to debug this function.
+ * Any other function or debugging method which may touch any device _will_
+ * crash the kernel.
  */
 static void __init devicemaps_init(const struct machine_desc *mdesc)
 {
@@ -1237,7 +1278,10 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 
 	early_trap_init(vectors);
 
-	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+	/*
+	 * Clear page table except top pmd used by early fixmaps
+	 */
+	for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
@@ -1489,6 +1533,35 @@ void __init early_paging_init(const struct machine_desc *mdesc)
 
 #endif
 
+static void __init early_fixmap_shutdown(void)
+{
+	int i;
+	unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
+
+	pte_offset_fixmap = pte_offset_late_fixmap;
+	pmd_clear(fixmap_pmd(va));
+	local_flush_tlb_kernel_page(va);
+
+	for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
+		pte_t *pte;
+		struct map_desc map;
+
+		map.virtual = fix_to_virt(i);
+		pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
+
+		/* Only i/o device mappings are supported ATM */
+		if (pte_none(*pte) ||
+		    (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
+			continue;
+
+		map.pfn = pte_pfn(*pte);
+		map.type = MT_DEVICE;
+		map.length = PAGE_SIZE;
+
+		create_mapping(&map);
+	}
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
@@ -1502,6 +1575,7 @@ void __init paging_init(const struct machine_desc *mdesc)
 	map_lowmem();
 	memblock_set_current_limit(arm_lowmem_limit);
 	dma_contiguous_remap();
+	early_fixmap_shutdown();
 	devicemaps_init(mdesc);
 	kmap_init();
 	tcm_init();
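
Taken together, the mmu.c hunks implement a small state machine for the fixmap: before paging_init() the fixmap PTEs live in the static bm_pte table, and early_fixmap_shutdown() later replays any live device mapping (typically the earlycon UART) into the real tables before flipping the lookup pointer. In outline (a reading of the hunks above, not compilable code):

	/*
	 * setup_arch()
	 *   early_fixmap_init()        fixmap PTE lookups served from bm_pte
	 *   ... earlycon maps its UART through __set_fixmap() ...
	 *   paging_init()
	 *     early_fixmap_shutdown()  replays DEV_SHARED fixmap slots through
	 *                              create_mapping() and switches lookups to
	 *                              pte_offset_late_fixmap
	 *     devicemaps_init()        clears vmalloc pmds, sparing the fixmap pmd
	 */
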
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3681f11dd9f..e683db1b90a3 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	if (!new_pte)
 		goto no_pte;
 
+#ifndef CONFIG_ARM_LPAE
+	/*
+	 * Modify the PTE pointer to have the correct domain.  This
+	 * needs to be the vectors domain to avoid the low vectors
+	 * being unmapped.
+	 */
+	pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+	pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
 	init_pud = pud_offset(init_pgd, 0);
 	init_pmd = pmd_offset(init_pud, 0);
 	init_pte = pte_offset_map(init_pmd, 0);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b7b9ceaa684a..51832ad33fa9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -20,6 +20,7 @@ config ARM64
 	select ARM_GIC_V2M if PCI_MSI
 	select ARM_GIC_V3
 	select ARM_GIC_V3_ITS if PCI_MSI
+	select ARM_PSCI_FW
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS
 	select COMMON_CLK
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 406485ed110a..208cec08a74f 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -12,11 +12,11 @@
 #ifndef _ASM_ACPI_H
 #define _ASM_ACPI_H
 
-#include <linux/mm.h>
 #include <linux/irqchip/arm-gic-acpi.h>
+#include <linux/mm.h>
+#include <linux/psci.h>
 
 #include <asm/cputype.h>
-#include <asm/psci.h>
 #include <asm/smp_plat.h>
 
 /* Macros for consistency checks of the GICC subtable of MADT */
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
deleted file mode 100644
index 49d7e1aaebdc..000000000000
--- a/arch/arm64/include/asm/psci.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2013 ARM Limited
- */
-
-#ifndef __ASM_PSCI_H
-#define __ASM_PSCI_H
-
-int __init psci_dt_init(void);
-
-#ifdef CONFIG_ACPI
-int __init psci_acpi_init(void);
-bool __init acpi_psci_present(void);
-bool __init acpi_psci_use_hvc(void);
-#else
-static inline int psci_acpi_init(void) { return 0; }
-static inline bool acpi_psci_present(void) { return false; }
-#endif
-
-#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 869f202748e8..51fd15a16461 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -18,23 +18,17 @@
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/smp.h>
-#include <linux/reboot.h>
-#include <linux/pm.h>
 #include <linux/delay.h>
+#include <linux/psci.h>
 #include <linux/slab.h>
+
 #include <uapi/linux/psci.h>
 
 #include <asm/compiler.h>
-#include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/errno.h>
-#include <asm/psci.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
-#include <asm/system_misc.h>
-
-#define PSCI_POWER_STATE_TYPE_STANDBY 0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
 
 static bool psci_power_state_loses_context(u32 state)
 {
@@ -50,122 +44,8 @@ static bool psci_power_state_is_valid(u32 state)
 	return !(state & ~valid_mask);
 }
 
-/*
- * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
- * calls to its resident CPU, so we must avoid issuing those. We never migrate
- * a Trusted OS even if it claims to be capable of migration -- doing so will
- * require cooperation with a Trusted OS driver.
- */
-static int resident_cpu = -1;
-
-struct psci_operations {
-	int (*cpu_suspend)(u32 state, unsigned long entry_point);
-	int (*cpu_off)(u32 state);
-	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
-	int (*migrate)(unsigned long cpuid);
-	int (*affinity_info)(unsigned long target_affinity,
-			unsigned long lowest_affinity_level);
-	int (*migrate_info_type)(void);
-};
-
-static struct psci_operations psci_ops;
-
-typedef unsigned long (psci_fn)(unsigned long, unsigned long,
-				unsigned long, unsigned long);
-asmlinkage psci_fn __invoke_psci_fn_hvc;
-asmlinkage psci_fn __invoke_psci_fn_smc;
-static psci_fn *invoke_psci_fn;
-
-enum psci_function {
-	PSCI_FN_CPU_SUSPEND,
-	PSCI_FN_CPU_ON,
-	PSCI_FN_CPU_OFF,
-	PSCI_FN_MIGRATE,
-	PSCI_FN_MAX,
-};
-
 static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
 
-static u32 psci_function_id[PSCI_FN_MAX];
-
-static int psci_to_linux_errno(int errno)
-{
-	switch (errno) {
-	case PSCI_RET_SUCCESS:
-		return 0;
-	case PSCI_RET_NOT_SUPPORTED:
-		return -EOPNOTSUPP;
-	case PSCI_RET_INVALID_PARAMS:
-		return -EINVAL;
-	case PSCI_RET_DENIED:
-		return -EPERM;
-	};
-
-	return -EINVAL;
-}
-
-static u32 psci_get_version(void)
-{
-	return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
-}
-
-static int psci_cpu_suspend(u32 state, unsigned long entry_point)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
-	err = invoke_psci_fn(fn, state, entry_point, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_off(u32 state)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_OFF];
-	err = invoke_psci_fn(fn, state, 0, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_ON];
-	err = invoke_psci_fn(fn, cpuid, entry_point, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_migrate(unsigned long cpuid)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_MIGRATE];
-	err = invoke_psci_fn(fn, cpuid, 0, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_affinity_info(unsigned long target_affinity,
-		unsigned long lowest_affinity_level)
-{
-	return invoke_psci_fn(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity,
-			      lowest_affinity_level, 0);
-}
-
-static int psci_migrate_info_type(void)
-{
-	return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
-}
-
-static unsigned long psci_migrate_info_up_cpu(void)
-{
-	return invoke_psci_fn(PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, 0, 0, 0);
-}
-
 static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
 {
 	int i, ret, count = 0;
@@ -230,238 +110,6 @@ free_mem:
 	return ret;
 }
 
-static int get_set_conduit_method(struct device_node *np)
-{
-	const char *method;
-
-	pr_info("probing for conduit method from DT.\n");
-
-	if (of_property_read_string(np, "method", &method)) {
-		pr_warn("missing \"method\" property\n");
-		return -ENXIO;
-	}
-
-	if (!strcmp("hvc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_hvc;
-	} else if (!strcmp("smc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_smc;
-	} else {
-		pr_warn("invalid \"method\" property: %s\n", method);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
-{
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
-}
-
-static void psci_sys_poweroff(void)
-{
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
-}
-
-/*
- * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
- * return DENIED (which would be fatal).
- */
-static void __init psci_init_migrate(void)
-{
-	unsigned long cpuid;
-	int type, cpu;
-
-	type = psci_ops.migrate_info_type();
-
-	if (type == PSCI_0_2_TOS_MP) {
-		pr_info("Trusted OS migration not required\n");
-		return;
-	}
-
-	if (type == PSCI_RET_NOT_SUPPORTED) {
-		pr_info("MIGRATE_INFO_TYPE not supported.\n");
-		return;
-	}
-
-	if (type != PSCI_0_2_TOS_UP_MIGRATE &&
-	    type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
-		pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
-		return;
-	}
-
-	cpuid = psci_migrate_info_up_cpu();
-	if (cpuid & ~MPIDR_HWID_BITMASK) {
-		pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
-			cpuid);
-		return;
-	}
-
-	cpu = get_logical_index(cpuid);
-	resident_cpu = cpu >= 0 ? cpu : -1;
-
-	pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
-}
-
-static void __init psci_0_2_set_functions(void)
-{
-	pr_info("Using standard PSCI v0.2 function IDs\n");
-	psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
-	psci_ops.cpu_suspend = psci_cpu_suspend;
-
-	psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
-	psci_ops.cpu_off = psci_cpu_off;
-
-	psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
-	psci_ops.cpu_on = psci_cpu_on;
-
-	psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
-	psci_ops.migrate = psci_migrate;
-
-	psci_ops.affinity_info = psci_affinity_info;
-
-	psci_ops.migrate_info_type = psci_migrate_info_type;
-
-	arm_pm_restart = psci_sys_reset;
-
-	pm_power_off = psci_sys_poweroff;
-}
-
-/*
- * Probe function for PSCI firmware versions >= 0.2
- */
-static int __init psci_probe(void)
-{
-	u32 ver = psci_get_version();
-
-	pr_info("PSCIv%d.%d detected in firmware.\n",
-		PSCI_VERSION_MAJOR(ver),
-		PSCI_VERSION_MINOR(ver));
-
-	if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
-		pr_err("Conflicting PSCI version detected.\n");
-		return -EINVAL;
-	}
-
-	psci_0_2_set_functions();
-
-	psci_init_migrate();
-
-	return 0;
-}
-
-typedef int (*psci_initcall_t)(const struct device_node *);
-
-/*
- * PSCI init function for PSCI versions >=0.2
- *
- * Probe based on PSCI PSCI_VERSION function
- */
-static int __init psci_0_2_init(struct device_node *np)
-{
-	int err;
-
-	err = get_set_conduit_method(np);
-
-	if (err)
-		goto out_put_node;
-	/*
-	 * Starting with v0.2, the PSCI specification introduced a call
-	 * (PSCI_VERSION) that allows probing the firmware version, so
-	 * that PSCI function IDs and version specific initialization
-	 * can be carried out according to the specific version reported
-	 * by firmware
-	 */
-	err = psci_probe();
-
-out_put_node:
-	of_node_put(np);
-	return err;
-}
-
-/*
- * PSCI < v0.2 get PSCI Function IDs via DT.
- */
-static int __init psci_0_1_init(struct device_node *np)
-{
-	u32 id;
-	int err;
-
-	err = get_set_conduit_method(np);
-
-	if (err)
-		goto out_put_node;
-
-	pr_info("Using PSCI v0.1 Function IDs from DT\n");
-
-	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
-		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
-		psci_ops.cpu_suspend = psci_cpu_suspend;
-	}
-
-	if (!of_property_read_u32(np, "cpu_off", &id)) {
-		psci_function_id[PSCI_FN_CPU_OFF] = id;
-		psci_ops.cpu_off = psci_cpu_off;
-	}
-
-	if (!of_property_read_u32(np, "cpu_on", &id)) {
-		psci_function_id[PSCI_FN_CPU_ON] = id;
-		psci_ops.cpu_on = psci_cpu_on;
-	}
-
-	if (!of_property_read_u32(np, "migrate", &id)) {
-		psci_function_id[PSCI_FN_MIGRATE] = id;
-		psci_ops.migrate = psci_migrate;
-	}
-
-out_put_node:
-	of_node_put(np);
-	return err;
-}
-
-static const struct of_device_id psci_of_match[] __initconst = {
-	{ .compatible = "arm,psci",	.data = psci_0_1_init},
-	{ .compatible = "arm,psci-0.2",	.data = psci_0_2_init},
-	{},
-};
-
-int __init psci_dt_init(void)
-{
-	struct device_node *np;
-	const struct of_device_id *matched_np;
-	psci_initcall_t init_fn;
-
-	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
-
-	if (!np)
-		return -ENODEV;
-
-	init_fn = (psci_initcall_t)matched_np->data;
-	return init_fn(np);
-}
-
-#ifdef CONFIG_ACPI
-/*
- * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
- * explicitly clarified in SBBR
- */
-int __init psci_acpi_init(void)
-{
-	if (!acpi_psci_present()) {
-		pr_info("is not implemented in ACPI.\n");
-		return -EOPNOTSUPP;
-	}
-
-	pr_info("probing for conduit method from ACPI.\n");
-
-	if (acpi_psci_use_hvc())
-		invoke_psci_fn = __invoke_psci_fn_hvc;
-	else
-		invoke_psci_fn = __invoke_psci_fn_smc;
-
-	return psci_probe();
-}
-#endif
-
 #ifdef CONFIG_SMP
 
 static int __init cpu_psci_cpu_init(unsigned int cpu)
@@ -489,11 +137,6 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static bool psci_tos_resident_on(int cpu)
-{
-	return cpu == resident_cpu;
-}
-
 static int cpu_psci_cpu_disable(unsigned int cpu)
 {
 	/* Fail early if we don't have CPU_OFF support */
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 926ae8d9abc5..fdc11f05ac36 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -45,6 +45,7 @@
 #include <linux/of_platform.h>
 #include <linux/efi.h>
 #include <linux/personality.h>
+#include <linux/psci.h>
 
 #include <asm/acpi.h>
 #include <asm/fixmap.h>
@@ -60,7 +61,6 @@
 #include <asm/tlbflush.h>
 #include <asm/traps.h>
 #include <asm/memblock.h>
-#include <asm/psci.h>
 #include <asm/efi.h>
 #include <asm/virt.h>
 #include <asm/xen/hypervisor.h>
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 4e2e6aaf0b88..46b4a8e0f859 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -176,6 +176,8 @@ source "drivers/powercap/Kconfig"
 
 source "drivers/mcb/Kconfig"
 
+source "drivers/perf/Kconfig"
+
 source "drivers/ras/Kconfig"
 
 source "drivers/thunderbolt/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 4c270f5414f0..47a118aec76f 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -161,6 +161,7 @@ obj-$(CONFIG_NTB) += ntb/
 obj-$(CONFIG_FMC)		+= fmc/
 obj-$(CONFIG_POWERCAP)		+= powercap/
 obj-$(CONFIG_MCB)		+= mcb/
+obj-$(CONFIG_PERF_EVENTS)	+= perf/
 obj-$(CONFIG_RAS)		+= ras/
 obj-$(CONFIG_THUNDERBOLT)	+= thunderbolt/
 obj-$(CONFIG_CORESIGHT)		+= hwtracing/coresight/
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index c13feec89ea1..ea9728fde9b3 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -25,16 +25,21 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/platform_device.h>
+#include <linux/psci.h>
+
 #include <asm/cpuidle.h>
 #include <asm/suspend.h>
-#include <asm/psci.h>
+
+#include <uapi/linux/psci.h>
+
+#define CALXEDA_IDLE_PARAM \
+	((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \
+	 (0 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \
+	 (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT))
 
 static int calxeda_idle_finish(unsigned long val)
 {
-	const struct psci_power_state ps = {
-		.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
-	};
-	return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
+	return psci_ops.cpu_suspend(CALXEDA_IDLE_PARAM, __pa(cpu_resume));
 }
 
 static int calxeda_pwrdown_idle(struct cpuidle_device *dev,
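
With the mainline uapi shift values (state ID at bit 0, type at bit 16, affinity level at bit 24 in uapi/linux/psci.h, assumed here), the new macro packs down to a plain power-down request. A quick standalone check:

	#include <assert.h>

	int main(void)
	{
		unsigned int param = (0u << 0) | (0u << 24) | (1u << 16);

		assert(param == 0x00010000);	/* type = POWER_DOWN, id = 0, affl = 0 */
		return 0;
	}
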
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 99c69a3205c4..d8de6a8dd4de 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -5,6 +5,9 @@
 
 menu "Firmware Drivers"
 
+config ARM_PSCI_FW
+	bool
+
 config EDD
 	tristate "BIOS Enhanced Disk Drive calls determine boot disk"
 	depends on X86
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 4a4b897f9314..000830fc6707 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -1,6 +1,7 @@
 #
 # Makefile for the linux kernel.
 #
+obj-$(CONFIG_ARM_PSCI_FW)	+= psci.o
 obj-$(CONFIG_DMI)		+= dmi_scan.o
 obj-$(CONFIG_DMI_SYSFS)		+= dmi-sysfs.o
 obj-$(CONFIG_EDD)		+= edd.o
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
new file mode 100644
index 000000000000..42700f09a8c5
--- /dev/null
+++ b/drivers/firmware/psci.c
@@ -0,0 +1,382 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/printk.h>
+#include <linux/psci.h>
+#include <linux/reboot.h>
+
+#include <uapi/linux/psci.h>
+
+#include <asm/cputype.h>
+#include <asm/system_misc.h>
+#include <asm/smp_plat.h>
+
+/*
+ * While a 64-bit OS can make calls with SMC32 calling conventions, for some
+ * calls it is necessary to use SMC64 to pass or return 64-bit values. For such
+ * calls PSCI_0_2_FN_NATIVE(x) will choose the appropriate (native-width)
+ * function ID.
+ */
+#ifdef CONFIG_64BIT
+#define PSCI_0_2_FN_NATIVE(name)	PSCI_0_2_FN64_##name
+#else
+#define PSCI_0_2_FN_NATIVE(name)	PSCI_0_2_FN_##name
+#endif
+
+/*
+ * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
+ * calls to its resident CPU, so we must avoid issuing those. We never migrate
+ * a Trusted OS even if it claims to be capable of migration -- doing so will
+ * require cooperation with a Trusted OS driver.
+ */
+static int resident_cpu = -1;
+
+bool psci_tos_resident_on(int cpu)
+{
+	return cpu == resident_cpu;
+}
+
+struct psci_operations psci_ops;
+
+typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+				unsigned long, unsigned long);
+asmlinkage psci_fn __invoke_psci_fn_hvc;
+asmlinkage psci_fn __invoke_psci_fn_smc;
+static psci_fn *invoke_psci_fn;
+
+enum psci_function {
+	PSCI_FN_CPU_SUSPEND,
+	PSCI_FN_CPU_ON,
+	PSCI_FN_CPU_OFF,
+	PSCI_FN_MIGRATE,
+	PSCI_FN_MAX,
+};
+
+static u32 psci_function_id[PSCI_FN_MAX];
+
+static int psci_to_linux_errno(int errno)
+{
+	switch (errno) {
+	case PSCI_RET_SUCCESS:
+		return 0;
+	case PSCI_RET_NOT_SUPPORTED:
+		return -EOPNOTSUPP;
+	case PSCI_RET_INVALID_PARAMS:
+		return -EINVAL;
+	case PSCI_RET_DENIED:
+		return -EPERM;
+	};
+
+	return -EINVAL;
+}
+
+static u32 psci_get_version(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
+}
+
+static int psci_cpu_suspend(u32 state, unsigned long entry_point)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
+	err = invoke_psci_fn(fn, state, entry_point, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_off(u32 state)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_OFF];
+	err = invoke_psci_fn(fn, state, 0, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_ON];
+	err = invoke_psci_fn(fn, cpuid, entry_point, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_migrate(unsigned long cpuid)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_MIGRATE];
+	err = invoke_psci_fn(fn, cpuid, 0, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_affinity_info(unsigned long target_affinity,
+		unsigned long lowest_affinity_level)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_NATIVE(AFFINITY_INFO),
+			      target_affinity, lowest_affinity_level, 0);
+}
+
+static int psci_migrate_info_type(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
+}
+
+static unsigned long psci_migrate_info_up_cpu(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_NATIVE(MIGRATE_INFO_UP_CPU),
+			      0, 0, 0);
+}
+
+static int get_set_conduit_method(struct device_node *np)
+{
+	const char *method;
+
+	pr_info("probing for conduit method from DT.\n");
+
+	if (of_property_read_string(np, "method", &method)) {
+		pr_warn("missing \"method\" property\n");
+		return -ENXIO;
+	}
+
+	if (!strcmp("hvc", method)) {
+		invoke_psci_fn = __invoke_psci_fn_hvc;
+	} else if (!strcmp("smc", method)) {
+		invoke_psci_fn = __invoke_psci_fn_smc;
+	} else {
+		pr_warn("invalid \"method\" property: %s\n", method);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
+{
+	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+}
+
+static void psci_sys_poweroff(void)
+{
+	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
+}
+
+/*
+ * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
+ * return DENIED (which would be fatal).
+ */
+static void __init psci_init_migrate(void)
+{
+	unsigned long cpuid;
+	int type, cpu = -1;
+
+	type = psci_ops.migrate_info_type();
+
+	if (type == PSCI_0_2_TOS_MP) {
+		pr_info("Trusted OS migration not required\n");
+		return;
+	}
+
+	if (type == PSCI_RET_NOT_SUPPORTED) {
+		pr_info("MIGRATE_INFO_TYPE not supported.\n");
+		return;
+	}
+
+	if (type != PSCI_0_2_TOS_UP_MIGRATE &&
+	    type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
+		pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
+		return;
+	}
+
+	cpuid = psci_migrate_info_up_cpu();
+	if (cpuid & ~MPIDR_HWID_BITMASK) {
+		pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
+			cpuid);
+		return;
+	}
+
+	cpu = get_logical_index(cpuid);
+	resident_cpu = cpu >= 0 ? cpu : -1;
+
+	pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
+}
+
+static void __init psci_0_2_set_functions(void)
+{
+	pr_info("Using standard PSCI v0.2 function IDs\n");
+	psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_NATIVE(CPU_SUSPEND);
+	psci_ops.cpu_suspend = psci_cpu_suspend;
+
+	psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
+	psci_ops.cpu_off = psci_cpu_off;
+
+	psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_NATIVE(CPU_ON);
+	psci_ops.cpu_on = psci_cpu_on;
+
+	psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_NATIVE(MIGRATE);
+	psci_ops.migrate = psci_migrate;
+
+	psci_ops.affinity_info = psci_affinity_info;
+
+	psci_ops.migrate_info_type = psci_migrate_info_type;
+
+	arm_pm_restart = psci_sys_reset;
+
+	pm_power_off = psci_sys_poweroff;
+}
+
+/*
+ * Probe function for PSCI firmware versions >= 0.2
+ */
+static int __init psci_probe(void)
+{
+	u32 ver = psci_get_version();
+
+	pr_info("PSCIv%d.%d detected in firmware.\n",
+		PSCI_VERSION_MAJOR(ver),
+		PSCI_VERSION_MINOR(ver));
+
+	if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
+		pr_err("Conflicting PSCI version detected.\n");
+		return -EINVAL;
+	}
+
+	psci_0_2_set_functions();
+
+	psci_init_migrate();
+
+	return 0;
+}
+
+typedef int (*psci_initcall_t)(const struct device_node *);
+
+/*
+ * PSCI init function for PSCI versions >=0.2
+ *
+ * Probe based on PSCI PSCI_VERSION function
+ */
+static int __init psci_0_2_init(struct device_node *np)
+{
+	int err;
+
+	err = get_set_conduit_method(np);
+
+	if (err)
+		goto out_put_node;
+	/*
+	 * Starting with v0.2, the PSCI specification introduced a call
+	 * (PSCI_VERSION) that allows probing the firmware version, so
+	 * that PSCI function IDs and version specific initialization
+	 * can be carried out according to the specific version reported
+	 * by firmware
+	 */
+	err = psci_probe();
+
+out_put_node:
+	of_node_put(np);
+	return err;
+}
+
+/*
+ * PSCI < v0.2 get PSCI Function IDs via DT.
+ */
+static int __init psci_0_1_init(struct device_node *np)
+{
+	u32 id;
+	int err;
+
+	err = get_set_conduit_method(np);
+
+	if (err)
+		goto out_put_node;
+
+	pr_info("Using PSCI v0.1 Function IDs from DT\n");
+
+	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
+		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
+		psci_ops.cpu_suspend = psci_cpu_suspend;
+	}
+
+	if (!of_property_read_u32(np, "cpu_off", &id)) {
+		psci_function_id[PSCI_FN_CPU_OFF] = id;
+		psci_ops.cpu_off = psci_cpu_off;
+	}
+
+	if (!of_property_read_u32(np, "cpu_on", &id)) {
+		psci_function_id[PSCI_FN_CPU_ON] = id;
+		psci_ops.cpu_on = psci_cpu_on;
+	}
+
+	if (!of_property_read_u32(np, "migrate", &id)) {
+		psci_function_id[PSCI_FN_MIGRATE] = id;
+		psci_ops.migrate = psci_migrate;
+	}
+
+out_put_node:
+	of_node_put(np);
+	return err;
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+	{ .compatible = "arm,psci",	.data = psci_0_1_init},
+	{ .compatible = "arm,psci-0.2",	.data = psci_0_2_init},
+	{},
+};
+
+int __init psci_dt_init(void)
+{
+	struct device_node *np;
+	const struct of_device_id *matched_np;
+	psci_initcall_t init_fn;
+
+	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
+
+	if (!np)
+		return -ENODEV;
+
+	init_fn = (psci_initcall_t)matched_np->data;
+	return init_fn(np);
+}
+
+#ifdef CONFIG_ACPI
+/*
+ * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
+ * explicitly clarified in SBBR
+ */
+int __init psci_acpi_init(void)
+{
+	if (!acpi_psci_present()) {
+		pr_info("is not implemented in ACPI.\n");
+		return -EOPNOTSUPP;
+	}
+
+	pr_info("probing for conduit method from ACPI.\n");
+
+	if (acpi_psci_use_hvc())
+		invoke_psci_fn = __invoke_psci_fn_hvc;
+	else
+		invoke_psci_fn = __invoke_psci_fn_smc;
+
+	return psci_probe();
+}
+#endif
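
Clients of the consolidated driver now go through the psci_ops table exported via <linux/psci.h> instead of per-arch copies. A minimal sketch of a hotplug-side caller (the helper is illustrative; psci_ops and psci_tos_resident_on come from the file above):

	static int try_cpu_off(unsigned int cpu, u32 state)
	{
		if (!psci_ops.cpu_off)
			return -EOPNOTSUPP;	/* v0.1 DT may not provide CPU_OFF */
		if (psci_tos_resident_on(cpu))
			return -EPERM;		/* the Trusted OS would reject CPU_OFF */
		return psci_ops.cpu_off(state);
	}
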
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 1bd6f9c34331..29e6850665eb 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/qcom_scm.h>
 
-#include <asm/outercache.h>
 #include <asm/cacheflush.h>
 
 #include "qcom_scm.h"
@@ -219,8 +218,7 @@ static int __qcom_scm_call(const struct qcom_scm_command *cmd)
 	 * Flush the command buffer so that the secure world sees
 	 * the correct data.
 	 */
-	__cpuc_flush_dcache_area((void *)cmd, cmd->len);
-	outer_flush_range(cmd_addr, cmd_addr + cmd->len);
+	secure_flush_area(cmd, cmd->len);
 
 	ret = smc(cmd_addr);
 	if (ret < 0)
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
new file mode 100644
index 000000000000..d9de36ee165d
--- /dev/null
+++ b/drivers/perf/Kconfig
@@ -0,0 +1,15 @@
+#
+# Performance Monitor Drivers
+#
+
+menu "Performance monitor support"
+
+config ARM_PMU
+	depends on PERF_EVENTS && ARM
+	bool "ARM PMU framework"
+	default y
+	help
+	  Say y if you want to use CPU performance monitors on ARM-based
+	  systems.
+
+endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
new file mode 100644
index 000000000000..acd2397ded94
--- /dev/null
+++ b/drivers/perf/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ARM_PMU) += arm_pmu.o
diff --git a/arch/arm/kernel/perf_event.c b/drivers/perf/arm_pmu.c
index 54272e0be713..2365a32a595e 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/drivers/perf/arm_pmu.c
@@ -15,7 +15,8 @@
 #include <linux/cpumask.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -24,7 +25,6 @@
 
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
-#include <asm/pmu.h>
 
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
@@ -790,52 +790,77 @@ static int probe_current_pmu(struct arm_pmu *pmu,
 
 static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 {
-	int i, irq, *irqs;
+	int *irqs, i = 0;
+	bool using_spi = false;
 	struct platform_device *pdev = pmu->plat_device;
 
-	/* Don't bother with PPIs; they're already affine */
-	irq = platform_get_irq(pdev, 0);
-	if (irq >= 0 && irq_is_percpu(irq))
-		return 0;
-
 	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
 	if (!irqs)
 		return -ENOMEM;
 
-	for (i = 0; i < pdev->num_resources; ++i) {
+	do {
 		struct device_node *dn;
-		int cpu;
+		int cpu, irq;
 
-		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
-				      i);
-		if (!dn) {
-			pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
-				of_node_full_name(pdev->dev.of_node), i);
+		/* See if we have an affinity entry */
+		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
+		if (!dn)
 			break;
+
+		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
+		irq = platform_get_irq(pdev, i);
+		if (irq >= 0) {
+			bool spi = !irq_is_percpu(irq);
+
+			if (i > 0 && spi != using_spi) {
+				pr_err("PPI/SPI IRQ type mismatch for %s!\n",
+					dn->name);
+				kfree(irqs);
+				return -EINVAL;
+			}
+
+			using_spi = spi;
 		}
 
+		/* Now look up the logical CPU number */
 		for_each_possible_cpu(cpu)
-			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+			if (dn == of_cpu_device_node_get(cpu))
 				break;
 
 		if (cpu >= nr_cpu_ids) {
 			pr_warn("Failed to find logical CPU for %s\n",
 				dn->name);
 			of_node_put(dn);
+			cpumask_setall(&pmu->supported_cpus);
 			break;
 		}
 		of_node_put(dn);
 
-		irqs[i] = cpu;
+		/* For SPIs, we need to track the affinity per IRQ */
+		if (using_spi) {
+			if (i >= pdev->num_resources) {
+				of_node_put(dn);
+				break;
+			}
+
+			irqs[i] = cpu;
+		}
+
+		/* Keep track of the CPUs containing this PMU type */
 		cpumask_set_cpu(cpu, &pmu->supported_cpus);
-	}
+		of_node_put(dn);
+		i++;
+	} while (1);
+
+	/* If we didn't manage to parse anything, claim to support all CPUs */
+	if (cpumask_weight(&pmu->supported_cpus) == 0)
+		cpumask_setall(&pmu->supported_cpus);
 
-	if (i == pdev->num_resources) {
+	/* If we matched up the IRQ affinities, use them to route the SPIs */
+	if (using_spi && i == pdev->num_resources)
 		pmu->irq_affinity = irqs;
-	} else {
+	else
 		kfree(irqs);
-		cpumask_setall(&pmu->supported_cpus);
-	}
 
 	return 0;
 }
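
The net effect of the rework above: pmu->irq_affinity maps each SPI index to the logical CPU named by the corresponding interrupt-affinity phandle, while PPIs need no table since they are per-CPU by construction. A hedged sketch of how the table is then consumed when the IRQs are requested, condensed from the request path in this file (handler registration and per-CPU data elided):

	for (i = 0; i < num_irqs; i++) {
		int irq = platform_get_irq(pmu_device, i);
		int cpu = i;

		if (irq < 0)
			continue;

		if (cpu_pmu->irq_affinity)
			cpu = cpu_pmu->irq_affinity[i];

		/* Route each SPI to the CPU its affinity entry names */
		if (irq_set_affinity(irq, cpumask_of(cpu)))
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
	}
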
diff --git a/arch/arm/include/asm/pmu.h b/include/linux/perf/arm_pmu.h
index 3fc87dfd77e6..bfa673bb822d 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -30,7 +30,7 @@ struct arm_pmu_platdata {
 			     irq_handler_t pmu_handler);
 };
 
-#ifdef CONFIG_HW_PERF_EVENTS
+#ifdef CONFIG_ARM_PMU
 
 /*
  * The ARMv7 CPU PMU supports up to 32 event counters.
@@ -149,6 +149,6 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 			 const struct of_device_id *of_table,
 			 const struct pmu_probe_info *probe_table);
 
-#endif /* CONFIG_HW_PERF_EVENTS */
+#endif /* CONFIG_ARM_PMU */
 
 #endif /* __ARM_PMU_H__ */
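
With the header relocated, individual PMU drivers include <linux/perf/arm_pmu.h> and register through arm_pmu_device_probe() exactly as before. A sketch of that glue, with names borrowed from arch/arm/kernel/perf_event_v7.c and the table contents abbreviated:

	#include <linux/perf/arm_pmu.h>

	static const struct of_device_id armv7_pmu_of_device_ids[] = {
		{ .compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init },
		/* ... */
		{},
	};

	static int armv7_pmu_device_probe(struct platform_device *pdev)
	{
		return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
					    armv7_pmu_probe_table);
	}
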
diff --git a/include/linux/psci.h b/include/linux/psci.h
new file mode 100644
index 000000000000..a682fcc91c33
--- /dev/null
+++ b/include/linux/psci.h
@@ -0,0 +1,52 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#ifndef __LINUX_PSCI_H
+#define __LINUX_PSCI_H
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+#define PSCI_POWER_STATE_TYPE_STANDBY		0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
+
+bool psci_tos_resident_on(int cpu);
+
+struct psci_operations {
+	int (*cpu_suspend)(u32 state, unsigned long entry_point);
+	int (*cpu_off)(u32 state);
+	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+	int (*migrate)(unsigned long cpuid);
+	int (*affinity_info)(unsigned long target_affinity,
+			unsigned long lowest_affinity_level);
+	int (*migrate_info_type)(void);
+};
+
+extern struct psci_operations psci_ops;
+
+#if defined(CONFIG_ARM_PSCI_FW)
+int __init psci_dt_init(void);
+#else
+static inline int psci_dt_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
+int __init psci_acpi_init(void);
+bool __init acpi_psci_present(void);
+bool __init acpi_psci_use_hvc(void);
+#else
+static inline int psci_acpi_init(void) { return 0; }
+static inline bool acpi_psci_present(void) { return false; }
+#endif
+
+#endif /* __LINUX_PSCI_H */
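
The stubs in the #else branches let platform code call the init functions unconditionally. The expected boot-time call site, per the arm64 setup change in this series, boils down to the following (the wrapper name here is illustrative only):

	#include <linux/acpi.h>
	#include <linux/psci.h>

	static void __init firmware_psci_init(void)
	{
		/* Probe via DT unless ACPI tables are in charge */
		if (acpi_disabled)
			psci_dt_init();
		else
			psci_acpi_init();
	}
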