Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig | 25
-rw-r--r--  arch/arm/common/mcpm_platsmp.c | 12
-rw-r--r--  arch/arm/include/asm/Kbuild | 1
-rw-r--r--  arch/arm/include/asm/assembler.h | 69
-rw-r--r--  arch/arm/include/asm/barrier.h | 13
-rw-r--r--  arch/arm/include/asm/bitops.h | 24
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 21
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/arm/include/asm/domain.h | 53
-rw-r--r--  arch/arm/include/asm/fixmap.h | 15
-rw-r--r--  arch/arm/include/asm/futex.h | 19
-rw-r--r--  arch/arm/include/asm/glue-cache.h | 2
-rw-r--r--  arch/arm/include/asm/outercache.h | 17
-rw-r--r--  arch/arm/include/asm/pgtable-2level-hwdef.h | 1
-rw-r--r--  arch/arm/include/asm/pmu.h | 154
-rw-r--r--  arch/arm/include/asm/psci.h | 23
-rw-r--r--  arch/arm/include/asm/smp.h | 2
-rw-r--r--  arch/arm/include/asm/smp_plat.h | 9
-rw-r--r--  arch/arm/include/asm/thread_info.h | 23
-rw-r--r--  arch/arm/include/asm/uaccess.h | 132
-rw-r--r--  arch/arm/kernel/Makefile | 5
-rw-r--r--  arch/arm/kernel/armksyms.c | 6
-rw-r--r--  arch/arm/kernel/entry-armv.S | 32
-rw-r--r--  arch/arm/kernel/entry-common.S | 63
-rw-r--r--  arch/arm/kernel/entry-header.S | 112
-rw-r--r--  arch/arm/kernel/head.S | 5
-rw-r--r--  arch/arm/kernel/irq.c | 1
-rw-r--r--  arch/arm/kernel/perf_event.c | 896
-rw-r--r--  arch/arm/kernel/perf_event_v6.c | 2
-rw-r--r--  arch/arm/kernel/perf_event_v7.c | 2
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c | 2
-rw-r--r--  arch/arm/kernel/process.c | 56
-rw-r--r--  arch/arm/kernel/psci.c | 299
-rw-r--r--  arch/arm/kernel/psci_smp.c | 31
-rw-r--r--  arch/arm/kernel/setup.c | 9
-rw-r--r--  arch/arm/kernel/signal.c | 6
-rw-r--r--  arch/arm/kernel/smp.c | 17
-rw-r--r--  arch/arm/kernel/swp_emulate.c | 3
-rw-r--r--  arch/arm/kernel/traps.c | 1
-rw-r--r--  arch/arm/lib/clear_user.S | 6
-rw-r--r--  arch/arm/lib/copy_from_user.S | 6
-rw-r--r--  arch/arm/lib/copy_to_user.S | 6
-rw-r--r--  arch/arm/lib/csumpartialcopyuser.S | 14
-rw-r--r--  arch/arm/lib/uaccess_with_memcpy.c | 4
-rw-r--r--  arch/arm/mach-highbank/highbank.c | 2
-rw-r--r--  arch/arm/mach-highbank/pm.c | 16
-rw-r--r--  arch/arm/mach-mmp/pm-pxa910.c | 1
-rw-r--r--  arch/arm/mach-omap2/Kconfig | 7
-rw-r--r--  arch/arm/mach-omap2/common.c | 1
-rw-r--r--  arch/arm/mach-omap2/common.h | 9
-rw-r--r--  arch/arm/mach-omap2/include/mach/barriers.h | 33
-rw-r--r--  arch/arm/mach-omap2/io.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap4-common.c | 121
-rw-r--r--  arch/arm/mach-omap2/sleep44xx.S | 8
-rw-r--r--  arch/arm/mach-prima2/pm.c | 1
-rw-r--r--  arch/arm/mach-shmobile/common.h | 2
-rw-r--r--  arch/arm/mach-shmobile/platsmp.c | 4
-rw-r--r--  arch/arm/mach-shmobile/smp-r8a7790.c | 2
-rw-r--r--  arch/arm/mach-shmobile/smp-r8a7791.c | 2
-rw-r--r--  arch/arm/mach-shmobile/smp-sh73a0.c | 2
-rw-r--r--  arch/arm/mach-ux500/cache-l2x0.c | 1
-rw-r--r--  arch/arm/mach-ux500/cpu-db8500.c | 2
-rw-r--r--  arch/arm/mm/Kconfig | 4
-rw-r--r--  arch/arm/mm/abort-ev4.S | 1
-rw-r--r--  arch/arm/mm/abort-ev5t.S | 4
-rw-r--r--  arch/arm/mm/abort-ev5tj.S | 4
-rw-r--r--  arch/arm/mm/abort-ev6.S | 8
-rw-r--r--  arch/arm/mm/abort-ev7.S | 1
-rw-r--r--  arch/arm/mm/abort-lv4t.S | 2
-rw-r--r--  arch/arm/mm/abort-macro.S | 14
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c | 6
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 5
-rw-r--r--  arch/arm/mm/dma-mapping.c | 22
-rw-r--r--  arch/arm/mm/dma.h | 32
-rw-r--r--  arch/arm/mm/flush.c | 15
-rw-r--r--  arch/arm/mm/highmem.c | 6
-rw-r--r--  arch/arm/mm/mmu.c | 92
-rw-r--r--  arch/arm/mm/pgd.c | 10
-rw-r--r--  arch/arm64/Kconfig | 1
-rw-r--r--  arch/arm64/include/asm/acpi.h | 4
-rw-r--r--  arch/arm64/include/asm/psci.h | 28
-rw-r--r--  arch/arm64/kernel/psci.c | 361
-rw-r--r--  arch/arm64/kernel/setup.c | 2
83 files changed, 912 insertions(+), 2127 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 41cbb4a53066..0d1b717e1eca 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -188,6 +188,9 @@ config ARCH_HAS_ILOG2_U64
188config ARCH_HAS_BANDGAP 188config ARCH_HAS_BANDGAP
189 bool 189 bool
190 190
191config FIX_EARLYCON_MEM
192 def_bool y if MMU
193
191config GENERIC_HWEIGHT 194config GENERIC_HWEIGHT
192 bool 195 bool
193 default y 196 default y
@@ -1496,6 +1499,7 @@ config HOTPLUG_CPU
1496config ARM_PSCI 1499config ARM_PSCI
1497 bool "Support for the ARM Power State Coordination Interface (PSCI)" 1500 bool "Support for the ARM Power State Coordination Interface (PSCI)"
1498 depends on CPU_V7 1501 depends on CPU_V7
1502 select ARM_PSCI_FW
1499 help 1503 help
1500 Say Y here if you want Linux to communicate with system firmware 1504 Say Y here if you want Linux to communicate with system firmware
1501 implementing the PSCI specification for CPU-centric power 1505 implementing the PSCI specification for CPU-centric power
@@ -1700,13 +1704,24 @@ config HIGHPTE
1700 consumed by page tables. Setting this option will allow 1704 consumed by page tables. Setting this option will allow
1701 user-space 2nd level page tables to reside in high memory. 1705 user-space 2nd level page tables to reside in high memory.
1702 1706
1703config HW_PERF_EVENTS 1707config CPU_SW_DOMAIN_PAN
1704 bool "Enable hardware performance counter support for perf events" 1708 bool "Enable use of CPU domains to implement privileged no-access"
1705 depends on PERF_EVENTS 1709 depends on MMU && !ARM_LPAE
1706 default y 1710 default y
1707 help 1711 help
1708 Enable hardware performance counter support for perf events. If 1712 Increase kernel security by ensuring that normal kernel accesses
1709 disabled, perf events will use software events only. 1713 are unable to access userspace addresses. This can help prevent
1714 use-after-free bugs becoming an exploitable privilege escalation
1715 by ensuring that magic values (such as LIST_POISON) will always
1716 fault when dereferenced.
1717
1718 CPUs with low-vector mappings use a best-efforts implementation.
1719 Their lower 1MB needs to remain accessible for the vectors, but
1720 the remainder of userspace will become appropriately inaccessible.
1721
1722config HW_PERF_EVENTS
1723 def_bool y
1724 depends on ARM_PMU
1710 1725
1711config SYS_SUPPORTS_HUGETLBFS 1726config SYS_SUPPORTS_HUGETLBFS
1712 def_bool y 1727 def_bool y
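The CPU_SW_DOMAIN_PAN help text above mentions LIST_POISON; a minimal illustration of the failure mode it defends against follows. This sketch is not part of the patch, and it assumes the default poison values (no CONFIG_ILLEGAL_POINTER_VALUE offset), which place the poison pointers inside the userspace address range on ARM.

    #include <linux/list.h>

    struct item {
    	struct list_head node;
    	int payload;
    };

    static int read_after_del(struct item *it)
    {
    	list_del(&it->node);	/* poisons: node.next = LIST_POISON1 (0x00100100),
    				 * node.prev = LIST_POISON2 (0x00200200) */

    	/* A later buggy traversal dereferences the poisoned pointer.  Without
    	 * PAN the kernel silently reads whatever userspace mapped at that
    	 * address; with CONFIG_CPU_SW_DOMAIN_PAN the user domain is no-access
    	 * while in the kernel, so the access faults immediately instead. */
    	return container_of(it->node.next, struct item, node)->payload;
    }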
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 92e54d7c6f46..2b25b6038f66 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -65,14 +65,10 @@ static int mcpm_cpu_kill(unsigned int cpu)
65 return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster); 65 return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
66} 66}
67 67
68static int mcpm_cpu_disable(unsigned int cpu) 68static bool mcpm_cpu_can_disable(unsigned int cpu)
69{ 69{
70 /* 70 /* We assume all CPUs may be shut down. */
71 * We assume all CPUs may be shut down. 71 return true;
72 * This would be the hook to use for eventual Secure
73 * OS migration requests as described in the PSCI spec.
74 */
75 return 0;
76} 72}
77 73
78static void mcpm_cpu_die(unsigned int cpu) 74static void mcpm_cpu_die(unsigned int cpu)
@@ -92,7 +88,7 @@ static struct smp_operations __initdata mcpm_smp_ops = {
92 .smp_secondary_init = mcpm_secondary_init, 88 .smp_secondary_init = mcpm_secondary_init,
93#ifdef CONFIG_HOTPLUG_CPU 89#ifdef CONFIG_HOTPLUG_CPU
94 .cpu_kill = mcpm_cpu_kill, 90 .cpu_kill = mcpm_cpu_kill,
95 .cpu_disable = mcpm_cpu_disable, 91 .cpu_can_disable = mcpm_cpu_can_disable,
96 .cpu_die = mcpm_cpu_die, 92 .cpu_die = mcpm_cpu_die,
97#endif 93#endif
98}; 94};
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 30b3bc1666d2..be648eb47cd9 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -12,7 +12,6 @@ generic-y += irq_regs.h
12generic-y += kdebug.h 12generic-y += kdebug.h
13generic-y += local.h 13generic-y += local.h
14generic-y += local64.h 14generic-y += local64.h
15generic-y += mcs_spinlock.h
16generic-y += mm-arch-hooks.h 15generic-y += mm-arch-hooks.h
17generic-y += msgbuf.h 16generic-y += msgbuf.h
18generic-y += param.h 17generic-y += param.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 4abe57279c66..7bbf325a4f31 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -108,33 +108,37 @@
108 .endm 108 .endm
109#endif 109#endif
110 110
111 .macro asm_trace_hardirqs_off 111 .macro asm_trace_hardirqs_off, save=1
112#if defined(CONFIG_TRACE_IRQFLAGS) 112#if defined(CONFIG_TRACE_IRQFLAGS)
113 .if \save
113 stmdb sp!, {r0-r3, ip, lr} 114 stmdb sp!, {r0-r3, ip, lr}
115 .endif
114 bl trace_hardirqs_off 116 bl trace_hardirqs_off
117 .if \save
115 ldmia sp!, {r0-r3, ip, lr} 118 ldmia sp!, {r0-r3, ip, lr}
119 .endif
116#endif 120#endif
117 .endm 121 .endm
118 122
119 .macro asm_trace_hardirqs_on_cond, cond 123 .macro asm_trace_hardirqs_on, cond=al, save=1
120#if defined(CONFIG_TRACE_IRQFLAGS) 124#if defined(CONFIG_TRACE_IRQFLAGS)
121 /* 125 /*
122 * actually the registers should be pushed and pop'd conditionally, but 126 * actually the registers should be pushed and pop'd conditionally, but
123 * after bl the flags are certainly clobbered 127 * after bl the flags are certainly clobbered
124 */ 128 */
129 .if \save
125 stmdb sp!, {r0-r3, ip, lr} 130 stmdb sp!, {r0-r3, ip, lr}
131 .endif
126 bl\cond trace_hardirqs_on 132 bl\cond trace_hardirqs_on
133 .if \save
127 ldmia sp!, {r0-r3, ip, lr} 134 ldmia sp!, {r0-r3, ip, lr}
135 .endif
128#endif 136#endif
129 .endm 137 .endm
130 138
131 .macro asm_trace_hardirqs_on 139 .macro disable_irq, save=1
132 asm_trace_hardirqs_on_cond al
133 .endm
134
135 .macro disable_irq
136 disable_irq_notrace 140 disable_irq_notrace
137 asm_trace_hardirqs_off 141 asm_trace_hardirqs_off \save
138 .endm 142 .endm
139 143
140 .macro enable_irq 144 .macro enable_irq
@@ -173,7 +177,7 @@
173 177
174 .macro restore_irqs, oldcpsr 178 .macro restore_irqs, oldcpsr
175 tst \oldcpsr, #PSR_I_BIT 179 tst \oldcpsr, #PSR_I_BIT
176 asm_trace_hardirqs_on_cond eq 180 asm_trace_hardirqs_on cond=eq
177 restore_irqs_notrace \oldcpsr 181 restore_irqs_notrace \oldcpsr
178 .endm 182 .endm
179 183
@@ -445,6 +449,53 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
445#endif 449#endif
446 .endm 450 .endm
447 451
452 .macro uaccess_disable, tmp, isb=1
453#ifdef CONFIG_CPU_SW_DOMAIN_PAN
454 /*
455 * Whenever we re-enter userspace, the domains should always be
456 * set appropriately.
457 */
458 mov \tmp, #DACR_UACCESS_DISABLE
459 mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
460 .if \isb
461 instr_sync
462 .endif
463#endif
464 .endm
465
466 .macro uaccess_enable, tmp, isb=1
467#ifdef CONFIG_CPU_SW_DOMAIN_PAN
468 /*
469 * Whenever we re-enter userspace, the domains should always be
470 * set appropriately.
471 */
472 mov \tmp, #DACR_UACCESS_ENABLE
473 mcr p15, 0, \tmp, c3, c0, 0
474 .if \isb
475 instr_sync
476 .endif
477#endif
478 .endm
479
480 .macro uaccess_save, tmp
481#ifdef CONFIG_CPU_SW_DOMAIN_PAN
482 mrc p15, 0, \tmp, c3, c0, 0
483 str \tmp, [sp, #S_FRAME_SIZE]
484#endif
485 .endm
486
487 .macro uaccess_restore
488#ifdef CONFIG_CPU_SW_DOMAIN_PAN
489 ldr r0, [sp, #S_FRAME_SIZE]
490 mcr p15, 0, r0, c3, c0, 0
491#endif
492 .endm
493
494 .macro uaccess_save_and_disable, tmp
495 uaccess_save \tmp
496 uaccess_disable \tmp
497 .endm
498
448 .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo 499 .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
449 .macro ret\c, reg 500 .macro ret\c, reg
450#if __LINUX_ARM_ARCH__ < 6 501#if __LINUX_ARM_ARCH__ < 6
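For readers following the new macros: a rough C rendering of what one uaccess_disable expansion does. This is a sketch only; the authoritative versions are the assembler macros above and the inline-asm helpers added to uaccess.h further down.

    static inline void uaccess_disable_sketch(void)
    {
    #ifdef CONFIG_CPU_SW_DOMAIN_PAN
    	/* mov \tmp, #DACR_UACCESS_DISABLE ; mcr p15, 0, \tmp, c3, c0, 0 */
    	asm volatile("mcr p15, 0, %0, c3, c0, 0"	/* write DACR */
    		     : : "r" (DACR_UACCESS_DISABLE) : "memory");
    	isb();		/* the .if \isb / instr_sync step */
    #endif
    }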
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 70393574e0fa..3ff5642d9788 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -2,7 +2,6 @@
2#define __ASM_BARRIER_H 2#define __ASM_BARRIER_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <asm/outercache.h>
6 5
7#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); 6#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
8 7
@@ -37,12 +36,20 @@
37#define dmb(x) __asm__ __volatile__ ("" : : : "memory") 36#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
38#endif 37#endif
39 38
39#ifdef CONFIG_ARM_HEAVY_MB
40extern void (*soc_mb)(void);
41extern void arm_heavy_mb(void);
42#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0)
43#else
44#define __arm_heavy_mb(x...) dsb(x)
45#endif
46
40#ifdef CONFIG_ARCH_HAS_BARRIERS 47#ifdef CONFIG_ARCH_HAS_BARRIERS
41#include <mach/barriers.h> 48#include <mach/barriers.h>
42#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) 49#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
43#define mb() do { dsb(); outer_sync(); } while (0) 50#define mb() __arm_heavy_mb()
44#define rmb() dsb() 51#define rmb() dsb()
45#define wmb() do { dsb(st); outer_sync(); } while (0) 52#define wmb() __arm_heavy_mb(st)
46#define dma_rmb() dmb(osh) 53#define dma_rmb() dmb(osh)
47#define dma_wmb() dmb(oshst) 54#define dma_wmb() dmb(oshst)
48#else 55#else
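The heavy-barrier indirection introduced here replaces the outer_sync() call that mb()/wmb() used to make directly. A sketch of the out-of-line side (the diffstat points at arch/arm/mm/flush.c, whose hunk is not shown in this excerpt):

    void (*soc_mb)(void);

    void arm_heavy_mb(void)
    {
    #ifdef CONFIG_OUTER_CACHE_SYNC
    	if (outer_cache.sync)
    		outer_cache.sync();	/* drain the outer (e.g. PL310) write buffer */
    #endif
    	if (soc_mb)
    		soc_mb();		/* optional SoC hook, e.g. an interconnect drain */
    }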
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 56380995f4c3..e943e6cee254 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -35,9 +35,9 @@
35static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p) 35static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
36{ 36{
37 unsigned long flags; 37 unsigned long flags;
38 unsigned long mask = 1UL << (bit & 31); 38 unsigned long mask = BIT_MASK(bit);
39 39
40 p += bit >> 5; 40 p += BIT_WORD(bit);
41 41
42 raw_local_irq_save(flags); 42 raw_local_irq_save(flags);
43 *p |= mask; 43 *p |= mask;
@@ -47,9 +47,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *
47static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) 47static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
48{ 48{
49 unsigned long flags; 49 unsigned long flags;
50 unsigned long mask = 1UL << (bit & 31); 50 unsigned long mask = BIT_MASK(bit);
51 51
52 p += bit >> 5; 52 p += BIT_WORD(bit);
53 53
54 raw_local_irq_save(flags); 54 raw_local_irq_save(flags);
55 *p &= ~mask; 55 *p &= ~mask;
@@ -59,9 +59,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long
59static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p) 59static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
60{ 60{
61 unsigned long flags; 61 unsigned long flags;
62 unsigned long mask = 1UL << (bit & 31); 62 unsigned long mask = BIT_MASK(bit);
63 63
64 p += bit >> 5; 64 p += BIT_WORD(bit);
65 65
66 raw_local_irq_save(flags); 66 raw_local_irq_save(flags);
67 *p ^= mask; 67 *p ^= mask;
@@ -73,9 +73,9 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
73{ 73{
74 unsigned long flags; 74 unsigned long flags;
75 unsigned int res; 75 unsigned int res;
76 unsigned long mask = 1UL << (bit & 31); 76 unsigned long mask = BIT_MASK(bit);
77 77
78 p += bit >> 5; 78 p += BIT_WORD(bit);
79 79
80 raw_local_irq_save(flags); 80 raw_local_irq_save(flags);
81 res = *p; 81 res = *p;
@@ -90,9 +90,9 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
90{ 90{
91 unsigned long flags; 91 unsigned long flags;
92 unsigned int res; 92 unsigned int res;
93 unsigned long mask = 1UL << (bit & 31); 93 unsigned long mask = BIT_MASK(bit);
94 94
95 p += bit >> 5; 95 p += BIT_WORD(bit);
96 96
97 raw_local_irq_save(flags); 97 raw_local_irq_save(flags);
98 res = *p; 98 res = *p;
@@ -107,9 +107,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
107{ 107{
108 unsigned long flags; 108 unsigned long flags;
109 unsigned int res; 109 unsigned int res;
110 unsigned long mask = 1UL << (bit & 31); 110 unsigned long mask = BIT_MASK(bit);
111 111
112 p += bit >> 5; 112 p += BIT_WORD(bit);
113 113
114 raw_local_irq_save(flags); 114 raw_local_irq_save(flags);
115 res = *p; 115 res = *p;
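For reference, the generic helpers substituted above come from include/linux/bitops.h and reduce to exactly the open-coded expressions they replace on 32-bit ARM (BITS_PER_LONG == 32):

    #define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))	/* == 1UL << (bit & 31) */
    #define BIT_WORD(nr)	((nr) / BITS_PER_LONG)		/* == bit >> 5 */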
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4812cda8fd17..d5525bfc7e3e 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -140,8 +140,6 @@ extern struct cpu_cache_fns cpu_cache;
140 * is visible to DMA, or data written by DMA to system memory is 140 * is visible to DMA, or data written by DMA to system memory is
141 * visible to the CPU. 141 * visible to the CPU.
142 */ 142 */
143#define dmac_map_area cpu_cache.dma_map_area
144#define dmac_unmap_area cpu_cache.dma_unmap_area
145#define dmac_flush_range cpu_cache.dma_flush_range 143#define dmac_flush_range cpu_cache.dma_flush_range
146 144
147#else 145#else
@@ -161,8 +159,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
161 * is visible to DMA, or data written by DMA to system memory is 159 * is visible to DMA, or data written by DMA to system memory is
162 * visible to the CPU. 160 * visible to the CPU.
163 */ 161 */
164extern void dmac_map_area(const void *, size_t, int);
165extern void dmac_unmap_area(const void *, size_t, int);
166extern void dmac_flush_range(const void *, const void *); 162extern void dmac_flush_range(const void *, const void *);
167 163
168#endif 164#endif
@@ -506,4 +502,21 @@ static inline void set_kernel_text_ro(void) { }
506void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, 502void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
507 void *kaddr, unsigned long len); 503 void *kaddr, unsigned long len);
508 504
505/**
506 * secure_flush_area - ensure coherency across the secure boundary
507 * @addr: virtual address
508 * @size: size of region
509 *
510 * Ensure that the specified area of memory is coherent across the secure
511 * boundary from the non-secure side. This is used when calling secure
512 * firmware where the secure firmware does not ensure coherency.
513 */
514static inline void secure_flush_area(const void *addr, size_t size)
515{
516 phys_addr_t phys = __pa(addr);
517
518 __cpuc_flush_dcache_area((void *)addr, size);
519 outer_flush_range(phys, phys + size);
520}
521
509#endif 522#endif
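A hypothetical caller of the new secure_flush_area() helper, to show the intended use: clean a buffer out to the point of coherency before passing its physical address to secure firmware that does not handle coherency itself. firmware_set_buffer() below is an invented name.

    static int hand_buffer_to_firmware(void *buf, size_t len)
    {
    	secure_flush_area(buf, len);			/* L1 flush, then outer_flush_range() */
    	return firmware_set_buffer(__pa(buf), len);	/* hypothetical SMC wrapper */
    }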
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index b52101d37ec7..a68b9d8a71fe 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -14,7 +14,7 @@
14#include <xen/xen.h> 14#include <xen/xen.h>
15#include <asm/xen/hypervisor.h> 15#include <asm/xen/hypervisor.h>
16 16
17#define DMA_ERROR_CODE (~0) 17#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
18extern struct dma_map_ops arm_dma_ops; 18extern struct dma_map_ops arm_dma_ops;
19extern struct dma_map_ops arm_coherent_dma_ops; 19extern struct dma_map_ops arm_coherent_dma_ops;
20 20
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe446425e..e878129f2fee 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -34,15 +34,14 @@
34 */ 34 */
35#ifndef CONFIG_IO_36 35#ifndef CONFIG_IO_36
36#define DOMAIN_KERNEL 0 36#define DOMAIN_KERNEL 0
37#define DOMAIN_TABLE 0
38#define DOMAIN_USER 1 37#define DOMAIN_USER 1
39#define DOMAIN_IO 2 38#define DOMAIN_IO 2
40#else 39#else
41#define DOMAIN_KERNEL 2 40#define DOMAIN_KERNEL 2
42#define DOMAIN_TABLE 2
43#define DOMAIN_USER 1 41#define DOMAIN_USER 1
44#define DOMAIN_IO 0 42#define DOMAIN_IO 0
45#endif 43#endif
44#define DOMAIN_VECTORS 3
46 45
47/* 46/*
48 * Domain types 47 * Domain types
@@ -55,11 +54,46 @@
55#define DOMAIN_MANAGER 1 54#define DOMAIN_MANAGER 1
56#endif 55#endif
57 56
58#define domain_val(dom,type) ((type) << (2*(dom))) 57#define domain_mask(dom) ((3) << (2 * (dom)))
58#define domain_val(dom,type) ((type) << (2 * (dom)))
59
60#ifdef CONFIG_CPU_SW_DOMAIN_PAN
61#define DACR_INIT \
62 (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
63 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
64 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
65 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
66#else
67#define DACR_INIT \
68 (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
69 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
70 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
71 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
72#endif
73
74#define __DACR_DEFAULT \
75 domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
76 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
77 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
78
79#define DACR_UACCESS_DISABLE \
80 (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
81#define DACR_UACCESS_ENABLE \
82 (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
59 83
60#ifndef __ASSEMBLY__ 84#ifndef __ASSEMBLY__
61 85
62#ifdef CONFIG_CPU_USE_DOMAINS 86static inline unsigned int get_domain(void)
87{
88 unsigned int domain;
89
90 asm(
91 "mrc p15, 0, %0, c3, c0 @ get domain"
92 : "=r" (domain));
93
94 return domain;
95}
96
63static inline void set_domain(unsigned val) 97static inline void set_domain(unsigned val)
64{ 98{
65 asm volatile( 99 asm volatile(
@@ -68,17 +102,16 @@ static inline void set_domain(unsigned val)
68 isb(); 102 isb();
69} 103}
70 104
105#ifdef CONFIG_CPU_USE_DOMAINS
71#define modify_domain(dom,type) \ 106#define modify_domain(dom,type) \
72 do { \ 107 do { \
73 struct thread_info *thread = current_thread_info(); \ 108 unsigned int domain = get_domain(); \
74 unsigned int domain = thread->cpu_domain; \ 109 domain &= ~domain_mask(dom); \
75 domain &= ~domain_val(dom, DOMAIN_MANAGER); \ 110 domain = domain | domain_val(dom, type); \
76 thread->cpu_domain = domain | domain_val(dom, type); \ 111 set_domain(domain); \
77 set_domain(thread->cpu_domain); \
78 } while (0) 112 } while (0)
79 113
80#else 114#else
81static inline void set_domain(unsigned val) { }
82static inline void modify_domain(unsigned dom, unsigned type) { } 115static inline void modify_domain(unsigned dom, unsigned type) { }
83#endif 116#endif
84 117
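Worked values for the new DACR constants, assuming the usual ARM domain type encodings (DOMAIN_NOACCESS = 0, DOMAIN_CLIENT = 1) and the !CONFIG_IO_36 domain numbers used above (KERNEL = 0, USER = 1, IO = 2, VECTORS = 3):

    /*
     * domain_val(DOMAIN_KERNEL,  DOMAIN_CLIENT) = 1 << 0 = 0x01
     * domain_val(DOMAIN_USER,    DOMAIN_CLIENT) = 1 << 2 = 0x04
     * domain_val(DOMAIN_IO,      DOMAIN_CLIENT) = 1 << 4 = 0x10
     * domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT) = 1 << 6 = 0x40
     *
     * DACR_UACCESS_ENABLE  = 0x01 | 0x10 | 0x40 | 0x04 = 0x55
     * DACR_UACCESS_DISABLE = 0x01 | 0x10 | 0x40 | 0x00 = 0x51
     *
     * Both fit in an 8-bit ARM immediate, so uaccess_enable/uaccess_disable
     * cost one MOV plus one MCR (plus an optional ISB).
     */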
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 0415eae1df27..58cfe9f1a687 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -6,9 +6,13 @@
6#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE) 6#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
7 7
8#include <asm/kmap_types.h> 8#include <asm/kmap_types.h>
9#include <asm/pgtable.h>
9 10
10enum fixed_addresses { 11enum fixed_addresses {
11 FIX_KMAP_BEGIN, 12 FIX_EARLYCON_MEM_BASE,
13 __end_of_permanent_fixed_addresses,
14
15 FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
12 FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, 16 FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
13 17
14 /* Support writing RO kernel text via kprobes, jump labels, etc. */ 18 /* Support writing RO kernel text via kprobes, jump labels, etc. */
@@ -18,7 +22,16 @@ enum fixed_addresses {
18 __end_of_fixed_addresses 22 __end_of_fixed_addresses
19}; 23};
20 24
25#define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
26
27#define FIXMAP_PAGE_NORMAL (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
28
29/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
30#define FIXMAP_PAGE_IO (FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
31#define FIXMAP_PAGE_NOCACHE FIXMAP_PAGE_IO
32
21void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot); 33void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
34void __init early_fixmap_init(void);
22 35
23#include <asm-generic/fixmap.h> 36#include <asm-generic/fixmap.h>
24 37
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 5eed82809d82..6795368ad023 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -22,8 +22,11 @@
22#ifdef CONFIG_SMP 22#ifdef CONFIG_SMP
23 23
24#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ 24#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
25({ \
26 unsigned int __ua_flags; \
25 smp_mb(); \ 27 smp_mb(); \
26 prefetchw(uaddr); \ 28 prefetchw(uaddr); \
29 __ua_flags = uaccess_save_and_enable(); \
27 __asm__ __volatile__( \ 30 __asm__ __volatile__( \
28 "1: ldrex %1, [%3]\n" \ 31 "1: ldrex %1, [%3]\n" \
29 " " insn "\n" \ 32 " " insn "\n" \
@@ -34,12 +37,15 @@
34 __futex_atomic_ex_table("%5") \ 37 __futex_atomic_ex_table("%5") \
35 : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ 38 : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
36 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ 39 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
37 : "cc", "memory") 40 : "cc", "memory"); \
41 uaccess_restore(__ua_flags); \
42})
38 43
39static inline int 44static inline int
40futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, 45futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
41 u32 oldval, u32 newval) 46 u32 oldval, u32 newval)
42{ 47{
48 unsigned int __ua_flags;
43 int ret; 49 int ret;
44 u32 val; 50 u32 val;
45 51
@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
49 smp_mb(); 55 smp_mb();
50 /* Prefetching cannot fault */ 56 /* Prefetching cannot fault */
51 prefetchw(uaddr); 57 prefetchw(uaddr);
58 __ua_flags = uaccess_save_and_enable();
52 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" 59 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
53 "1: ldrex %1, [%4]\n" 60 "1: ldrex %1, [%4]\n"
54 " teq %1, %2\n" 61 " teq %1, %2\n"
@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
61 : "=&r" (ret), "=&r" (val) 68 : "=&r" (ret), "=&r" (val)
62 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) 69 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
63 : "cc", "memory"); 70 : "cc", "memory");
71 uaccess_restore(__ua_flags);
64 smp_mb(); 72 smp_mb();
65 73
66 *uval = val; 74 *uval = val;
@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
73#include <asm/domain.h> 81#include <asm/domain.h>
74 82
75#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ 83#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
84({ \
85 unsigned int __ua_flags = uaccess_save_and_enable(); \
76 __asm__ __volatile__( \ 86 __asm__ __volatile__( \
77 "1: " TUSER(ldr) " %1, [%3]\n" \ 87 "1: " TUSER(ldr) " %1, [%3]\n" \
78 " " insn "\n" \ 88 " " insn "\n" \
@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
81 __futex_atomic_ex_table("%5") \ 91 __futex_atomic_ex_table("%5") \
82 : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ 92 : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
83 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ 93 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
84 : "cc", "memory") 94 : "cc", "memory"); \
95 uaccess_restore(__ua_flags); \
96})
85 97
86static inline int 98static inline int
87futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, 99futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
88 u32 oldval, u32 newval) 100 u32 oldval, u32 newval)
89{ 101{
102 unsigned int __ua_flags;
90 int ret = 0; 103 int ret = 0;
91 u32 val; 104 u32 val;
92 105
@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
94 return -EFAULT; 107 return -EFAULT;
95 108
96 preempt_disable(); 109 preempt_disable();
110 __ua_flags = uaccess_save_and_enable();
97 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" 111 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
98 "1: " TUSER(ldr) " %1, [%4]\n" 112 "1: " TUSER(ldr) " %1, [%4]\n"
99 " teq %1, %2\n" 113 " teq %1, %2\n"
@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
103 : "+r" (ret), "=&r" (val) 117 : "+r" (ret), "=&r" (val)
104 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) 118 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
105 : "cc", "memory"); 119 : "cc", "memory");
120 uaccess_restore(__ua_flags);
106 121
107 *uval = val; 122 *uval = val;
108 preempt_enable(); 123 preempt_enable();
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index a3c24cd5b7c8..cab07f69382d 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -158,8 +158,6 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
158#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) 158#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
159#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area) 159#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
160 160
161#define dmac_map_area __glue(_CACHE,_dma_map_area)
162#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
163#define dmac_flush_range __glue(_CACHE,_dma_flush_range) 161#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
164#endif 162#endif
165 163
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 563b92fc2f41..c2bf24f40177 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -129,21 +129,4 @@ static inline void outer_resume(void) { }
129 129
130#endif 130#endif
131 131
132#ifdef CONFIG_OUTER_CACHE_SYNC
133/**
134 * outer_sync - perform a sync point for outer cache
135 *
136 * Ensure that all outer cache operations are complete and any store
137 * buffers are drained.
138 */
139static inline void outer_sync(void)
140{
141 if (outer_cache.sync)
142 outer_cache.sync();
143}
144#else
145static inline void outer_sync(void)
146{ }
147#endif
148
149#endif /* __ASM_OUTERCACHE_H */ 132#endif /* __ASM_OUTERCACHE_H */
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5e68278e953e..d0131ee6f6af 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -23,6 +23,7 @@
23#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */ 23#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
24#define PMD_BIT4 (_AT(pmdval_t, 1) << 4) 24#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
25#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) 25#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
26#define PMD_DOMAIN_MASK PMD_DOMAIN(0x0f)
26#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ 27#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
27/* 28/*
28 * - section 29 * - section
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
deleted file mode 100644
index 3fc87dfd77e6..000000000000
--- a/arch/arm/include/asm/pmu.h
+++ /dev/null
@@ -1,154 +0,0 @@
1/*
2 * linux/arch/arm/include/asm/pmu.h
3 *
4 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#ifndef __ARM_PMU_H__
13#define __ARM_PMU_H__
14
15#include <linux/interrupt.h>
16#include <linux/perf_event.h>
17
18#include <asm/cputype.h>
19
20/*
21 * struct arm_pmu_platdata - ARM PMU platform data
22 *
23 * @handle_irq: an optional handler which will be called from the
24 * interrupt and passed the address of the low level handler,
25 * and can be used to implement any platform specific handling
26 * before or after calling it.
27 */
28struct arm_pmu_platdata {
29 irqreturn_t (*handle_irq)(int irq, void *dev,
30 irq_handler_t pmu_handler);
31};
32
33#ifdef CONFIG_HW_PERF_EVENTS
34
35/*
36 * The ARMv7 CPU PMU supports up to 32 event counters.
37 */
38#define ARMPMU_MAX_HWEVENTS 32
39
40#define HW_OP_UNSUPPORTED 0xFFFF
41#define C(_x) PERF_COUNT_HW_CACHE_##_x
42#define CACHE_OP_UNSUPPORTED 0xFFFF
43
44#define PERF_MAP_ALL_UNSUPPORTED \
45 [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
46
47#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
48[0 ... C(MAX) - 1] = { \
49 [0 ... C(OP_MAX) - 1] = { \
50 [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
51 }, \
52}
53
54/* The events for a given PMU register set. */
55struct pmu_hw_events {
56 /*
57 * The events that are active on the PMU for the given index.
58 */
59 struct perf_event *events[ARMPMU_MAX_HWEVENTS];
60
61 /*
62 * A 1 bit for an index indicates that the counter is being used for
63 * an event. A 0 means that the counter can be used.
64 */
65 DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
66
67 /*
68 * Hardware lock to serialize accesses to PMU registers. Needed for the
69 * read/modify/write sequences.
70 */
71 raw_spinlock_t pmu_lock;
72
73 /*
74 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
75 * already have to allocate this struct per cpu.
76 */
77 struct arm_pmu *percpu_pmu;
78};
79
80struct arm_pmu {
81 struct pmu pmu;
82 cpumask_t active_irqs;
83 cpumask_t supported_cpus;
84 int *irq_affinity;
85 char *name;
86 irqreturn_t (*handle_irq)(int irq_num, void *dev);
87 void (*enable)(struct perf_event *event);
88 void (*disable)(struct perf_event *event);
89 int (*get_event_idx)(struct pmu_hw_events *hw_events,
90 struct perf_event *event);
91 void (*clear_event_idx)(struct pmu_hw_events *hw_events,
92 struct perf_event *event);
93 int (*set_event_filter)(struct hw_perf_event *evt,
94 struct perf_event_attr *attr);
95 u32 (*read_counter)(struct perf_event *event);
96 void (*write_counter)(struct perf_event *event, u32 val);
97 void (*start)(struct arm_pmu *);
98 void (*stop)(struct arm_pmu *);
99 void (*reset)(void *);
100 int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
101 void (*free_irq)(struct arm_pmu *);
102 int (*map_event)(struct perf_event *event);
103 int num_events;
104 atomic_t active_events;
105 struct mutex reserve_mutex;
106 u64 max_period;
107 struct platform_device *plat_device;
108 struct pmu_hw_events __percpu *hw_events;
109 struct notifier_block hotplug_nb;
110};
111
112#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
113
114int armpmu_register(struct arm_pmu *armpmu, int type);
115
116u64 armpmu_event_update(struct perf_event *event);
117
118int armpmu_event_set_period(struct perf_event *event);
119
120int armpmu_map_event(struct perf_event *event,
121 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
122 const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
123 [PERF_COUNT_HW_CACHE_OP_MAX]
124 [PERF_COUNT_HW_CACHE_RESULT_MAX],
125 u32 raw_event_mask);
126
127struct pmu_probe_info {
128 unsigned int cpuid;
129 unsigned int mask;
130 int (*init)(struct arm_pmu *);
131};
132
133#define PMU_PROBE(_cpuid, _mask, _fn) \
134{ \
135 .cpuid = (_cpuid), \
136 .mask = (_mask), \
137 .init = (_fn), \
138}
139
140#define ARM_PMU_PROBE(_cpuid, _fn) \
141 PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
142
143#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
144
145#define XSCALE_PMU_PROBE(_version, _fn) \
146 PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
147
148int arm_pmu_device_probe(struct platform_device *pdev,
149 const struct of_device_id *of_table,
150 const struct pmu_probe_info *probe_table);
151
152#endif /* CONFIG_HW_PERF_EVENTS */
153
154#endif /* __ARM_PMU_H__ */
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index c25ef3ec6d1f..68ee3ce17b82 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -14,34 +14,11 @@
14#ifndef __ASM_ARM_PSCI_H 14#ifndef __ASM_ARM_PSCI_H
15#define __ASM_ARM_PSCI_H 15#define __ASM_ARM_PSCI_H
16 16
17#define PSCI_POWER_STATE_TYPE_STANDBY 0
18#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
19
20struct psci_power_state {
21 u16 id;
22 u8 type;
23 u8 affinity_level;
24};
25
26struct psci_operations {
27 int (*cpu_suspend)(struct psci_power_state state,
28 unsigned long entry_point);
29 int (*cpu_off)(struct psci_power_state state);
30 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
31 int (*migrate)(unsigned long cpuid);
32 int (*affinity_info)(unsigned long target_affinity,
33 unsigned long lowest_affinity_level);
34 int (*migrate_info_type)(void);
35};
36
37extern struct psci_operations psci_ops;
38extern struct smp_operations psci_smp_ops; 17extern struct smp_operations psci_smp_ops;
39 18
40#ifdef CONFIG_ARM_PSCI 19#ifdef CONFIG_ARM_PSCI
41int psci_init(void);
42bool psci_smp_available(void); 20bool psci_smp_available(void);
43#else 21#else
44static inline int psci_init(void) { return 0; }
45static inline bool psci_smp_available(void) { return false; } 22static inline bool psci_smp_available(void) { return false; }
46#endif 23#endif
47 24
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 2f3ac1ba6fb4..ef356659b4f4 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -74,7 +74,6 @@ extern void secondary_startup_arm(void);
74extern int __cpu_disable(void); 74extern int __cpu_disable(void);
75 75
76extern void __cpu_die(unsigned int cpu); 76extern void __cpu_die(unsigned int cpu);
77extern void cpu_die(void);
78 77
79extern void arch_send_call_function_single_ipi(int cpu); 78extern void arch_send_call_function_single_ipi(int cpu);
80extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 79extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -105,6 +104,7 @@ struct smp_operations {
105#ifdef CONFIG_HOTPLUG_CPU 104#ifdef CONFIG_HOTPLUG_CPU
106 int (*cpu_kill)(unsigned int cpu); 105 int (*cpu_kill)(unsigned int cpu);
107 void (*cpu_die)(unsigned int cpu); 106 void (*cpu_die)(unsigned int cpu);
107 bool (*cpu_can_disable)(unsigned int cpu);
108 int (*cpu_disable)(unsigned int cpu); 108 int (*cpu_disable)(unsigned int cpu);
109#endif 109#endif
110#endif 110#endif
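Sketch of a platform adopting the new hook (all names below are illustrative): where an old .cpu_disable callback only ever returned 0 or -EPERM, a boolean .cpu_can_disable now expresses the same policy, as the mcpm conversion above shows.

    static bool myplat_cpu_can_disable(unsigned int cpu)
    {
    	return cpu != 0;	/* e.g. CPU 0 owns the wakeup path and must stay online */
    }

    static struct smp_operations myplat_smp_ops __initdata = {
    	.smp_boot_secondary	= myplat_boot_secondary,	/* hypothetical */
    #ifdef CONFIG_HOTPLUG_CPU
    	.cpu_can_disable	= myplat_cpu_can_disable,
    	.cpu_die		= myplat_cpu_die,		/* hypothetical */
    #endif
    };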
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 993e5224d8f7..f9080717fc88 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -107,4 +107,13 @@ static inline u32 mpidr_hash_size(void)
107extern int platform_can_secondary_boot(void); 107extern int platform_can_secondary_boot(void);
108extern int platform_can_cpu_hotplug(void); 108extern int platform_can_cpu_hotplug(void);
109 109
110#ifdef CONFIG_HOTPLUG_CPU
111extern int platform_can_hotplug_cpu(unsigned int cpu);
112#else
113static inline int platform_can_hotplug_cpu(unsigned int cpu)
114{
115 return 0;
116}
117#endif
118
110#endif 119#endif
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index bd32eded3e50..d0a1119dcaf3 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -74,9 +74,6 @@ struct thread_info {
74 .flags = 0, \ 74 .flags = 0, \
75 .preempt_count = INIT_PREEMPT_COUNT, \ 75 .preempt_count = INIT_PREEMPT_COUNT, \
76 .addr_limit = KERNEL_DS, \ 76 .addr_limit = KERNEL_DS, \
77 .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
78 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
79 domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
80} 77}
81 78
82#define init_thread_info (init_thread_union.thread_info) 79#define init_thread_info (init_thread_union.thread_info)
@@ -136,22 +133,18 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
136 133
137/* 134/*
138 * thread information flags: 135 * thread information flags:
139 * TIF_SYSCALL_TRACE - syscall trace active
140 * TIF_SYSCAL_AUDIT - syscall auditing active
141 * TIF_SIGPENDING - signal pending
142 * TIF_NEED_RESCHED - rescheduling necessary
143 * TIF_NOTIFY_RESUME - callback before returning to user
144 * TIF_USEDFPU - FPU was used by this task this quantum (SMP) 136 * TIF_USEDFPU - FPU was used by this task this quantum (SMP)
145 * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED 137 * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
146 */ 138 */
147#define TIF_SIGPENDING 0 139#define TIF_SIGPENDING 0 /* signal pending */
148#define TIF_NEED_RESCHED 1 140#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
149#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ 141#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
150#define TIF_UPROBE 7 142#define TIF_UPROBE 3 /* breakpointed or singlestepping */
151#define TIF_SYSCALL_TRACE 8 143#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
152#define TIF_SYSCALL_AUDIT 9 144#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
153#define TIF_SYSCALL_TRACEPOINT 10 145#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
154#define TIF_SECCOMP 11 /* seccomp syscall filtering active */ 146#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
147
155#define TIF_NOHZ 12 /* in adaptive nohz mode */ 148#define TIF_NOHZ 12 /* in adaptive nohz mode */
156#define TIF_USING_IWMMXT 17 149#define TIF_USING_IWMMXT 17
157#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 150#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
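The renumbering is not cosmetic: packing the work flags into bits 0-3 and the syscall-trace flags into bits 4-7 lets both groups be tested with one 8-bit immediate, which the ret_fast_syscall change in entry-common.S below relies on ("tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK"). Assuming the usual composite mask definitions:

    /*
     * _TIF_WORK_MASK    = SIGPENDING | NEED_RESCHED | NOTIFY_RESUME | UPROBE
     *                   = BIT(0) | BIT(1) | BIT(2) | BIT(3)            = 0x0f
     * _TIF_SYSCALL_WORK = SYSCALL_TRACE | SYSCALL_AUDIT |
     *                     SYSCALL_TRACEPOINT | SECCOMP
     *                   = BIT(4) | BIT(5) | BIT(6) | BIT(7)            = 0xf0
     *
     * _TIF_SYSCALL_WORK | _TIF_WORK_MASK = 0xff: a single valid ARM immediate.
     */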
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 74b17d09ef7a..8cc85a4ebec2 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -50,6 +50,35 @@ struct exception_table_entry
50extern int fixup_exception(struct pt_regs *regs); 50extern int fixup_exception(struct pt_regs *regs);
51 51
52/* 52/*
53 * These two functions allow hooking accesses to userspace to increase
 54 * system integrity by ensuring that the kernel cannot inadvertently
 55 * perform such accesses (e.g., via list poison values) which could then
 56 * be exploited for privilege escalation.
57 */
58static inline unsigned int uaccess_save_and_enable(void)
59{
60#ifdef CONFIG_CPU_SW_DOMAIN_PAN
61 unsigned int old_domain = get_domain();
62
63 /* Set the current domain access to permit user accesses */
64 set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
65 domain_val(DOMAIN_USER, DOMAIN_CLIENT));
66
67 return old_domain;
68#else
69 return 0;
70#endif
71}
72
73static inline void uaccess_restore(unsigned int flags)
74{
75#ifdef CONFIG_CPU_SW_DOMAIN_PAN
76 /* Restore the user access mask */
77 set_domain(flags);
78#endif
79}
80
81/*
53 * These two are intentionally not defined anywhere - if the kernel 82 * These two are intentionally not defined anywhere - if the kernel
54 * code generates any references to them, that's a bug. 83 * code generates any references to them, that's a bug.
55 */ 84 */
@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *);
165 register typeof(x) __r2 asm("r2"); \ 194 register typeof(x) __r2 asm("r2"); \
166 register unsigned long __l asm("r1") = __limit; \ 195 register unsigned long __l asm("r1") = __limit; \
167 register int __e asm("r0"); \ 196 register int __e asm("r0"); \
197 unsigned int __ua_flags = uaccess_save_and_enable(); \
168 switch (sizeof(*(__p))) { \ 198 switch (sizeof(*(__p))) { \
169 case 1: \ 199 case 1: \
170 if (sizeof((x)) >= 8) \ 200 if (sizeof((x)) >= 8) \
@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *);
192 break; \ 222 break; \
193 default: __e = __get_user_bad(); break; \ 223 default: __e = __get_user_bad(); break; \
194 } \ 224 } \
225 uaccess_restore(__ua_flags); \
195 x = (typeof(*(p))) __r2; \ 226 x = (typeof(*(p))) __r2; \
196 __e; \ 227 __e; \
197 }) 228 })
@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long);
224 register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \ 255 register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
225 register unsigned long __l asm("r1") = __limit; \ 256 register unsigned long __l asm("r1") = __limit; \
226 register int __e asm("r0"); \ 257 register int __e asm("r0"); \
258 unsigned int __ua_flags = uaccess_save_and_enable(); \
227 switch (sizeof(*(__p))) { \ 259 switch (sizeof(*(__p))) { \
228 case 1: \ 260 case 1: \
229 __put_user_x(__r2, __p, __e, __l, 1); \ 261 __put_user_x(__r2, __p, __e, __l, 1); \
@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long);
239 break; \ 271 break; \
240 default: __e = __put_user_bad(); break; \ 272 default: __e = __put_user_bad(); break; \
241 } \ 273 } \
274 uaccess_restore(__ua_flags); \
242 __e; \ 275 __e; \
243 }) 276 })
244 277
@@ -300,20 +333,23 @@ static inline void set_fs(mm_segment_t fs)
300do { \ 333do { \
301 unsigned long __gu_addr = (unsigned long)(ptr); \ 334 unsigned long __gu_addr = (unsigned long)(ptr); \
302 unsigned long __gu_val; \ 335 unsigned long __gu_val; \
336 unsigned int __ua_flags; \
303 __chk_user_ptr(ptr); \ 337 __chk_user_ptr(ptr); \
304 might_fault(); \ 338 might_fault(); \
339 __ua_flags = uaccess_save_and_enable(); \
305 switch (sizeof(*(ptr))) { \ 340 switch (sizeof(*(ptr))) { \
306 case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \ 341 case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
307 case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \ 342 case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
308 case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \ 343 case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
309 default: (__gu_val) = __get_user_bad(); \ 344 default: (__gu_val) = __get_user_bad(); \
310 } \ 345 } \
346 uaccess_restore(__ua_flags); \
311 (x) = (__typeof__(*(ptr)))__gu_val; \ 347 (x) = (__typeof__(*(ptr)))__gu_val; \
312} while (0) 348} while (0)
313 349
314#define __get_user_asm_byte(x, addr, err) \ 350#define __get_user_asm(x, addr, err, instr) \
315 __asm__ __volatile__( \ 351 __asm__ __volatile__( \
316 "1: " TUSER(ldrb) " %1,[%2],#0\n" \ 352 "1: " TUSER(instr) " %1, [%2], #0\n" \
317 "2:\n" \ 353 "2:\n" \
318 " .pushsection .text.fixup,\"ax\"\n" \ 354 " .pushsection .text.fixup,\"ax\"\n" \
319 " .align 2\n" \ 355 " .align 2\n" \
@@ -329,6 +365,9 @@ do { \
329 : "r" (addr), "i" (-EFAULT) \ 365 : "r" (addr), "i" (-EFAULT) \
330 : "cc") 366 : "cc")
331 367
368#define __get_user_asm_byte(x, addr, err) \
369 __get_user_asm(x, addr, err, ldrb)
370
332#ifndef __ARMEB__ 371#ifndef __ARMEB__
333#define __get_user_asm_half(x, __gu_addr, err) \ 372#define __get_user_asm_half(x, __gu_addr, err) \
334({ \ 373({ \
@@ -348,22 +387,7 @@ do { \
348#endif 387#endif
349 388
350#define __get_user_asm_word(x, addr, err) \ 389#define __get_user_asm_word(x, addr, err) \
351 __asm__ __volatile__( \ 390 __get_user_asm(x, addr, err, ldr)
352 "1: " TUSER(ldr) " %1,[%2],#0\n" \
353 "2:\n" \
354 " .pushsection .text.fixup,\"ax\"\n" \
355 " .align 2\n" \
356 "3: mov %0, %3\n" \
357 " mov %1, #0\n" \
358 " b 2b\n" \
359 " .popsection\n" \
360 " .pushsection __ex_table,\"a\"\n" \
361 " .align 3\n" \
362 " .long 1b, 3b\n" \
363 " .popsection" \
364 : "+r" (err), "=&r" (x) \
365 : "r" (addr), "i" (-EFAULT) \
366 : "cc")
367 391
368#define __put_user(x, ptr) \ 392#define __put_user(x, ptr) \
369({ \ 393({ \
@@ -381,9 +405,11 @@ do { \
381#define __put_user_err(x, ptr, err) \ 405#define __put_user_err(x, ptr, err) \
382do { \ 406do { \
383 unsigned long __pu_addr = (unsigned long)(ptr); \ 407 unsigned long __pu_addr = (unsigned long)(ptr); \
408 unsigned int __ua_flags; \
384 __typeof__(*(ptr)) __pu_val = (x); \ 409 __typeof__(*(ptr)) __pu_val = (x); \
385 __chk_user_ptr(ptr); \ 410 __chk_user_ptr(ptr); \
386 might_fault(); \ 411 might_fault(); \
412 __ua_flags = uaccess_save_and_enable(); \
387 switch (sizeof(*(ptr))) { \ 413 switch (sizeof(*(ptr))) { \
388 case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \ 414 case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
389 case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \ 415 case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
@@ -391,11 +417,12 @@ do { \
391 case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \ 417 case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
392 default: __put_user_bad(); \ 418 default: __put_user_bad(); \
393 } \ 419 } \
420 uaccess_restore(__ua_flags); \
394} while (0) 421} while (0)
395 422
396#define __put_user_asm_byte(x, __pu_addr, err) \ 423#define __put_user_asm(x, __pu_addr, err, instr) \
397 __asm__ __volatile__( \ 424 __asm__ __volatile__( \
398 "1: " TUSER(strb) " %1,[%2],#0\n" \ 425 "1: " TUSER(instr) " %1, [%2], #0\n" \
399 "2:\n" \ 426 "2:\n" \
400 " .pushsection .text.fixup,\"ax\"\n" \ 427 " .pushsection .text.fixup,\"ax\"\n" \
401 " .align 2\n" \ 428 " .align 2\n" \
@@ -410,6 +437,9 @@ do { \
410 : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ 437 : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
411 : "cc") 438 : "cc")
412 439
440#define __put_user_asm_byte(x, __pu_addr, err) \
441 __put_user_asm(x, __pu_addr, err, strb)
442
413#ifndef __ARMEB__ 443#ifndef __ARMEB__
414#define __put_user_asm_half(x, __pu_addr, err) \ 444#define __put_user_asm_half(x, __pu_addr, err) \
415({ \ 445({ \
@@ -427,21 +457,7 @@ do { \
427#endif 457#endif
428 458
429#define __put_user_asm_word(x, __pu_addr, err) \ 459#define __put_user_asm_word(x, __pu_addr, err) \
430 __asm__ __volatile__( \ 460 __put_user_asm(x, __pu_addr, err, str)
431 "1: " TUSER(str) " %1,[%2],#0\n" \
432 "2:\n" \
433 " .pushsection .text.fixup,\"ax\"\n" \
434 " .align 2\n" \
435 "3: mov %0, %3\n" \
436 " b 2b\n" \
437 " .popsection\n" \
438 " .pushsection __ex_table,\"a\"\n" \
439 " .align 3\n" \
440 " .long 1b, 3b\n" \
441 " .popsection" \
442 : "+r" (err) \
443 : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
444 : "cc")
445 461
446#ifndef __ARMEB__ 462#ifndef __ARMEB__
447#define __reg_oper0 "%R2" 463#define __reg_oper0 "%R2"
@@ -474,11 +490,46 @@ do { \
474 490
475 491
476#ifdef CONFIG_MMU 492#ifdef CONFIG_MMU
477extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); 493extern unsigned long __must_check
478extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); 494arm_copy_from_user(void *to, const void __user *from, unsigned long n);
479extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n); 495
480extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); 496static inline unsigned long __must_check
481extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); 497__copy_from_user(void *to, const void __user *from, unsigned long n)
498{
499 unsigned int __ua_flags = uaccess_save_and_enable();
500 n = arm_copy_from_user(to, from, n);
501 uaccess_restore(__ua_flags);
502 return n;
503}
504
505extern unsigned long __must_check
506arm_copy_to_user(void __user *to, const void *from, unsigned long n);
507extern unsigned long __must_check
508__copy_to_user_std(void __user *to, const void *from, unsigned long n);
509
510static inline unsigned long __must_check
511__copy_to_user(void __user *to, const void *from, unsigned long n)
512{
513 unsigned int __ua_flags = uaccess_save_and_enable();
514 n = arm_copy_to_user(to, from, n);
515 uaccess_restore(__ua_flags);
516 return n;
517}
518
519extern unsigned long __must_check
520arm_clear_user(void __user *addr, unsigned long n);
521extern unsigned long __must_check
522__clear_user_std(void __user *addr, unsigned long n);
523
524static inline unsigned long __must_check
525__clear_user(void __user *addr, unsigned long n)
526{
527 unsigned int __ua_flags = uaccess_save_and_enable();
528 n = arm_clear_user(addr, n);
529 uaccess_restore(__ua_flags);
530 return n;
531}
532
482#else 533#else
483#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0) 534#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
484#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0) 535#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
@@ -511,6 +562,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
511 return n; 562 return n;
512} 563}
513 564
565/* These are from lib/ code, and use __get_user() and friends */
514extern long strncpy_from_user(char *dest, const char __user *src, long count); 566extern long strncpy_from_user(char *dest, const char __user *src, long count);
515 567
516extern __must_check long strlen_user(const char __user *str); 568extern __must_check long strlen_user(const char __user *str);
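One property worth calling out in the pattern above: the helpers save and restore the old DACR rather than unconditionally enabling and disabling, so user-access regions nest. A hypothetical illustration:

    static long copy_pair_to_user(void __user *ua, void __user *ub,
    			      const void *ka, const void *kb, size_t n)
    {
    	unsigned int flags = uaccess_save_and_enable();	/* outer region */
    	long ret = 0;

    	/* Each __copy_to_user() does its own save/enable + restore internally;
    	 * because it restores the value it saved, the outer region stays open
    	 * until the final uaccess_restore() below. */
    	if (__copy_to_user(ua, ka, n) || __copy_to_user(ub, kb, n))
    		ret = -EFAULT;

    	uaccess_restore(flags);		/* back to the caller's original state */
    	return ret;
    }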
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index e69f7a19735d..af9e59bf3831 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -71,8 +71,7 @@ obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
71obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o 71obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o
72obj-$(CONFIG_IWMMXT) += iwmmxt.o 72obj-$(CONFIG_IWMMXT) += iwmmxt.o
73obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o 73obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
74obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o \ 74obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \
75 perf_event_xscale.o perf_event_v6.o \
76 perf_event_v7.o 75 perf_event_v7.o
77CFLAGS_pj4-cp0.o := -marm 76CFLAGS_pj4-cp0.o := -marm
78AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt 77AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
@@ -89,7 +88,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
89 88
90obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o 89obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
91ifeq ($(CONFIG_ARM_PSCI),y) 90ifeq ($(CONFIG_ARM_PSCI),y)
92obj-y += psci.o psci-call.o 91obj-y += psci-call.o
93obj-$(CONFIG_SMP) += psci_smp.o 92obj-$(CONFIG_SMP) += psci_smp.o
94endif 93endif
95 94
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 5e5a51a99e68..f89811fb9a55 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy);
97#ifdef CONFIG_MMU 97#ifdef CONFIG_MMU
98EXPORT_SYMBOL(copy_page); 98EXPORT_SYMBOL(copy_page);
99 99
100EXPORT_SYMBOL(__copy_from_user); 100EXPORT_SYMBOL(arm_copy_from_user);
101EXPORT_SYMBOL(__copy_to_user); 101EXPORT_SYMBOL(arm_copy_to_user);
102EXPORT_SYMBOL(__clear_user); 102EXPORT_SYMBOL(arm_clear_user);
103 103
104EXPORT_SYMBOL(__get_user_1); 104EXPORT_SYMBOL(__get_user_1);
105EXPORT_SYMBOL(__get_user_2); 105EXPORT_SYMBOL(__get_user_2);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index cb4fb1e69778..3e1c26eb32b4 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -149,10 +149,10 @@ ENDPROC(__und_invalid)
149#define SPFIX(code...) 149#define SPFIX(code...)
150#endif 150#endif
151 151
152 .macro svc_entry, stack_hole=0, trace=1 152 .macro svc_entry, stack_hole=0, trace=1, uaccess=1
153 UNWIND(.fnstart ) 153 UNWIND(.fnstart )
154 UNWIND(.save {r0 - pc} ) 154 UNWIND(.save {r0 - pc} )
155 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) 155 sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
156#ifdef CONFIG_THUMB2_KERNEL 156#ifdef CONFIG_THUMB2_KERNEL
157 SPFIX( str r0, [sp] ) @ temporarily saved 157 SPFIX( str r0, [sp] ) @ temporarily saved
158 SPFIX( mov r0, sp ) 158 SPFIX( mov r0, sp )
@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
167 ldmia r0, {r3 - r5} 167 ldmia r0, {r3 - r5}
168 add r7, sp, #S_SP - 4 @ here for interlock avoidance 168 add r7, sp, #S_SP - 4 @ here for interlock avoidance
169 mov r6, #-1 @ "" "" "" "" 169 mov r6, #-1 @ "" "" "" ""
170 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) 170 add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
171 SPFIX( addeq r2, r2, #4 ) 171 SPFIX( addeq r2, r2, #4 )
172 str r3, [sp, #-4]! @ save the "real" r0 copied 172 str r3, [sp, #-4]! @ save the "real" r0 copied
173 @ from the exception stack 173 @ from the exception stack
@@ -185,6 +185,11 @@ ENDPROC(__und_invalid)
185 @ 185 @
186 stmia r7, {r2 - r6} 186 stmia r7, {r2 - r6}
187 187
188 uaccess_save r0
189 .if \uaccess
190 uaccess_disable r0
191 .endif
192
188 .if \trace 193 .if \trace
189#ifdef CONFIG_TRACE_IRQFLAGS 194#ifdef CONFIG_TRACE_IRQFLAGS
190 bl trace_hardirqs_off 195 bl trace_hardirqs_off
@@ -194,7 +199,7 @@ ENDPROC(__und_invalid)
194 199
195 .align 5 200 .align 5
196__dabt_svc: 201__dabt_svc:
197 svc_entry 202 svc_entry uaccess=0
198 mov r2, sp 203 mov r2, sp
199 dabt_helper 204 dabt_helper
200 THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR 205 THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt)
368#error "sizeof(struct pt_regs) must be a multiple of 8" 373#error "sizeof(struct pt_regs) must be a multiple of 8"
369#endif 374#endif
370 375
371 .macro usr_entry, trace=1 376 .macro usr_entry, trace=1, uaccess=1
372 UNWIND(.fnstart ) 377 UNWIND(.fnstart )
373 UNWIND(.cantunwind ) @ don't unwind the user space 378 UNWIND(.cantunwind ) @ don't unwind the user space
374 sub sp, sp, #S_FRAME_SIZE 379 sub sp, sp, #S_FRAME_SIZE
@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt)
400 ARM( stmdb r0, {sp, lr}^ ) 405 ARM( stmdb r0, {sp, lr}^ )
401 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) 406 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
402 407
408 .if \uaccess
409 uaccess_disable ip
410 .endif
411
403 @ Enable the alignment trap while in kernel mode 412 @ Enable the alignment trap while in kernel mode
404 ATRAP( teq r8, r7) 413 ATRAP( teq r8, r7)
405 ATRAP( mcrne p15, 0, r8, c1, c0, 0) 414 ATRAP( mcrne p15, 0, r8, c1, c0, 0)
@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt)
435 444
436 .align 5 445 .align 5
437__dabt_usr: 446__dabt_usr:
438 usr_entry 447 usr_entry uaccess=0
439 kuser_cmpxchg_check 448 kuser_cmpxchg_check
440 mov r2, sp 449 mov r2, sp
441 dabt_helper 450 dabt_helper
@@ -458,7 +467,7 @@ ENDPROC(__irq_usr)
458 467
459 .align 5 468 .align 5
460__und_usr: 469__und_usr:
461 usr_entry 470 usr_entry uaccess=0
462 471
463 mov r2, r4 472 mov r2, r4
464 mov r3, r5 473 mov r3, r5
@@ -484,6 +493,8 @@ __und_usr:
4841: ldrt r0, [r4] 4931: ldrt r0, [r4]
485 ARM_BE8(rev r0, r0) @ little endian instruction 494 ARM_BE8(rev r0, r0) @ little endian instruction
486 495
496 uaccess_disable ip
497
487 @ r0 = 32-bit ARM instruction which caused the exception 498 @ r0 = 32-bit ARM instruction which caused the exception
488 @ r2 = PC value for the following instruction (:= regs->ARM_pc) 499 @ r2 = PC value for the following instruction (:= regs->ARM_pc)
489 @ r4 = PC value for the faulting instruction 500 @ r4 = PC value for the faulting instruction
@@ -518,9 +529,10 @@ __und_usr_thumb:
5182: ldrht r5, [r4] 5292: ldrht r5, [r4]
519ARM_BE8(rev16 r5, r5) @ little endian instruction 530ARM_BE8(rev16 r5, r5) @ little endian instruction
520 cmp r5, #0xe800 @ 32bit instruction if xx != 0 531 cmp r5, #0xe800 @ 32bit instruction if xx != 0
521 blo __und_usr_fault_16 @ 16bit undefined instruction 532 blo __und_usr_fault_16_pan @ 16bit undefined instruction
5223: ldrht r0, [r2] 5333: ldrht r0, [r2]
523ARM_BE8(rev16 r0, r0) @ little endian instruction 534ARM_BE8(rev16 r0, r0) @ little endian instruction
535 uaccess_disable ip
524 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 536 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
525 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update 537 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
526 orr r0, r0, r5, lsl #16 538 orr r0, r0, r5, lsl #16
@@ -715,6 +727,8 @@ ENDPROC(no_fp)
715__und_usr_fault_32: 727__und_usr_fault_32:
716 mov r1, #4 728 mov r1, #4
717 b 1f 729 b 1f
730__und_usr_fault_16_pan:
731 uaccess_disable ip
718__und_usr_fault_16: 732__und_usr_fault_16:
719 mov r1, #2 733 mov r1, #2
7201: mov r0, sp 7341: mov r0, sp
@@ -770,6 +784,8 @@ ENTRY(__switch_to)
770 ldr r4, [r2, #TI_TP_VALUE] 784 ldr r4, [r2, #TI_TP_VALUE]
771 ldr r5, [r2, #TI_TP_VALUE + 4] 785 ldr r5, [r2, #TI_TP_VALUE + 4]
772#ifdef CONFIG_CPU_USE_DOMAINS 786#ifdef CONFIG_CPU_USE_DOMAINS
787 mrc p15, 0, r6, c3, c0, 0 @ Get domain register
788 str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
773 ldr r6, [r2, #TI_CPU_DOMAIN] 789 ldr r6, [r2, #TI_CPU_DOMAIN]
774#endif 790#endif
775 switch_tls r1, r4, r5, r3, r7 791 switch_tls r1, r4, r5, r3, r7
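Note on the entry-armv.S hunks above: the SVC exception frame grows by 8 bytes, the interrupted context's domain register is saved with uaccess_save and user access is shut off with uaccess_disable as soon as the kernel is entered. The data-abort and undefined-instruction entries pass uaccess=0 so they can still read the faulting user instruction, and re-disable access straight after the ldrt/ldrht loads. __switch_to likewise snapshots the live domain register instead of relying on a possibly stale per-thread copy. All of this operates on the ARM domain access control register (DACR, CP15 c3). A minimal sketch of the raw accessors, using only the coprocessor encoding visible in the __switch_to hunk (the helper names here are illustrative, not the kernel's macros):

/*
 * Read/write the domain access control register (CP15 c3), the register
 * that uaccess_save/uaccess_disable and the __switch_to hunk manipulate.
 */
static inline unsigned int dacr_read(void)
{
        unsigned int dacr;

        asm volatile("mrc p15, 0, %0, c3, c0, 0" : "=r" (dacr));
        return dacr;
}

static inline void dacr_write(unsigned int dacr)
{
        asm volatile("mcr p15, 0, %0, c3, c0, 0" : : "r" (dacr) : "memory");
}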
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b48dd4f37f80..30a7228eaceb 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -24,35 +24,55 @@
24 24
25 25
26 .align 5 26 .align 5
27#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
27/* 28/*
28 * This is the fast syscall return path. We do as little as 29 * This is the fast syscall return path. We do as little as possible here,
29 * possible here, and this includes saving r0 back into the SVC 30 * such as avoiding writing r0 to the stack. We only use this path if we
30 * stack. 31 * have tracing and context tracking disabled - the overheads from those
32 * features make this path too inefficient.
31 */ 33 */
32ret_fast_syscall: 34ret_fast_syscall:
33 UNWIND(.fnstart ) 35 UNWIND(.fnstart )
34 UNWIND(.cantunwind ) 36 UNWIND(.cantunwind )
35 disable_irq @ disable interrupts 37 disable_irq_notrace @ disable interrupts
36 ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing 38 ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
37 tst r1, #_TIF_SYSCALL_WORK 39 tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
38 bne __sys_trace_return
39 tst r1, #_TIF_WORK_MASK
40 bne fast_work_pending 40 bne fast_work_pending
41 asm_trace_hardirqs_on
42 41
43 /* perform architecture specific actions before user return */ 42 /* perform architecture specific actions before user return */
44 arch_ret_to_user r1, lr 43 arch_ret_to_user r1, lr
45 ct_user_enter
46 44
47 restore_user_regs fast = 1, offset = S_OFF 45 restore_user_regs fast = 1, offset = S_OFF
48 UNWIND(.fnend ) 46 UNWIND(.fnend )
47ENDPROC(ret_fast_syscall)
49 48
50/* 49 /* Ok, we need to do extra processing, enter the slow path. */
51 * Ok, we need to do extra processing, enter the slow path.
52 */
53fast_work_pending: 50fast_work_pending:
54 str r0, [sp, #S_R0+S_OFF]! @ returned r0 51 str r0, [sp, #S_R0+S_OFF]! @ returned r0
55work_pending: 52 /* fall through to work_pending */
53#else
54/*
55 * The "replacement" ret_fast_syscall for when tracing or context tracking
56 * is enabled. As we will need to call out to some C functions, we save
57 * r0 first to avoid needing to save registers around each C function call.
58 */
59ret_fast_syscall:
60 UNWIND(.fnstart )
61 UNWIND(.cantunwind )
62 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
63 disable_irq_notrace @ disable interrupts
64 ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
65 tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
66 beq no_work_pending
67 UNWIND(.fnend )
68ENDPROC(ret_fast_syscall)
69
70 /* Slower path - fall through to work_pending */
71#endif
72
73 tst r1, #_TIF_SYSCALL_WORK
74 bne __sys_trace_return_nosave
75slow_work_pending:
56 mov r0, sp @ 'regs' 76 mov r0, sp @ 'regs'
57 mov r2, why @ 'syscall' 77 mov r2, why @ 'syscall'
58 bl do_work_pending 78 bl do_work_pending
@@ -65,16 +85,19 @@ ENDPROC(ret_fast_syscall)
65 85
66/* 86/*
67 * "slow" syscall return path. "why" tells us if this was a real syscall. 87 * "slow" syscall return path. "why" tells us if this was a real syscall.
88 * IRQs may be enabled here, so always disable them. Note that we use the
89 * "notrace" version to avoid calling into the tracing code unnecessarily.
90 * do_work_pending() will update this state if necessary.
68 */ 91 */
69ENTRY(ret_to_user) 92ENTRY(ret_to_user)
70ret_slow_syscall: 93ret_slow_syscall:
71 disable_irq @ disable interrupts 94 disable_irq_notrace @ disable interrupts
72ENTRY(ret_to_user_from_irq) 95ENTRY(ret_to_user_from_irq)
73 ldr r1, [tsk, #TI_FLAGS] 96 ldr r1, [tsk, #TI_FLAGS]
74 tst r1, #_TIF_WORK_MASK 97 tst r1, #_TIF_WORK_MASK
75 bne work_pending 98 bne slow_work_pending
76no_work_pending: 99no_work_pending:
77 asm_trace_hardirqs_on 100 asm_trace_hardirqs_on save = 0
78 101
79 /* perform architecture specific actions before user return */ 102 /* perform architecture specific actions before user return */
80 arch_ret_to_user r1, lr 103 arch_ret_to_user r1, lr
@@ -174,6 +197,8 @@ ENTRY(vector_swi)
174 USER( ldr scno, [lr, #-4] ) @ get SWI instruction 197 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
175#endif 198#endif
176 199
200 uaccess_disable tbl
201
177 adr tbl, sys_call_table @ load syscall table pointer 202 adr tbl, sys_call_table @ load syscall table pointer
178 203
179#if defined(CONFIG_OABI_COMPAT) 204#if defined(CONFIG_OABI_COMPAT)
@@ -252,6 +277,12 @@ __sys_trace_return:
252 bl syscall_trace_exit 277 bl syscall_trace_exit
253 b ret_slow_syscall 278 b ret_slow_syscall
254 279
280__sys_trace_return_nosave:
281 enable_irq_notrace
282 mov r0, sp
283 bl syscall_trace_exit
284 b ret_slow_syscall
285
255 .align 5 286 .align 5
256#ifdef CONFIG_ALIGNMENT_TRAP 287#ifdef CONFIG_ALIGNMENT_TRAP
257 .type __cr_alignment, #object 288 .type __cr_alignment, #object
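entry-common.S now provides two builds of ret_fast_syscall. When neither IRQ-flag tracing nor context tracking is configured, the fast path never writes r0 back to the stack and folds the old pair of TST instructions into a single test of both flag masks; otherwise r0 is saved up front because the path will have to call into C, and both variants share slow_work_pending. The combined flag test is the same transformation one would write in C; a small sketch using the _TIF_* masks named in the hunk above:

/* One combined test instead of two, as in the new fast return path. */
static inline int needs_slow_return(unsigned long ti_flags)
{
        return (ti_flags & (_TIF_SYSCALL_WORK | _TIF_WORK_MASK)) != 0;
}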
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 1a0045abead7..0d22ad206d52 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -196,7 +196,7 @@
196 msr cpsr_c, \rtemp @ switch back to the SVC mode 196 msr cpsr_c, \rtemp @ switch back to the SVC mode
197 .endm 197 .endm
198 198
199#ifndef CONFIG_THUMB2_KERNEL 199
200 .macro svc_exit, rpsr, irq = 0 200 .macro svc_exit, rpsr, irq = 0
201 .if \irq != 0 201 .if \irq != 0
202 @ IRQs already off 202 @ IRQs already off
@@ -215,6 +215,10 @@
215 blne trace_hardirqs_off 215 blne trace_hardirqs_off
216#endif 216#endif
217 .endif 217 .endif
218 uaccess_restore
219
220#ifndef CONFIG_THUMB2_KERNEL
221 @ ARM mode SVC restore
218 msr spsr_cxsf, \rpsr 222 msr spsr_cxsf, \rpsr
219#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) 223#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
220 @ We must avoid clrex due to Cortex-A15 erratum #830321 224 @ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -222,6 +226,20 @@
222 strex r1, r2, [r0] @ clear the exclusive monitor 226 strex r1, r2, [r0] @ clear the exclusive monitor
223#endif 227#endif
224 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr 228 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
229#else
230 @ Thumb mode SVC restore
231 ldr lr, [sp, #S_SP] @ top of the stack
232 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
233
234 @ We must avoid clrex due to Cortex-A15 erratum #830321
235 strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
236
237 stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
238 ldmia sp, {r0 - r12}
239 mov sp, lr
240 ldr lr, [sp], #4
241 rfeia sp!
242#endif
225 .endm 243 .endm
226 244
227 @ 245 @
@@ -241,6 +259,9 @@
241 @ on the stack remains correct). 259 @ on the stack remains correct).
242 @ 260 @
243 .macro svc_exit_via_fiq 261 .macro svc_exit_via_fiq
262 uaccess_restore
263#ifndef CONFIG_THUMB2_KERNEL
264 @ ARM mode restore
244 mov r0, sp 265 mov r0, sp
245 ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will 266 ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will
246 @ clobber state restored below) 267 @ clobber state restored below)
@@ -250,9 +271,27 @@
250 msr spsr_cxsf, r9 271 msr spsr_cxsf, r9
251 ldr r0, [r0, #S_R0] 272 ldr r0, [r0, #S_R0]
252 ldmia r8, {pc}^ 273 ldmia r8, {pc}^
274#else
275 @ Thumb mode restore
276 add r0, sp, #S_R2
277 ldr lr, [sp, #S_LR]
278 ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
279 @ clobber state restored below)
280 ldmia r0, {r2 - r12}
281 mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
282 msr cpsr_c, r1
283 sub r0, #S_R2
284 add r8, r0, #S_PC
285 ldmia r0, {r0 - r1}
286 rfeia r8
287#endif
253 .endm 288 .endm
254 289
290
255 .macro restore_user_regs, fast = 0, offset = 0 291 .macro restore_user_regs, fast = 0, offset = 0
292 uaccess_enable r1, isb=0
293#ifndef CONFIG_THUMB2_KERNEL
294 @ ARM mode restore
256 mov r2, sp 295 mov r2, sp
257 ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr 296 ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
258 ldr lr, [r2, #\offset + S_PC]! @ get pc 297 ldr lr, [r2, #\offset + S_PC]! @ get pc
@@ -270,72 +309,16 @@
270 @ after ldm {}^ 309 @ after ldm {}^
271 add sp, sp, #\offset + S_FRAME_SIZE 310 add sp, sp, #\offset + S_FRAME_SIZE
272 movs pc, lr @ return & move spsr_svc into cpsr 311 movs pc, lr @ return & move spsr_svc into cpsr
273 .endm 312#elif defined(CONFIG_CPU_V7M)
274 313 @ V7M restore.
275#else /* CONFIG_THUMB2_KERNEL */ 314 @ Note that we don't need to do clrex here as clearing the local
276 .macro svc_exit, rpsr, irq = 0 315 @ monitor is part of the exception entry and exit sequence.
277 .if \irq != 0
278 @ IRQs already off
279#ifdef CONFIG_TRACE_IRQFLAGS
280 @ The parent context IRQs must have been enabled to get here in
281 @ the first place, so there's no point checking the PSR I bit.
282 bl trace_hardirqs_on
283#endif
284 .else
285 @ IRQs off again before pulling preserved data off the stack
286 disable_irq_notrace
287#ifdef CONFIG_TRACE_IRQFLAGS
288 tst \rpsr, #PSR_I_BIT
289 bleq trace_hardirqs_on
290 tst \rpsr, #PSR_I_BIT
291 blne trace_hardirqs_off
292#endif
293 .endif
294 ldr lr, [sp, #S_SP] @ top of the stack
295 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
296
297 @ We must avoid clrex due to Cortex-A15 erratum #830321
298 strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
299
300 stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
301 ldmia sp, {r0 - r12}
302 mov sp, lr
303 ldr lr, [sp], #4
304 rfeia sp!
305 .endm
306
307 @
308 @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
309 @
310 @ For full details see non-Thumb implementation above.
311 @
312 .macro svc_exit_via_fiq
313 add r0, sp, #S_R2
314 ldr lr, [sp, #S_LR]
315 ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
316 @ clobber state restored below)
317 ldmia r0, {r2 - r12}
318 mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
319 msr cpsr_c, r1
320 sub r0, #S_R2
321 add r8, r0, #S_PC
322 ldmia r0, {r0 - r1}
323 rfeia r8
324 .endm
325
326#ifdef CONFIG_CPU_V7M
327 /*
328 * Note we don't need to do clrex here as clearing the local monitor is
329 * part of each exception entry and exit sequence.
330 */
331 .macro restore_user_regs, fast = 0, offset = 0
332 .if \offset 316 .if \offset
333 add sp, #\offset 317 add sp, #\offset
334 .endif 318 .endif
335 v7m_exception_slow_exit ret_r0 = \fast 319 v7m_exception_slow_exit ret_r0 = \fast
336 .endm 320#else
337#else /* ifdef CONFIG_CPU_V7M */ 321 @ Thumb mode restore
338 .macro restore_user_regs, fast = 0, offset = 0
339 mov r2, sp 322 mov r2, sp
340 load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr 323 load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
341 ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr 324 ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
@@ -353,9 +336,8 @@
353 .endif 336 .endif
354 add sp, sp, #S_FRAME_SIZE - S_SP 337 add sp, sp, #S_FRAME_SIZE - S_SP
355 movs pc, lr @ return & move spsr_svc into cpsr 338 movs pc, lr @ return & move spsr_svc into cpsr
356 .endm
357#endif /* ifdef CONFIG_CPU_V7M / else */
358#endif /* !CONFIG_THUMB2_KERNEL */ 339#endif /* !CONFIG_THUMB2_KERNEL */
340 .endm
359 341
360/* 342/*
361 * Context tracking subsystem. Used to instrument transitions 343 * Context tracking subsystem. Used to instrument transitions
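The entry-header.S rework folds the separate ARM, Thumb-2 and V7-M versions of svc_exit, svc_exit_via_fiq and restore_user_regs into single macros with the #ifdefs inside, so the new uaccess_restore and uaccess_enable calls only need to be added once per exit path. Both flavours keep the Cortex-A15 erratum #830321 workaround: the local exclusive monitor is cleared with a dummy STREX to a stack slot rather than CLREX. That workaround in isolation, as illustrative inline asm rather than the kernel's exact sequence:

/* Clear the local exclusive monitor with a dummy STREX to a scratch slot. */
static inline void clear_exclusive_monitor(unsigned int *scratch)
{
        unsigned int status;

        asm volatile("strex %0, %2, [%1]"
                     : "=&r" (status)
                     : "r" (scratch), "r" (0)
                     : "memory");
}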
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 29e2991465cb..04286fd9e09c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -464,10 +464,7 @@ __enable_mmu:
464#ifdef CONFIG_ARM_LPAE 464#ifdef CONFIG_ARM_LPAE
465 mcrr p15, 0, r4, r5, c2 @ load TTBR0 465 mcrr p15, 0, r4, r5, c2 @ load TTBR0
466#else 466#else
467 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ 467 mov r5, #DACR_INIT
468 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
469 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
470 domain_val(DOMAIN_IO, DOMAIN_CLIENT))
471 mcr p15, 0, r5, c3, c0, 0 @ load domain access register 468 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
472 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer 469 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
473#endif 470#endif
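__enable_mmu no longer open-codes the initial domain access value; it loads DACR_INIT, which this series is assumed to define alongside the other domain helpers in <asm/domain.h>. The removed lines show how such a value is composed from two-bit per-domain fields; for reference, the composition helpers look like this (layout per <asm/domain.h>):

#define domain_mask(dom)        (3 << (2 * (dom)))
#define domain_val(dom, type)   ((type) << (2 * (dom)))

/* e.g. the removed initialiser was domain_val(DOMAIN_USER, DOMAIN_MANAGER) |
 * domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | ... OR'd together. */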
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index baf8edebe26f..5ff4826cb154 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -39,6 +39,7 @@
39#include <linux/export.h> 39#include <linux/export.h>
40 40
41#include <asm/hardware/cache-l2x0.h> 41#include <asm/hardware/cache-l2x0.h>
42#include <asm/outercache.h>
42#include <asm/exception.h> 43#include <asm/exception.h>
43#include <asm/mach/arch.h> 44#include <asm/mach/arch.h>
44#include <asm/mach/irq.h> 45#include <asm/mach/irq.h>
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
deleted file mode 100644
index 54272e0be713..000000000000
--- a/arch/arm/kernel/perf_event.c
+++ /dev/null
@@ -1,896 +0,0 @@
1#undef DEBUG
2
3/*
4 * ARM performance counter support.
5 *
6 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
7 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
8 *
9 * This code is based on the sparc64 perf event code, which is in turn based
10 * on the x86 code.
11 */
12#define pr_fmt(fmt) "hw perfevents: " fmt
13
14#include <linux/bitmap.h>
15#include <linux/cpumask.h>
16#include <linux/export.h>
17#include <linux/kernel.h>
18#include <linux/of.h>
19#include <linux/platform_device.h>
20#include <linux/slab.h>
21#include <linux/spinlock.h>
22#include <linux/irq.h>
23#include <linux/irqdesc.h>
24
25#include <asm/cputype.h>
26#include <asm/irq_regs.h>
27#include <asm/pmu.h>
28
29static int
30armpmu_map_cache_event(const unsigned (*cache_map)
31 [PERF_COUNT_HW_CACHE_MAX]
32 [PERF_COUNT_HW_CACHE_OP_MAX]
33 [PERF_COUNT_HW_CACHE_RESULT_MAX],
34 u64 config)
35{
36 unsigned int cache_type, cache_op, cache_result, ret;
37
38 cache_type = (config >> 0) & 0xff;
39 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
40 return -EINVAL;
41
42 cache_op = (config >> 8) & 0xff;
43 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
44 return -EINVAL;
45
46 cache_result = (config >> 16) & 0xff;
47 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
48 return -EINVAL;
49
50 ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
51
52 if (ret == CACHE_OP_UNSUPPORTED)
53 return -ENOENT;
54
55 return ret;
56}
57
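For reference while reading the removed file: armpmu_map_cache_event() above decodes the standard perf cache-event encoding, in which the low three bytes of attr.config select cache type, operation and result. A worked example of one such encoding (constant names from the perf UAPI; the particular combination is illustrative):

/* L1-data read misses, as decoded by armpmu_map_cache_event(). */
u64 config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |  /* cache_type   */
             (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |  /* cache_op     */
             (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);   /* cache_result */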
58static int
59armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
60{
61 int mapping;
62
63 if (config >= PERF_COUNT_HW_MAX)
64 return -EINVAL;
65
66 mapping = (*event_map)[config];
67 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
68}
69
70static int
71armpmu_map_raw_event(u32 raw_event_mask, u64 config)
72{
73 return (int)(config & raw_event_mask);
74}
75
76int
77armpmu_map_event(struct perf_event *event,
78 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
79 const unsigned (*cache_map)
80 [PERF_COUNT_HW_CACHE_MAX]
81 [PERF_COUNT_HW_CACHE_OP_MAX]
82 [PERF_COUNT_HW_CACHE_RESULT_MAX],
83 u32 raw_event_mask)
84{
85 u64 config = event->attr.config;
86 int type = event->attr.type;
87
88 if (type == event->pmu->type)
89 return armpmu_map_raw_event(raw_event_mask, config);
90
91 switch (type) {
92 case PERF_TYPE_HARDWARE:
93 return armpmu_map_hw_event(event_map, config);
94 case PERF_TYPE_HW_CACHE:
95 return armpmu_map_cache_event(cache_map, config);
96 case PERF_TYPE_RAW:
97 return armpmu_map_raw_event(raw_event_mask, config);
98 }
99
100 return -ENOENT;
101}
102
103int armpmu_event_set_period(struct perf_event *event)
104{
105 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
106 struct hw_perf_event *hwc = &event->hw;
107 s64 left = local64_read(&hwc->period_left);
108 s64 period = hwc->sample_period;
109 int ret = 0;
110
111 if (unlikely(left <= -period)) {
112 left = period;
113 local64_set(&hwc->period_left, left);
114 hwc->last_period = period;
115 ret = 1;
116 }
117
118 if (unlikely(left <= 0)) {
119 left += period;
120 local64_set(&hwc->period_left, left);
121 hwc->last_period = period;
122 ret = 1;
123 }
124
125 /*
126 * Limit the maximum period to prevent the counter value
127 * from overtaking the one we are about to program. In
128 * effect we are reducing max_period to account for
129 * interrupt latency (and we are being very conservative).
130 */
131 if (left > (armpmu->max_period >> 1))
132 left = armpmu->max_period >> 1;
133
134 local64_set(&hwc->prev_count, (u64)-left);
135
136 armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
137
138 perf_event_update_userpage(event);
139
140 return ret;
141}
142
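armpmu_event_set_period() programs the counter with the two's complement of the remaining period, so the hardware overflows (and interrupts) after exactly `left` increments, and it caps `left` at half the counter range to leave headroom for interrupt latency. A worked example for a 32-bit counter:

/* max_period = 0xffffffff, 1000 events remaining: */
s64 left       = 1000;
u32 programmed = (u64)(-left) & 0xffffffff;   /* 0xfffffc18 */
/* the counter wraps to zero, raising the overflow IRQ, after 1000 counts */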
143u64 armpmu_event_update(struct perf_event *event)
144{
145 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
146 struct hw_perf_event *hwc = &event->hw;
147 u64 delta, prev_raw_count, new_raw_count;
148
149again:
150 prev_raw_count = local64_read(&hwc->prev_count);
151 new_raw_count = armpmu->read_counter(event);
152
153 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
154 new_raw_count) != prev_raw_count)
155 goto again;
156
157 delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
158
159 local64_add(delta, &event->count);
160 local64_sub(delta, &hwc->period_left);
161
162 return new_raw_count;
163}
164
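armpmu_event_update() is the matching read side: the delta is taken modulo the counter width, so a wrap between two reads is handled correctly as long as fewer than max_period events elapsed in between. Numerically:

/* 32-bit counter wrapping between reads: */
u64 prev  = 0xfffffff0;
u64 now   = 0x00000010;
u64 delta = (now - prev) & 0xffffffff;   /* 0x20, i.e. 32 events */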
165static void
166armpmu_read(struct perf_event *event)
167{
168 armpmu_event_update(event);
169}
170
171static void
172armpmu_stop(struct perf_event *event, int flags)
173{
174 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
175 struct hw_perf_event *hwc = &event->hw;
176
177 /*
178 * ARM pmu always has to update the counter, so ignore
179 * PERF_EF_UPDATE, see comments in armpmu_start().
180 */
181 if (!(hwc->state & PERF_HES_STOPPED)) {
182 armpmu->disable(event);
183 armpmu_event_update(event);
184 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
185 }
186}
187
188static void armpmu_start(struct perf_event *event, int flags)
189{
190 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
191 struct hw_perf_event *hwc = &event->hw;
192
193 /*
194 * ARM pmu always has to reprogram the period, so ignore
195 * PERF_EF_RELOAD, see the comment below.
196 */
197 if (flags & PERF_EF_RELOAD)
198 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
199
200 hwc->state = 0;
201 /*
202 * Set the period again. Some counters can't be stopped, so when we
203 * were stopped we simply disabled the IRQ source and the counter
204 * may have been left counting. If we don't do this step then we may
205 * get an interrupt too soon or *way* too late if the overflow has
206 * happened since disabling.
207 */
208 armpmu_event_set_period(event);
209 armpmu->enable(event);
210}
211
212static void
213armpmu_del(struct perf_event *event, int flags)
214{
215 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
216 struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
217 struct hw_perf_event *hwc = &event->hw;
218 int idx = hwc->idx;
219
220 armpmu_stop(event, PERF_EF_UPDATE);
221 hw_events->events[idx] = NULL;
222 clear_bit(idx, hw_events->used_mask);
223 if (armpmu->clear_event_idx)
224 armpmu->clear_event_idx(hw_events, event);
225
226 perf_event_update_userpage(event);
227}
228
229static int
230armpmu_add(struct perf_event *event, int flags)
231{
232 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
233 struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
234 struct hw_perf_event *hwc = &event->hw;
235 int idx;
236 int err = 0;
237
238 /* An event following a process won't be stopped earlier */
239 if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
240 return -ENOENT;
241
242 perf_pmu_disable(event->pmu);
243
244 /* If we don't have a space for the counter then finish early. */
245 idx = armpmu->get_event_idx(hw_events, event);
246 if (idx < 0) {
247 err = idx;
248 goto out;
249 }
250
251 /*
252 * If there is an event in the counter we are going to use then make
253 * sure it is disabled.
254 */
255 event->hw.idx = idx;
256 armpmu->disable(event);
257 hw_events->events[idx] = event;
258
259 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
260 if (flags & PERF_EF_START)
261 armpmu_start(event, PERF_EF_RELOAD);
262
263 /* Propagate our changes to the userspace mapping. */
264 perf_event_update_userpage(event);
265
266out:
267 perf_pmu_enable(event->pmu);
268 return err;
269}
270
271static int
272validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
273 struct perf_event *event)
274{
275 struct arm_pmu *armpmu;
276
277 if (is_software_event(event))
278 return 1;
279
280 /*
281 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
282 * core perf code won't check that the pmu->ctx == leader->ctx
283 * until after pmu->event_init(event).
284 */
285 if (event->pmu != pmu)
286 return 0;
287
288 if (event->state < PERF_EVENT_STATE_OFF)
289 return 1;
290
291 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
292 return 1;
293
294 armpmu = to_arm_pmu(event->pmu);
295 return armpmu->get_event_idx(hw_events, event) >= 0;
296}
297
298static int
299validate_group(struct perf_event *event)
300{
301 struct perf_event *sibling, *leader = event->group_leader;
302 struct pmu_hw_events fake_pmu;
303
304 /*
305 * Initialise the fake PMU. We only need to populate the
306 * used_mask for the purposes of validation.
307 */
308 memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
309
310 if (!validate_event(event->pmu, &fake_pmu, leader))
311 return -EINVAL;
312
313 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
314 if (!validate_event(event->pmu, &fake_pmu, sibling))
315 return -EINVAL;
316 }
317
318 if (!validate_event(event->pmu, &fake_pmu, event))
319 return -EINVAL;
320
321 return 0;
322}
323
324static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
325{
326 struct arm_pmu *armpmu;
327 struct platform_device *plat_device;
328 struct arm_pmu_platdata *plat;
329 int ret;
330 u64 start_clock, finish_clock;
331
332 /*
333 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
334 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
335 * do any necessary shifting, we just need to perform the first
336 * dereference.
337 */
338 armpmu = *(void **)dev;
339 plat_device = armpmu->plat_device;
340 plat = dev_get_platdata(&plat_device->dev);
341
342 start_clock = sched_clock();
343 if (plat && plat->handle_irq)
344 ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
345 else
346 ret = armpmu->handle_irq(irq, armpmu);
347 finish_clock = sched_clock();
348
349 perf_sample_event_took(finish_clock - start_clock);
350 return ret;
351}
352
353static void
354armpmu_release_hardware(struct arm_pmu *armpmu)
355{
356 armpmu->free_irq(armpmu);
357}
358
359static int
360armpmu_reserve_hardware(struct arm_pmu *armpmu)
361{
362 int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
363 if (err) {
364 armpmu_release_hardware(armpmu);
365 return err;
366 }
367
368 return 0;
369}
370
371static void
372hw_perf_event_destroy(struct perf_event *event)
373{
374 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
375 atomic_t *active_events = &armpmu->active_events;
376 struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
377
378 if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
379 armpmu_release_hardware(armpmu);
380 mutex_unlock(pmu_reserve_mutex);
381 }
382}
383
384static int
385event_requires_mode_exclusion(struct perf_event_attr *attr)
386{
387 return attr->exclude_idle || attr->exclude_user ||
388 attr->exclude_kernel || attr->exclude_hv;
389}
390
391static int
392__hw_perf_event_init(struct perf_event *event)
393{
394 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
395 struct hw_perf_event *hwc = &event->hw;
396 int mapping;
397
398 mapping = armpmu->map_event(event);
399
400 if (mapping < 0) {
401 pr_debug("event %x:%llx not supported\n", event->attr.type,
402 event->attr.config);
403 return mapping;
404 }
405
406 /*
407 * We don't assign an index until we actually place the event onto
408 * hardware. Use -1 to signify that we haven't decided where to put it
409 * yet. For SMP systems, each core has it's own PMU so we can't do any
410 * clever allocation or constraints checking at this point.
411 */
412 hwc->idx = -1;
413 hwc->config_base = 0;
414 hwc->config = 0;
415 hwc->event_base = 0;
416
417 /*
418 * Check whether we need to exclude the counter from certain modes.
419 */
420 if ((!armpmu->set_event_filter ||
421 armpmu->set_event_filter(hwc, &event->attr)) &&
422 event_requires_mode_exclusion(&event->attr)) {
423 pr_debug("ARM performance counters do not support "
424 "mode exclusion\n");
425 return -EOPNOTSUPP;
426 }
427
428 /*
429 * Store the event encoding into the config_base field.
430 */
431 hwc->config_base |= (unsigned long)mapping;
432
433 if (!is_sampling_event(event)) {
434 /*
435 * For non-sampling runs, limit the sample_period to half
436 * of the counter width. That way, the new counter value
437 * is far less likely to overtake the previous one unless
438 * you have some serious IRQ latency issues.
439 */
440 hwc->sample_period = armpmu->max_period >> 1;
441 hwc->last_period = hwc->sample_period;
442 local64_set(&hwc->period_left, hwc->sample_period);
443 }
444
445 if (event->group_leader != event) {
446 if (validate_group(event) != 0)
447 return -EINVAL;
448 }
449
450 return 0;
451}
452
453static int armpmu_event_init(struct perf_event *event)
454{
455 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
456 int err = 0;
457 atomic_t *active_events = &armpmu->active_events;
458
459 /*
460 * Reject CPU-affine events for CPUs that are of a different class to
461 * that which this PMU handles. Process-following events (where
462 * event->cpu == -1) can be migrated between CPUs, and thus we have to
463 * reject them later (in armpmu_add) if they're scheduled on a
464 * different class of CPU.
465 */
466 if (event->cpu != -1 &&
467 !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
468 return -ENOENT;
469
470 /* does not support taken branch sampling */
471 if (has_branch_stack(event))
472 return -EOPNOTSUPP;
473
474 if (armpmu->map_event(event) == -ENOENT)
475 return -ENOENT;
476
477 event->destroy = hw_perf_event_destroy;
478
479 if (!atomic_inc_not_zero(active_events)) {
480 mutex_lock(&armpmu->reserve_mutex);
481 if (atomic_read(active_events) == 0)
482 err = armpmu_reserve_hardware(armpmu);
483
484 if (!err)
485 atomic_inc(active_events);
486 mutex_unlock(&armpmu->reserve_mutex);
487 }
488
489 if (err)
490 return err;
491
492 err = __hw_perf_event_init(event);
493 if (err)
494 hw_perf_event_destroy(event);
495
496 return err;
497}
498
499static void armpmu_enable(struct pmu *pmu)
500{
501 struct arm_pmu *armpmu = to_arm_pmu(pmu);
502 struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
503 int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
504
505 /* For task-bound events we may be called on other CPUs */
506 if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
507 return;
508
509 if (enabled)
510 armpmu->start(armpmu);
511}
512
513static void armpmu_disable(struct pmu *pmu)
514{
515 struct arm_pmu *armpmu = to_arm_pmu(pmu);
516
517 /* For task-bound events we may be called on other CPUs */
518 if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
519 return;
520
521 armpmu->stop(armpmu);
522}
523
524/*
525 * In heterogeneous systems, events are specific to a particular
526 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
527 * the same microarchitecture.
528 */
529static int armpmu_filter_match(struct perf_event *event)
530{
531 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
532 unsigned int cpu = smp_processor_id();
533 return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
534}
535
536static void armpmu_init(struct arm_pmu *armpmu)
537{
538 atomic_set(&armpmu->active_events, 0);
539 mutex_init(&armpmu->reserve_mutex);
540
541 armpmu->pmu = (struct pmu) {
542 .pmu_enable = armpmu_enable,
543 .pmu_disable = armpmu_disable,
544 .event_init = armpmu_event_init,
545 .add = armpmu_add,
546 .del = armpmu_del,
547 .start = armpmu_start,
548 .stop = armpmu_stop,
549 .read = armpmu_read,
550 .filter_match = armpmu_filter_match,
551 };
552}
553
554int armpmu_register(struct arm_pmu *armpmu, int type)
555{
556 armpmu_init(armpmu);
557 pr_info("enabled with %s PMU driver, %d counters available\n",
558 armpmu->name, armpmu->num_events);
559 return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
560}
561
562/* Set at runtime when we know what CPU type we are. */
563static struct arm_pmu *__oprofile_cpu_pmu;
564
565/*
566 * Despite the names, these two functions are CPU-specific and are used
567 * by the OProfile/perf code.
568 */
569const char *perf_pmu_name(void)
570{
571 if (!__oprofile_cpu_pmu)
572 return NULL;
573
574 return __oprofile_cpu_pmu->name;
575}
576EXPORT_SYMBOL_GPL(perf_pmu_name);
577
578int perf_num_counters(void)
579{
580 int max_events = 0;
581
582 if (__oprofile_cpu_pmu != NULL)
583 max_events = __oprofile_cpu_pmu->num_events;
584
585 return max_events;
586}
587EXPORT_SYMBOL_GPL(perf_num_counters);
588
589static void cpu_pmu_enable_percpu_irq(void *data)
590{
591 int irq = *(int *)data;
592
593 enable_percpu_irq(irq, IRQ_TYPE_NONE);
594}
595
596static void cpu_pmu_disable_percpu_irq(void *data)
597{
598 int irq = *(int *)data;
599
600 disable_percpu_irq(irq);
601}
602
603static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
604{
605 int i, irq, irqs;
606 struct platform_device *pmu_device = cpu_pmu->plat_device;
607 struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
608
609 irqs = min(pmu_device->num_resources, num_possible_cpus());
610
611 irq = platform_get_irq(pmu_device, 0);
612 if (irq >= 0 && irq_is_percpu(irq)) {
613 on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
614 free_percpu_irq(irq, &hw_events->percpu_pmu);
615 } else {
616 for (i = 0; i < irqs; ++i) {
617 int cpu = i;
618
619 if (cpu_pmu->irq_affinity)
620 cpu = cpu_pmu->irq_affinity[i];
621
622 if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
623 continue;
624 irq = platform_get_irq(pmu_device, i);
625 if (irq >= 0)
626 free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
627 }
628 }
629}
630
631static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
632{
633 int i, err, irq, irqs;
634 struct platform_device *pmu_device = cpu_pmu->plat_device;
635 struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
636
637 if (!pmu_device)
638 return -ENODEV;
639
640 irqs = min(pmu_device->num_resources, num_possible_cpus());
641 if (irqs < 1) {
642 pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
643 return 0;
644 }
645
646 irq = platform_get_irq(pmu_device, 0);
647 if (irq >= 0 && irq_is_percpu(irq)) {
648 err = request_percpu_irq(irq, handler, "arm-pmu",
649 &hw_events->percpu_pmu);
650 if (err) {
651 pr_err("unable to request IRQ%d for ARM PMU counters\n",
652 irq);
653 return err;
654 }
655 on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
656 } else {
657 for (i = 0; i < irqs; ++i) {
658 int cpu = i;
659
660 err = 0;
661 irq = platform_get_irq(pmu_device, i);
662 if (irq < 0)
663 continue;
664
665 if (cpu_pmu->irq_affinity)
666 cpu = cpu_pmu->irq_affinity[i];
667
668 /*
669 * If we have a single PMU interrupt that we can't shift,
670 * assume that we're running on a uniprocessor machine and
671 * continue. Otherwise, continue without this interrupt.
672 */
673 if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
674 pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
675 irq, cpu);
676 continue;
677 }
678
679 err = request_irq(irq, handler,
680 IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
681 per_cpu_ptr(&hw_events->percpu_pmu, cpu));
682 if (err) {
683 pr_err("unable to request IRQ%d for ARM PMU counters\n",
684 irq);
685 return err;
686 }
687
688 cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
689 }
690 }
691
692 return 0;
693}
694
695/*
696 * PMU hardware loses all context when a CPU goes offline.
697 * When a CPU is hotplugged back in, since some hardware registers are
698 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
699 * junk values out of them.
700 */
701static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
702 void *hcpu)
703{
704 int cpu = (unsigned long)hcpu;
705 struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
706
707 if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
708 return NOTIFY_DONE;
709
710 if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
711 return NOTIFY_DONE;
712
713 if (pmu->reset)
714 pmu->reset(pmu);
715 else
716 return NOTIFY_DONE;
717
718 return NOTIFY_OK;
719}
720
721static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
722{
723 int err;
724 int cpu;
725 struct pmu_hw_events __percpu *cpu_hw_events;
726
727 cpu_hw_events = alloc_percpu(struct pmu_hw_events);
728 if (!cpu_hw_events)
729 return -ENOMEM;
730
731 cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
732 err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
733 if (err)
734 goto out_hw_events;
735
736 for_each_possible_cpu(cpu) {
737 struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
738 raw_spin_lock_init(&events->pmu_lock);
739 events->percpu_pmu = cpu_pmu;
740 }
741
742 cpu_pmu->hw_events = cpu_hw_events;
743 cpu_pmu->request_irq = cpu_pmu_request_irq;
744 cpu_pmu->free_irq = cpu_pmu_free_irq;
745
746 /* Ensure the PMU has sane values out of reset. */
747 if (cpu_pmu->reset)
748 on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
749 cpu_pmu, 1);
750
751 /* If no interrupts available, set the corresponding capability flag */
752 if (!platform_get_irq(cpu_pmu->plat_device, 0))
753 cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
754
755 return 0;
756
757out_hw_events:
758 free_percpu(cpu_hw_events);
759 return err;
760}
761
762static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
763{
764 unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
765 free_percpu(cpu_pmu->hw_events);
766}
767
768/*
769 * CPU PMU identification and probing.
770 */
771static int probe_current_pmu(struct arm_pmu *pmu,
772 const struct pmu_probe_info *info)
773{
774 int cpu = get_cpu();
775 unsigned int cpuid = read_cpuid_id();
776 int ret = -ENODEV;
777
778 pr_info("probing PMU on CPU %d\n", cpu);
779
780 for (; info->init != NULL; info++) {
781 if ((cpuid & info->mask) != info->cpuid)
782 continue;
783 ret = info->init(pmu);
784 break;
785 }
786
787 put_cpu();
788 return ret;
789}
790
791static int of_pmu_irq_cfg(struct arm_pmu *pmu)
792{
793 int i, irq, *irqs;
794 struct platform_device *pdev = pmu->plat_device;
795
796 /* Don't bother with PPIs; they're already affine */
797 irq = platform_get_irq(pdev, 0);
798 if (irq >= 0 && irq_is_percpu(irq))
799 return 0;
800
801 irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
802 if (!irqs)
803 return -ENOMEM;
804
805 for (i = 0; i < pdev->num_resources; ++i) {
806 struct device_node *dn;
807 int cpu;
808
809 dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
810 i);
811 if (!dn) {
812 pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
813 of_node_full_name(pdev->dev.of_node), i);
814 break;
815 }
816
817 for_each_possible_cpu(cpu)
818 if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
819 break;
820
821 if (cpu >= nr_cpu_ids) {
822 pr_warn("Failed to find logical CPU for %s\n",
823 dn->name);
824 of_node_put(dn);
825 break;
826 }
827 of_node_put(dn);
828
829 irqs[i] = cpu;
830 cpumask_set_cpu(cpu, &pmu->supported_cpus);
831 }
832
833 if (i == pdev->num_resources) {
834 pmu->irq_affinity = irqs;
835 } else {
836 kfree(irqs);
837 cpumask_setall(&pmu->supported_cpus);
838 }
839
840 return 0;
841}
842
843int arm_pmu_device_probe(struct platform_device *pdev,
844 const struct of_device_id *of_table,
845 const struct pmu_probe_info *probe_table)
846{
847 const struct of_device_id *of_id;
848 const int (*init_fn)(struct arm_pmu *);
849 struct device_node *node = pdev->dev.of_node;
850 struct arm_pmu *pmu;
851 int ret = -ENODEV;
852
853 pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
854 if (!pmu) {
855 pr_info("failed to allocate PMU device!\n");
856 return -ENOMEM;
857 }
858
859 if (!__oprofile_cpu_pmu)
860 __oprofile_cpu_pmu = pmu;
861
862 pmu->plat_device = pdev;
863
864 if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
865 init_fn = of_id->data;
866
867 ret = of_pmu_irq_cfg(pmu);
868 if (!ret)
869 ret = init_fn(pmu);
870 } else {
871 ret = probe_current_pmu(pmu, probe_table);
872 cpumask_setall(&pmu->supported_cpus);
873 }
874
875 if (ret) {
876 pr_info("failed to probe PMU!\n");
877 goto out_free;
878 }
879
880 ret = cpu_pmu_init(pmu);
881 if (ret)
882 goto out_free;
883
884 ret = armpmu_register(pmu, -1);
885 if (ret)
886 goto out_destroy;
887
888 return 0;
889
890out_destroy:
891 cpu_pmu_destroy(pmu);
892out_free:
893 pr_info("failed to register PMU devices!\n");
894 kfree(pmu);
895 return ret;
896}
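This removes the entire ARM PMU core — event mapping, period handling, the shared IRQ dispatch, the hotplug notifier and the DT/platform probing — from arch/arm. The backend hunks that follow switch from <asm/pmu.h> to <linux/perf/arm_pmu.h>, which indicates the core now lives in generic code shared with arm64 (likely drivers/perf; that destination is inferred from the includes and is not shown in this diff). Backends keep the same registration interface. A hedged sketch of backend probe glue against the relocated API, with "foo" names as placeholders:

#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

static int foo_pmu_init(struct arm_pmu *pmu)
{
        /* fill in pmu->handle_irq, ->enable, ->disable, ->map_event, ... */
        return -ENODEV;         /* placeholder */
}

static const struct of_device_id foo_pmu_of_ids[] = {
        { .compatible = "foo,pmu", .data = foo_pmu_init },
        { /* sentinel */ }
};

static int foo_pmu_probe(struct platform_device *pdev)
{
        return arm_pmu_device_probe(pdev, foo_pmu_of_ids, NULL);
}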
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 09f83e414a72..09413e7b49aa 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -34,9 +34,9 @@
34 34
35#include <asm/cputype.h> 35#include <asm/cputype.h>
36#include <asm/irq_regs.h> 36#include <asm/irq_regs.h>
37#include <asm/pmu.h>
38 37
39#include <linux/of.h> 38#include <linux/of.h>
39#include <linux/perf/arm_pmu.h>
40#include <linux/platform_device.h> 40#include <linux/platform_device.h>
41 41
42enum armv6_perf_types { 42enum armv6_perf_types {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index f9b37f876e20..126dc679b230 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -21,11 +21,11 @@
21#include <asm/cp15.h> 21#include <asm/cp15.h>
22#include <asm/cputype.h> 22#include <asm/cputype.h>
23#include <asm/irq_regs.h> 23#include <asm/irq_regs.h>
24#include <asm/pmu.h>
25#include <asm/vfp.h> 24#include <asm/vfp.h>
26#include "../vfp/vfpinstr.h" 25#include "../vfp/vfpinstr.h"
27 26
28#include <linux/of.h> 27#include <linux/of.h>
28#include <linux/perf/arm_pmu.h>
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30 30
31/* 31/*
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 304d056d5b25..aa0499e2eef7 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -16,9 +16,9 @@
16 16
17#include <asm/cputype.h> 17#include <asm/cputype.h>
18#include <asm/irq_regs.h> 18#include <asm/irq_regs.h>
19#include <asm/pmu.h>
20 19
21#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/perf/arm_pmu.h>
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23 23
24enum xscale_perf_types { 24enum xscale_perf_types {
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index f192a2a41719..a3089bacb8d8 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -91,13 +91,6 @@ void arch_cpu_idle_exit(void)
91 ledtrig_cpu(CPU_LED_IDLE_END); 91 ledtrig_cpu(CPU_LED_IDLE_END);
92} 92}
93 93
94#ifdef CONFIG_HOTPLUG_CPU
95void arch_cpu_idle_dead(void)
96{
97 cpu_die();
98}
99#endif
100
101void __show_regs(struct pt_regs *regs) 94void __show_regs(struct pt_regs *regs)
102{ 95{
103 unsigned long flags; 96 unsigned long flags;
@@ -129,12 +122,36 @@ void __show_regs(struct pt_regs *regs)
129 buf[4] = '\0'; 122 buf[4] = '\0';
130 123
131#ifndef CONFIG_CPU_V7M 124#ifndef CONFIG_CPU_V7M
132 printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n", 125 {
133 buf, interrupts_enabled(regs) ? "n" : "ff", 126 unsigned int domain = get_domain();
134 fast_interrupts_enabled(regs) ? "n" : "ff", 127 const char *segment;
135 processor_modes[processor_mode(regs)], 128
136 isa_modes[isa_mode(regs)], 129#ifdef CONFIG_CPU_SW_DOMAIN_PAN
137 get_fs() == get_ds() ? "kernel" : "user"); 130 /*
131 * Get the domain register for the parent context. In user
132 * mode, we don't save the DACR, so lets use what it should
133 * be. For other modes, we place it after the pt_regs struct.
134 */
135 if (user_mode(regs))
136 domain = DACR_UACCESS_ENABLE;
137 else
138 domain = *(unsigned int *)(regs + 1);
139#endif
140
141 if ((domain & domain_mask(DOMAIN_USER)) ==
142 domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
143 segment = "none";
144 else if (get_fs() == get_ds())
145 segment = "kernel";
146 else
147 segment = "user";
148
149 printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
150 buf, interrupts_enabled(regs) ? "n" : "ff",
151 fast_interrupts_enabled(regs) ? "n" : "ff",
152 processor_modes[processor_mode(regs)],
153 isa_modes[isa_mode(regs)], segment);
154 }
138#else 155#else
139 printk("xPSR: %08lx\n", regs->ARM_cpsr); 156 printk("xPSR: %08lx\n", regs->ARM_cpsr);
140#endif 157#endif
@@ -146,10 +163,9 @@ void __show_regs(struct pt_regs *regs)
146 buf[0] = '\0'; 163 buf[0] = '\0';
147#ifdef CONFIG_CPU_CP15_MMU 164#ifdef CONFIG_CPU_CP15_MMU
148 { 165 {
149 unsigned int transbase, dac; 166 unsigned int transbase, dac = get_domain();
150 asm("mrc p15, 0, %0, c2, c0\n\t" 167 asm("mrc p15, 0, %0, c2, c0\n\t"
151 "mrc p15, 0, %1, c3, c0\n" 168 : "=r" (transbase));
152 : "=r" (transbase), "=r" (dac));
153 snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", 169 snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
154 transbase, dac); 170 transbase, dac);
155 } 171 }
@@ -210,6 +226,14 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
210 226
211 memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); 227 memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
212 228
229 /*
230 * Copy the initial value of the domain access control register
231 * from the current thread: thread->addr_limit will have been
232 * copied from the current thread via setup_thread_stack() in
233 * kernel/fork.c
234 */
235 thread->cpu_domain = get_domain();
236
213 if (likely(!(p->flags & PF_KTHREAD))) { 237 if (likely(!(p->flags & PF_KTHREAD))) {
214 *childregs = *current_pt_regs(); 238 *childregs = *current_pt_regs();
215 childregs->ARM_r0 = 0; 239 childregs->ARM_r0 = 0;
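These process.c changes tie back to the enlarged exception frame in entry-armv.S: on CONFIG_CPU_SW_DOMAIN_PAN kernels the entry code stores the interrupted context's DACR immediately after struct pt_regs, so __show_regs recovers it from there for kernel-mode frames and simply reports DACR_UACCESS_ENABLE for user-mode frames (user DACR is never saved). copy_thread also snapshots the live domain register into thread->cpu_domain for the new task. The recovery, under that frame layout:

/* Parent context's DACR sits directly after the saved pt_regs. */
static inline unsigned int parent_dacr(struct pt_regs *regs)
{
        return *(unsigned int *)(regs + 1);
}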
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
deleted file mode 100644
index 2e6024334790..000000000000
--- a/arch/arm/kernel/psci.c
+++ /dev/null
@@ -1,299 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2012 ARM Limited
12 *
13 * Author: Will Deacon <will.deacon@arm.com>
14 */
15
16#define pr_fmt(fmt) "psci: " fmt
17
18#include <linux/init.h>
19#include <linux/of.h>
20#include <linux/reboot.h>
21#include <linux/pm.h>
22#include <uapi/linux/psci.h>
23
24#include <asm/compiler.h>
25#include <asm/errno.h>
26#include <asm/psci.h>
27#include <asm/system_misc.h>
28
29struct psci_operations psci_ops;
30
31static int (*invoke_psci_fn)(u32, u32, u32, u32);
32typedef int (*psci_initcall_t)(const struct device_node *);
33
34asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32);
35asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32);
36
37enum psci_function {
38 PSCI_FN_CPU_SUSPEND,
39 PSCI_FN_CPU_ON,
40 PSCI_FN_CPU_OFF,
41 PSCI_FN_MIGRATE,
42 PSCI_FN_AFFINITY_INFO,
43 PSCI_FN_MIGRATE_INFO_TYPE,
44 PSCI_FN_MAX,
45};
46
47static u32 psci_function_id[PSCI_FN_MAX];
48
49static int psci_to_linux_errno(int errno)
50{
51 switch (errno) {
52 case PSCI_RET_SUCCESS:
53 return 0;
54 case PSCI_RET_NOT_SUPPORTED:
55 return -EOPNOTSUPP;
56 case PSCI_RET_INVALID_PARAMS:
57 return -EINVAL;
58 case PSCI_RET_DENIED:
59 return -EPERM;
60 };
61
62 return -EINVAL;
63}
64
65static u32 psci_power_state_pack(struct psci_power_state state)
66{
67 return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
68 & PSCI_0_2_POWER_STATE_ID_MASK) |
69 ((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
70 & PSCI_0_2_POWER_STATE_TYPE_MASK) |
71 ((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
72 & PSCI_0_2_POWER_STATE_AFFL_MASK);
73}
74
75static int psci_get_version(void)
76{
77 int err;
78
79 err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
80 return err;
81}
82
83static int psci_cpu_suspend(struct psci_power_state state,
84 unsigned long entry_point)
85{
86 int err;
87 u32 fn, power_state;
88
89 fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
90 power_state = psci_power_state_pack(state);
91 err = invoke_psci_fn(fn, power_state, entry_point, 0);
92 return psci_to_linux_errno(err);
93}
94
95static int psci_cpu_off(struct psci_power_state state)
96{
97 int err;
98 u32 fn, power_state;
99
100 fn = psci_function_id[PSCI_FN_CPU_OFF];
101 power_state = psci_power_state_pack(state);
102 err = invoke_psci_fn(fn, power_state, 0, 0);
103 return psci_to_linux_errno(err);
104}
105
106static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
107{
108 int err;
109 u32 fn;
110
111 fn = psci_function_id[PSCI_FN_CPU_ON];
112 err = invoke_psci_fn(fn, cpuid, entry_point, 0);
113 return psci_to_linux_errno(err);
114}
115
116static int psci_migrate(unsigned long cpuid)
117{
118 int err;
119 u32 fn;
120
121 fn = psci_function_id[PSCI_FN_MIGRATE];
122 err = invoke_psci_fn(fn, cpuid, 0, 0);
123 return psci_to_linux_errno(err);
124}
125
126static int psci_affinity_info(unsigned long target_affinity,
127 unsigned long lowest_affinity_level)
128{
129 int err;
130 u32 fn;
131
132 fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
133 err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
134 return err;
135}
136
137static int psci_migrate_info_type(void)
138{
139 int err;
140 u32 fn;
141
142 fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE];
143 err = invoke_psci_fn(fn, 0, 0, 0);
144 return err;
145}
146
147static int get_set_conduit_method(struct device_node *np)
148{
149 const char *method;
150
151 pr_info("probing for conduit method from DT.\n");
152
153 if (of_property_read_string(np, "method", &method)) {
154 pr_warn("missing \"method\" property\n");
155 return -ENXIO;
156 }
157
158 if (!strcmp("hvc", method)) {
159 invoke_psci_fn = __invoke_psci_fn_hvc;
160 } else if (!strcmp("smc", method)) {
161 invoke_psci_fn = __invoke_psci_fn_smc;
162 } else {
163 pr_warn("invalid \"method\" property: %s\n", method);
164 return -EINVAL;
165 }
166 return 0;
167}
168
169static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
170{
171 invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
172}
173
174static void psci_sys_poweroff(void)
175{
176 invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
177}
178
179/*
180 * PSCI Function IDs for v0.2+ are well defined so use
181 * standard values.
182 */
183static int psci_0_2_init(struct device_node *np)
184{
185 int err, ver;
186
187 err = get_set_conduit_method(np);
188
189 if (err)
190 goto out_put_node;
191
192 ver = psci_get_version();
193
194 if (ver == PSCI_RET_NOT_SUPPORTED) {
195 /* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */
196 pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
197 err = -EOPNOTSUPP;
198 goto out_put_node;
199 } else {
200 pr_info("PSCIv%d.%d detected in firmware.\n",
201 PSCI_VERSION_MAJOR(ver),
202 PSCI_VERSION_MINOR(ver));
203
204 if (PSCI_VERSION_MAJOR(ver) == 0 &&
205 PSCI_VERSION_MINOR(ver) < 2) {
206 err = -EINVAL;
207 pr_err("Conflicting PSCI version detected.\n");
208 goto out_put_node;
209 }
210 }
211
212 pr_info("Using standard PSCI v0.2 function IDs\n");
213 psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_CPU_SUSPEND;
214 psci_ops.cpu_suspend = psci_cpu_suspend;
215
216 psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
217 psci_ops.cpu_off = psci_cpu_off;
218
219 psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_CPU_ON;
220 psci_ops.cpu_on = psci_cpu_on;
221
222 psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_MIGRATE;
223 psci_ops.migrate = psci_migrate;
224
225 psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN_AFFINITY_INFO;
226 psci_ops.affinity_info = psci_affinity_info;
227
228 psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
229 PSCI_0_2_FN_MIGRATE_INFO_TYPE;
230 psci_ops.migrate_info_type = psci_migrate_info_type;
231
232 arm_pm_restart = psci_sys_reset;
233
234 pm_power_off = psci_sys_poweroff;
235
236out_put_node:
237 of_node_put(np);
238 return err;
239}
240
241/*
242 * PSCI < v0.2 get PSCI Function IDs via DT.
243 */
244static int psci_0_1_init(struct device_node *np)
245{
246 u32 id;
247 int err;
248
249 err = get_set_conduit_method(np);
250
251 if (err)
252 goto out_put_node;
253
254 pr_info("Using PSCI v0.1 Function IDs from DT\n");
255
256 if (!of_property_read_u32(np, "cpu_suspend", &id)) {
257 psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
258 psci_ops.cpu_suspend = psci_cpu_suspend;
259 }
260
261 if (!of_property_read_u32(np, "cpu_off", &id)) {
262 psci_function_id[PSCI_FN_CPU_OFF] = id;
263 psci_ops.cpu_off = psci_cpu_off;
264 }
265
266 if (!of_property_read_u32(np, "cpu_on", &id)) {
267 psci_function_id[PSCI_FN_CPU_ON] = id;
268 psci_ops.cpu_on = psci_cpu_on;
269 }
270
271 if (!of_property_read_u32(np, "migrate", &id)) {
272 psci_function_id[PSCI_FN_MIGRATE] = id;
273 psci_ops.migrate = psci_migrate;
274 }
275
276out_put_node:
277 of_node_put(np);
278 return err;
279}
280
281static const struct of_device_id const psci_of_match[] __initconst = {
282 { .compatible = "arm,psci", .data = psci_0_1_init},
283 { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
284 {},
285};
286
287int __init psci_init(void)
288{
289 struct device_node *np;
290 const struct of_device_id *matched_np;
291 psci_initcall_t init_fn;
292
293 np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
294 if (!np)
295 return -ENODEV;
296
297 init_fn = (psci_initcall_t)matched_np->data;
298 return init_fn(np);
299}
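The 32-bit PSCI client — conduit selection, the v0.1 and v0.2 function-ID setup and the thin call wrappers above — is deleted. setup.c below now includes <linux/psci.h> and calls psci_dt_init(), so the implementation has evidently been consolidated into generic code shared with arm64; the new location is not shown in this diff. For reference, the v0.2 power_state packing the removed psci_cpu_suspend relied on, written as a standalone helper using the same <uapi/linux/psci.h> macros:

static u32 pack_power_state(u32 id, u32 type, u32 affinity_level)
{
        return ((id << PSCI_0_2_POWER_STATE_ID_SHIFT) &
                        PSCI_0_2_POWER_STATE_ID_MASK) |
               ((type << PSCI_0_2_POWER_STATE_TYPE_SHIFT) &
                        PSCI_0_2_POWER_STATE_TYPE_MASK) |
               ((affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT) &
                        PSCI_0_2_POWER_STATE_AFFL_MASK);
}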
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 28a1db4da704..61c04b02faeb 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -17,6 +17,8 @@
17#include <linux/smp.h> 17#include <linux/smp.h>
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/psci.h>
21
20#include <uapi/linux/psci.h> 22#include <uapi/linux/psci.h>
21 23
22#include <asm/psci.h> 24#include <asm/psci.h>
@@ -51,22 +53,34 @@ static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle)
51{ 53{
52 if (psci_ops.cpu_on) 54 if (psci_ops.cpu_on)
53 return psci_ops.cpu_on(cpu_logical_map(cpu), 55 return psci_ops.cpu_on(cpu_logical_map(cpu),
54 __pa(secondary_startup)); 56 virt_to_idmap(&secondary_startup));
55 return -ENODEV; 57 return -ENODEV;
56} 58}
57 59
58#ifdef CONFIG_HOTPLUG_CPU 60#ifdef CONFIG_HOTPLUG_CPU
61int psci_cpu_disable(unsigned int cpu)
62{
63 /* Fail early if we don't have CPU_OFF support */
64 if (!psci_ops.cpu_off)
65 return -EOPNOTSUPP;
66
67 /* Trusted OS will deny CPU_OFF */
68 if (psci_tos_resident_on(cpu))
69 return -EPERM;
70
71 return 0;
72}
73
59void __ref psci_cpu_die(unsigned int cpu) 74void __ref psci_cpu_die(unsigned int cpu)
60{ 75{
61 const struct psci_power_state ps = { 76 u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
62 .type = PSCI_POWER_STATE_TYPE_POWER_DOWN, 77 PSCI_0_2_POWER_STATE_TYPE_SHIFT;
63 };
64 78
65 if (psci_ops.cpu_off) 79 if (psci_ops.cpu_off)
66 psci_ops.cpu_off(ps); 80 psci_ops.cpu_off(state);
67 81
68 /* We should never return */ 82 /* We should never return */
69 panic("psci: cpu %d failed to shutdown\n", cpu); 83 panic("psci: cpu %d failed to shutdown\n", cpu);
70} 84}
71 85
72int __ref psci_cpu_kill(unsigned int cpu) 86int __ref psci_cpu_kill(unsigned int cpu)
@@ -109,6 +123,7 @@ bool __init psci_smp_available(void)
109struct smp_operations __initdata psci_smp_ops = { 123struct smp_operations __initdata psci_smp_ops = {
110 .smp_boot_secondary = psci_boot_secondary, 124 .smp_boot_secondary = psci_boot_secondary,
111#ifdef CONFIG_HOTPLUG_CPU 125#ifdef CONFIG_HOTPLUG_CPU
126 .cpu_disable = psci_cpu_disable,
112 .cpu_die = psci_cpu_die, 127 .cpu_die = psci_cpu_die,
113 .cpu_kill = psci_cpu_kill, 128 .cpu_kill = psci_cpu_kill,
114#endif 129#endif
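psci_smp.c gains a cpu_disable hook so hotplug fails early when CPU_OFF is unavailable or a Trusted OS is resident on the CPU being unplugged (psci_tos_resident_on() comes from the consolidated PSCI code, per the new <linux/psci.h> include). psci_cpu_die now passes a plain u32 power_state instead of the old struct, and secondary boot hands the firmware the identity-mapped address of secondary_startup rather than __pa(). The power-down state it builds is just the type field of the v0.2 state format:

/* Power-down request, state id 0, affinity level 0: */
u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
            PSCI_0_2_POWER_STATE_TYPE_SHIFT;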
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 36c18b73c1f4..20edd349d379 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -31,12 +31,14 @@
31#include <linux/bug.h> 31#include <linux/bug.h>
32#include <linux/compiler.h> 32#include <linux/compiler.h>
33#include <linux/sort.h> 33#include <linux/sort.h>
34#include <linux/psci.h>
34 35
35#include <asm/unified.h> 36#include <asm/unified.h>
36#include <asm/cp15.h> 37#include <asm/cp15.h>
37#include <asm/cpu.h> 38#include <asm/cpu.h>
38#include <asm/cputype.h> 39#include <asm/cputype.h>
39#include <asm/elf.h> 40#include <asm/elf.h>
41#include <asm/fixmap.h>
40#include <asm/procinfo.h> 42#include <asm/procinfo.h>
41#include <asm/psci.h> 43#include <asm/psci.h>
42#include <asm/sections.h> 44#include <asm/sections.h>
@@ -954,6 +956,9 @@ void __init setup_arch(char **cmdline_p)
954 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); 956 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
955 *cmdline_p = cmd_line; 957 *cmdline_p = cmd_line;
956 958
959 if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
960 early_fixmap_init();
961
957 parse_early_param(); 962 parse_early_param();
958 963
959#ifdef CONFIG_MMU 964#ifdef CONFIG_MMU
@@ -972,7 +977,7 @@ void __init setup_arch(char **cmdline_p)
972 unflatten_device_tree(); 977 unflatten_device_tree();
973 978
974 arm_dt_init_cpu_maps(); 979 arm_dt_init_cpu_maps();
975 psci_init(); 980 psci_dt_init();
976 xen_early_init(); 981 xen_early_init();
977#ifdef CONFIG_SMP 982#ifdef CONFIG_SMP
978 if (is_smp()) { 983 if (is_smp()) {
@@ -1015,7 +1020,7 @@ static int __init topology_init(void)
1015 1020
1016 for_each_possible_cpu(cpu) { 1021 for_each_possible_cpu(cpu) {
1017 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu); 1022 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1018 cpuinfo->cpu.hotpluggable = 1; 1023 cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
1019 register_cpu(&cpuinfo->cpu, cpu); 1024 register_cpu(&cpuinfo->cpu, cpu);
1020 } 1025 }
1021 1026
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 423663e23791..b6cda06b455f 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -562,6 +562,12 @@ static int do_signal(struct pt_regs *regs, int syscall)
562asmlinkage int 562asmlinkage int
563do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) 563do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
564{ 564{
565 /*
566 * The assembly code enters us with IRQs off, but it hasn't
567 * informed the tracing code of that for efficiency reasons.
568 * Update the trace code with the current status.
569 */
570 trace_hardirqs_off();
565 do { 571 do {
566 if (likely(thread_flags & _TIF_NEED_RESCHED)) { 572 if (likely(thread_flags & _TIF_NEED_RESCHED)) {
567 schedule(); 573 schedule();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3d6b7821cff8..ba0063c539c3 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -175,13 +175,26 @@ static int platform_cpu_disable(unsigned int cpu)
175 if (smp_ops.cpu_disable) 175 if (smp_ops.cpu_disable)
176 return smp_ops.cpu_disable(cpu); 176 return smp_ops.cpu_disable(cpu);
177 177
178 return 0;
179}
180
181int platform_can_hotplug_cpu(unsigned int cpu)
182{
183 /* cpu_die must be specified to support hotplug */
184 if (!smp_ops.cpu_die)
185 return 0;
186
187 if (smp_ops.cpu_can_disable)
188 return smp_ops.cpu_can_disable(cpu);
189
178 /* 190 /*
179 * By default, allow disabling all CPUs except the first one, 191 * By default, allow disabling all CPUs except the first one,
180 * since this is special on a lot of platforms, e.g. because 192 * since this is special on a lot of platforms, e.g. because
181 * of clock tick interrupts. 193 * of clock tick interrupts.
182 */ 194 */
183 return cpu == 0 ? -EPERM : 0; 195 return cpu != 0;
184} 196}
197
185/* 198/*
186 * __cpu_disable runs on the processor to be shutdown. 199 * __cpu_disable runs on the processor to be shutdown.
187 */ 200 */
@@ -253,7 +266,7 @@ void __cpu_die(unsigned int cpu)
253 * of the other hotplug-cpu capable cores, so presumably coming 266 * of the other hotplug-cpu capable cores, so presumably coming
254 * out of idle fixes this. 267 * out of idle fixes this.
255 */ 268 */
256void __ref cpu_die(void) 269void arch_cpu_idle_dead(void)
257{ 270{
258 unsigned int cpu = smp_processor_id(); 271 unsigned int cpu = smp_processor_id();
259 272
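
For platform code, the practical effect of the platform_can_hotplug_cpu() hunk above is that hotplug capability is now expressed with a boolean .cpu_can_disable hook instead of an error-returning .cpu_disable. A minimal, purely illustrative sketch (the names are hypothetical; the shmobile conversion later in this diff is a real in-tree example):

/* Illustrative only: allow hot-unplug of every CPU except the boot CPU. */
static bool example_cpu_can_disable(unsigned int cpu)
{
	return cpu != 0;	/* CPU0 often owns the clock tick */
}

/*
 * Wired up in the platform's struct smp_operations as:
 *	.cpu_can_disable = example_cpu_can_disable,
 */
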
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 1361756782c7..5b26e7efa9ea 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
141 141
142 while (1) { 142 while (1) {
143 unsigned long temp; 143 unsigned long temp;
144 unsigned int __ua_flags;
144 145
146 __ua_flags = uaccess_save_and_enable();
145 if (type == TYPE_SWPB) 147 if (type == TYPE_SWPB)
146 __user_swpb_asm(*data, address, res, temp); 148 __user_swpb_asm(*data, address, res, temp);
147 else 149 else
148 __user_swp_asm(*data, address, res, temp); 150 __user_swp_asm(*data, address, res, temp);
151 uaccess_restore(__ua_flags);
149 152
150 if (likely(res != -EAGAIN) || signal_pending(current)) 153 if (likely(res != -EAGAIN) || signal_pending(current))
151 break; 154 break;
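
The uaccess_save_and_enable()/uaccess_restore() pair added above is the general pattern this series requires around any open-coded user access once userspace is kept inaccessible by default (software PAN via domains). A hedged sketch of the bracketing in isolation; only the two helpers come from the patch, the surrounding function is made up:

static int example_poke_user_word(unsigned long __user *uaddr)
{
	unsigned int ua_flags;
	int res = 0;

	ua_flags = uaccess_save_and_enable();	/* open the user window */
	/*
	 * ... an open-coded LDRT/STRT-style access to *uaddr would go
	 * here, with a fixup entry for the fault case, exactly as the
	 * SWP emulation above does ...
	 */
	(void)uaddr;
	uaccess_restore(ua_flags);		/* put the previous state back */

	return res;
}
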
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d358226236f2..969f9d9e665f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base)
870 kuser_init(vectors_base); 870 kuser_init(vectors_base);
871 871
872 flush_icache_range(vectors, vectors + PAGE_SIZE * 2); 872 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
873 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
874#else /* ifndef CONFIG_CPU_V7M */ 873#else /* ifndef CONFIG_CPU_V7M */
875 /* 874 /*
876 * on V7-M there is no need to copy the vector table to a dedicated 875 * on V7-M there is no need to copy the vector table to a dedicated
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 1710fd7db2d5..970d6c043774 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -12,14 +12,14 @@
12 12
13 .text 13 .text
14 14
15/* Prototype: int __clear_user(void *addr, size_t sz) 15/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
16 * Purpose : clear some user memory 16 * Purpose : clear some user memory
17 * Params : addr - user memory address to clear 17 * Params : addr - user memory address to clear
18 * : sz - number of bytes to clear 18 * : sz - number of bytes to clear
19 * Returns : number of bytes NOT cleared 19 * Returns : number of bytes NOT cleared
20 */ 20 */
21ENTRY(__clear_user_std) 21ENTRY(__clear_user_std)
22WEAK(__clear_user) 22WEAK(arm_clear_user)
23 stmfd sp!, {r1, lr} 23 stmfd sp!, {r1, lr}
24 mov r2, #0 24 mov r2, #0
25 cmp r1, #4 25 cmp r1, #4
@@ -44,7 +44,7 @@ WEAK(__clear_user)
44USER( strnebt r2, [r0]) 44USER( strnebt r2, [r0])
45 mov r0, #0 45 mov r0, #0
46 ldmfd sp!, {r1, pc} 46 ldmfd sp!, {r1, pc}
47ENDPROC(__clear_user) 47ENDPROC(arm_clear_user)
48ENDPROC(__clear_user_std) 48ENDPROC(__clear_user_std)
49 49
50 .pushsection .text.fixup,"ax" 50 .pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a235b9952be..1512bebfbf1b 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -17,7 +17,7 @@
17/* 17/*
18 * Prototype: 18 * Prototype:
19 * 19 *
20 * size_t __copy_from_user(void *to, const void *from, size_t n) 20 * size_t arm_copy_from_user(void *to, const void *from, size_t n)
21 * 21 *
22 * Purpose: 22 * Purpose:
23 * 23 *
@@ -89,11 +89,11 @@
89 89
90 .text 90 .text
91 91
92ENTRY(__copy_from_user) 92ENTRY(arm_copy_from_user)
93 93
94#include "copy_template.S" 94#include "copy_template.S"
95 95
96ENDPROC(__copy_from_user) 96ENDPROC(arm_copy_from_user)
97 97
98 .pushsection .fixup,"ax" 98 .pushsection .fixup,"ax"
99 .align 0 99 .align 0
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 9648b0675a3e..caf5019d8161 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -17,7 +17,7 @@
17/* 17/*
18 * Prototype: 18 * Prototype:
19 * 19 *
20 * size_t __copy_to_user(void *to, const void *from, size_t n) 20 * size_t arm_copy_to_user(void *to, const void *from, size_t n)
21 * 21 *
22 * Purpose: 22 * Purpose:
23 * 23 *
@@ -93,11 +93,11 @@
93 .text 93 .text
94 94
95ENTRY(__copy_to_user_std) 95ENTRY(__copy_to_user_std)
96WEAK(__copy_to_user) 96WEAK(arm_copy_to_user)
97 97
98#include "copy_template.S" 98#include "copy_template.S"
99 99
100ENDPROC(__copy_to_user) 100ENDPROC(arm_copy_to_user)
101ENDPROC(__copy_to_user_std) 101ENDPROC(__copy_to_user_std)
102 102
103 .pushsection .text.fixup,"ax" 103 .pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1d0957e61f89..1712f132b80d 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -17,6 +17,19 @@
17 17
18 .text 18 .text
19 19
20#ifdef CONFIG_CPU_SW_DOMAIN_PAN
21 .macro save_regs
22 mrc p15, 0, ip, c3, c0, 0
23 stmfd sp!, {r1, r2, r4 - r8, ip, lr}
24 uaccess_enable ip
25 .endm
26
27 .macro load_regs
28 ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
29 mcr p15, 0, ip, c3, c0, 0
30 ret lr
31 .endm
32#else
20 .macro save_regs 33 .macro save_regs
21 stmfd sp!, {r1, r2, r4 - r8, lr} 34 stmfd sp!, {r1, r2, r4 - r8, lr}
22 .endm 35 .endm
@@ -24,6 +37,7 @@
24 .macro load_regs 37 .macro load_regs
25 ldmfd sp!, {r1, r2, r4 - r8, pc} 38 ldmfd sp!, {r1, r2, r4 - r8, pc}
26 .endm 39 .endm
40#endif
27 41
28 .macro load1b, reg1 42 .macro load1b, reg1
29 ldrusr \reg1, r0, 1 43 ldrusr \reg1, r0, 1
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 4b39af2dfda9..d72b90905132 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -136,7 +136,7 @@ out:
136} 136}
137 137
138unsigned long 138unsigned long
139__copy_to_user(void __user *to, const void *from, unsigned long n) 139arm_copy_to_user(void __user *to, const void *from, unsigned long n)
140{ 140{
141 /* 141 /*
142 * This test is stubbed out of the main function above to keep 142 * This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@ out:
190 return n; 190 return n;
191} 191}
192 192
193unsigned long __clear_user(void __user *addr, unsigned long n) 193unsigned long arm_clear_user(void __user *addr, unsigned long n)
194{ 194{
 195 /* See rationale for this in __copy_to_user() above. */ 195 /* See rationale for this in __copy_to_user() above. */
196 if (n < 64) 196 if (n < 64)
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 231fba0d03e5..6050a14faee6 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -28,8 +28,8 @@
28#include <linux/reboot.h> 28#include <linux/reboot.h>
29#include <linux/amba/bus.h> 29#include <linux/amba/bus.h>
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/psci.h>
31 32
32#include <asm/psci.h>
33#include <asm/hardware/cache-l2x0.h> 33#include <asm/hardware/cache-l2x0.h>
34#include <asm/mach/arch.h> 34#include <asm/mach/arch.h>
35#include <asm/mach/map.h> 35#include <asm/mach/map.h>
diff --git a/arch/arm/mach-highbank/pm.c b/arch/arm/mach-highbank/pm.c
index 7f2bd85eb935..400311695548 100644
--- a/arch/arm/mach-highbank/pm.c
+++ b/arch/arm/mach-highbank/pm.c
@@ -16,19 +16,21 @@
16 16
17#include <linux/cpu_pm.h> 17#include <linux/cpu_pm.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/psci.h>
19#include <linux/suspend.h> 20#include <linux/suspend.h>
20 21
21#include <asm/suspend.h> 22#include <asm/suspend.h>
22#include <asm/psci.h> 23
24#include <uapi/linux/psci.h>
25
26#define HIGHBANK_SUSPEND_PARAM \
27 ((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \
28 (1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \
29 (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT))
23 30
24static int highbank_suspend_finish(unsigned long val) 31static int highbank_suspend_finish(unsigned long val)
25{ 32{
26 const struct psci_power_state ps = { 33 return psci_ops.cpu_suspend(HIGHBANK_SUSPEND_PARAM, __pa(cpu_resume));
27 .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
28 .affinity_level = 1,
29 };
30
31 return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
32} 34}
33 35
34static int highbank_pm_enter(suspend_state_t state) 36static int highbank_pm_enter(suspend_state_t state)
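
HIGHBANK_SUSPEND_PARAM above packs a raw PSCI 0.2 power_state value from the bitfield macros instead of the old struct psci_power_state. A small illustrative helper (the function name is made up; the *_SHIFT macros are the same ones from <uapi/linux/psci.h> used in the hunk):

static u32 example_psci_power_state(u32 id, u32 type, u32 affinity_level)
{
	return (id << PSCI_0_2_POWER_STATE_ID_SHIFT) |
	       (affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT) |
	       (type << PSCI_0_2_POWER_STATE_TYPE_SHIFT);
}

/*
 * example_psci_power_state(0, PSCI_POWER_STATE_TYPE_POWER_DOWN, 1)
 * yields the same value as HIGHBANK_SUSPEND_PARAM: a power-down state
 * applied at affinity level 1, i.e. the whole cluster.
 */
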
diff --git a/arch/arm/mach-mmp/pm-pxa910.c b/arch/arm/mach-mmp/pm-pxa910.c
index 04c9daf9f8d7..7db5870d127f 100644
--- a/arch/arm/mach-mmp/pm-pxa910.c
+++ b/arch/arm/mach-mmp/pm-pxa910.c
@@ -18,6 +18,7 @@
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <asm/mach-types.h> 20#include <asm/mach-types.h>
21#include <asm/outercache.h>
21#include <mach/hardware.h> 22#include <mach/hardware.h>
22#include <mach/cputype.h> 23#include <mach/cputype.h>
23#include <mach/addr-map.h> 24#include <mach/addr-map.h>
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 9e2a68456b81..07d2e100caab 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -29,6 +29,7 @@ config ARCH_OMAP4
29 select HAVE_ARM_SCU if SMP 29 select HAVE_ARM_SCU if SMP
30 select HAVE_ARM_TWD if SMP 30 select HAVE_ARM_TWD if SMP
31 select OMAP_INTERCONNECT 31 select OMAP_INTERCONNECT
32 select OMAP_INTERCONNECT_BARRIER
32 select PL310_ERRATA_588369 if CACHE_L2X0 33 select PL310_ERRATA_588369 if CACHE_L2X0
33 select PL310_ERRATA_727915 if CACHE_L2X0 34 select PL310_ERRATA_727915 if CACHE_L2X0
34 select PM_OPP if PM 35 select PM_OPP if PM
@@ -46,6 +47,7 @@ config SOC_OMAP5
46 select HAVE_ARM_TWD if SMP 47 select HAVE_ARM_TWD if SMP
47 select HAVE_ARM_ARCH_TIMER 48 select HAVE_ARM_ARCH_TIMER
48 select ARM_ERRATA_798181 if SMP 49 select ARM_ERRATA_798181 if SMP
50 select OMAP_INTERCONNECT_BARRIER
49 51
50config SOC_AM33XX 52config SOC_AM33XX
51 bool "TI AM33XX" 53 bool "TI AM33XX"
@@ -71,6 +73,7 @@ config SOC_DRA7XX
71 select HAVE_ARM_ARCH_TIMER 73 select HAVE_ARM_ARCH_TIMER
72 select IRQ_CROSSBAR 74 select IRQ_CROSSBAR
73 select ARM_ERRATA_798181 if SMP 75 select ARM_ERRATA_798181 if SMP
76 select OMAP_INTERCONNECT_BARRIER
74 77
75config ARCH_OMAP2PLUS 78config ARCH_OMAP2PLUS
76 bool 79 bool
@@ -92,6 +95,10 @@ config ARCH_OMAP2PLUS
92 help 95 help
93 Systems based on OMAP2, OMAP3, OMAP4 or OMAP5 96 Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
94 97
98config OMAP_INTERCONNECT_BARRIER
99 bool
100 select ARM_HEAVY_MB
101
95 102
96if ARCH_OMAP2PLUS 103if ARCH_OMAP2PLUS
97 104
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
index eae6a0e87c90..484cdadfb187 100644
--- a/arch/arm/mach-omap2/common.c
+++ b/arch/arm/mach-omap2/common.c
@@ -30,4 +30,5 @@ int __weak omap_secure_ram_reserve_memblock(void)
30void __init omap_reserve(void) 30void __init omap_reserve(void)
31{ 31{
32 omap_secure_ram_reserve_memblock(); 32 omap_secure_ram_reserve_memblock();
33 omap_barrier_reserve_memblock();
33} 34}
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 749d50bb4ca5..92e92cfc2775 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -189,6 +189,15 @@ static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd)
189} 189}
190#endif 190#endif
191 191
192#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
193void omap_barrier_reserve_memblock(void);
194void omap_barriers_init(void);
195#else
196static inline void omap_barrier_reserve_memblock(void)
197{
198}
199#endif
200
192/* This gets called from mach-omap2/io.c, do not call this */ 201/* This gets called from mach-omap2/io.c, do not call this */
193void __init omap2_set_globals_tap(u32 class, void __iomem *tap); 202void __init omap2_set_globals_tap(u32 class, void __iomem *tap);
194 203
diff --git a/arch/arm/mach-omap2/include/mach/barriers.h b/arch/arm/mach-omap2/include/mach/barriers.h
deleted file mode 100644
index 1c582a8592b9..000000000000
--- a/arch/arm/mach-omap2/include/mach/barriers.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * OMAP memory barrier header.
3 *
4 * Copyright (C) 2011 Texas Instruments, Inc.
5 * Santosh Shilimkar <santosh.shilimkar@ti.com>
6 * Richard Woodruff <r-woodruff2@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#ifndef __MACH_BARRIERS_H
23#define __MACH_BARRIERS_H
24
25#include <asm/outercache.h>
26
27extern void omap_bus_sync(void);
28
29#define rmb() dsb()
30#define wmb() do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
31#define mb() wmb()
32
33#endif /* __MACH_BARRIERS_H */
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 6a4822dbb4ea..980c9372e6fd 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -352,6 +352,7 @@ void __init am33xx_map_io(void)
352void __init omap4_map_io(void) 352void __init omap4_map_io(void)
353{ 353{
354 iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc)); 354 iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
355 omap_barriers_init();
355} 356}
356#endif 357#endif
357 358
@@ -359,6 +360,7 @@ void __init omap4_map_io(void)
359void __init omap5_map_io(void) 360void __init omap5_map_io(void)
360{ 361{
361 iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc)); 362 iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
363 omap_barriers_init();
362} 364}
363#endif 365#endif
364 366
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 16350eefa66c..949696b6f17b 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -51,6 +51,127 @@ static void __iomem *twd_base;
51 51
52#define IRQ_LOCALTIMER 29 52#define IRQ_LOCALTIMER 29
53 53
54#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
55
56/* Used to implement memory barrier on DRAM path */
57#define OMAP4_DRAM_BARRIER_VA 0xfe600000
58
59static void __iomem *dram_sync, *sram_sync;
60static phys_addr_t dram_sync_paddr;
61static u32 dram_sync_size;
62
63/*
 64 * The OMAP4 bus structure contains asynchronous bridges which can buffer
65 * data writes from the MPU. These asynchronous bridges can be found on
66 * paths between the MPU to EMIF, and the MPU to L3 interconnects.
67 *
68 * We need to be careful about re-ordering which can happen as a result
69 * of different accesses being performed via different paths, and
70 * therefore different asynchronous bridges.
71 */
72
73/*
74 * OMAP4 interconnect barrier which is called for each mb() and wmb().
75 * This is to ensure that normal paths to DRAM (normal memory, cacheable
76 * accesses) are properly synchronised with writes to DMA coherent memory
77 * (normal memory, uncacheable) and device writes.
78 *
 79 * The mb() and wmb() barriers operate only on the MPU->MA->EMIF
80 * path, as we need to ensure that data is visible to other system
81 * masters prior to writes to those system masters being seen.
82 *
83 * Note: the SRAM path is not synchronised via mb() and wmb().
84 */
85static void omap4_mb(void)
86{
87 if (dram_sync)
88 writel_relaxed(0, dram_sync);
89}
90
91/*
92 * OMAP4 Errata i688 - asynchronous bridge corruption when entering WFI.
93 *
 94 * If data is stalled inside an asynchronous bridge because of back
95 * pressure, it may be accepted multiple times, creating pointer
96 * misalignment that will corrupt next transfers on that data path until
 97 * next reset of the system. No recovery procedure once the issue is hit;
98 * the path remains consistently broken.
99 *
100 * Async bridges can be found on paths between MPU to EMIF and MPU to L3
101 * interconnects.
102 *
103 * This situation can happen only when the idle is initiated by a Master
 105 * Request Disconnection (which is triggered by software when executing WFI
105 * on the CPU).
106 *
107 * The work-around for this errata needs all the initiators connected
 108 * through an async bridge to ensure that the data path is properly drained
109 * before issuing WFI. This condition will be met if one Strongly ordered
110 * access is performed to the target right before executing the WFI.
111 *
 112 * In the MPU case, the L3 T2ASYNC FIFO and the DDR T2ASYNC FIFO need to be
 113 * drained. An IO barrier ensures that there is no synchronisation loss for
 114 * initiators operating on both interconnect ports simultaneously.
115 *
 116 * This is a stronger version of the OMAP4 memory barrier above: it
 117 * operates not only on the MPU->MA->EMIF path but also on the MPU->OCP
 118 * path, and is necessary prior to executing a WFI.
119 */
120void omap_interconnect_sync(void)
121{
122 if (dram_sync && sram_sync) {
123 writel_relaxed(readl_relaxed(dram_sync), dram_sync);
124 writel_relaxed(readl_relaxed(sram_sync), sram_sync);
125 isb();
126 }
127}
128
129static int __init omap4_sram_init(void)
130{
131 struct device_node *np;
132 struct gen_pool *sram_pool;
133
134 np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
135 if (!np)
136 pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
137 __func__);
138 sram_pool = of_gen_pool_get(np, "sram", 0);
139 if (!sram_pool)
140 pr_warn("%s:Unable to get sram pool needed to handle errata I688\n",
141 __func__);
142 else
143 sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE);
144
145 return 0;
146}
147omap_arch_initcall(omap4_sram_init);
148
149/* Steal one page physical memory for barrier implementation */
150void __init omap_barrier_reserve_memblock(void)
151{
152 dram_sync_size = ALIGN(PAGE_SIZE, SZ_1M);
153 dram_sync_paddr = arm_memblock_steal(dram_sync_size, SZ_1M);
154}
155
156void __init omap_barriers_init(void)
157{
158 struct map_desc dram_io_desc[1];
159
160 dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
161 dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr);
162 dram_io_desc[0].length = dram_sync_size;
163 dram_io_desc[0].type = MT_MEMORY_RW_SO;
164 iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
165 dram_sync = (void __iomem *) dram_io_desc[0].virtual;
166
167 pr_info("OMAP4: Map %pa to %p for dram barrier\n",
168 &dram_sync_paddr, dram_sync);
169
170 soc_mb = omap4_mb;
171}
172
173#endif
174
54void gic_dist_disable(void) 175void gic_dist_disable(void)
55{ 176{
56 if (gic_dist_base_addr) 177 if (gic_dist_base_addr)
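
Once soc_mb points at omap4_mb() (it is hooked into arm_heavy_mb() in the mm/flush.c hunk further down), an ordinary driver barrier picks up the interconnect drain automatically. A hedged sketch of the visible effect; the descriptor structure, flag and doorbell below are invented purely for illustration:

struct example_desc {
	u32 status;
};
#define EXAMPLE_DESC_READY	0x1

static void example_post_descriptor(struct example_desc *desc,
				    void __iomem *doorbell)
{
	desc->status = EXAMPLE_DESC_READY;	/* normal cacheable DRAM write */
	wmb();		/* dsb + arm_heavy_mb() -> omap4_mb() drains MPU->EMIF */
	writel(1, doorbell);			/* device now observes the descriptor */
}
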
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index ad1bb9431e94..9b09d85d811a 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -333,14 +333,12 @@ ENDPROC(omap4_cpu_resume)
333 333
334#endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */ 334#endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */
335 335
336ENTRY(omap_bus_sync)
337 ret lr
338ENDPROC(omap_bus_sync)
339
340ENTRY(omap_do_wfi) 336ENTRY(omap_do_wfi)
341 stmfd sp!, {lr} 337 stmfd sp!, {lr}
338#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
342 /* Drain interconnect write buffers. */ 339 /* Drain interconnect write buffers. */
343 bl omap_bus_sync 340 bl omap_interconnect_sync
341#endif
344 342
345 /* 343 /*
346 * Execute an ISB instruction to ensure that all of the 344 * Execute an ISB instruction to ensure that all of the
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index d99d08eeb966..83e94c95e314 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -16,6 +16,7 @@
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/rtc/sirfsoc_rtciobrg.h> 18#include <linux/rtc/sirfsoc_rtciobrg.h>
19#include <asm/outercache.h>
19#include <asm/suspend.h> 20#include <asm/suspend.h>
20#include <asm/hardware/cache-l2x0.h> 21#include <asm/hardware/cache-l2x0.h>
21 22
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 476092b86c6e..8d27ec546a35 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -13,7 +13,7 @@ extern void shmobile_smp_boot(void);
13extern void shmobile_smp_sleep(void); 13extern void shmobile_smp_sleep(void);
14extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn, 14extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
15 unsigned long arg); 15 unsigned long arg);
16extern int shmobile_smp_cpu_disable(unsigned int cpu); 16extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
17extern void shmobile_boot_scu(void); 17extern void shmobile_boot_scu(void);
18extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus); 18extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
19extern void shmobile_smp_scu_cpu_die(unsigned int cpu); 19extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index 3923e09e966d..b23378f3d7e1 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -31,8 +31,8 @@ void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg)
31} 31}
32 32
33#ifdef CONFIG_HOTPLUG_CPU 33#ifdef CONFIG_HOTPLUG_CPU
34int shmobile_smp_cpu_disable(unsigned int cpu) 34bool shmobile_smp_cpu_can_disable(unsigned int cpu)
35{ 35{
36 return 0; /* Hotplug of any CPU is supported */ 36 return true; /* Hotplug of any CPU is supported */
37} 37}
38#endif 38#endif
diff --git a/arch/arm/mach-shmobile/smp-r8a7790.c b/arch/arm/mach-shmobile/smp-r8a7790.c
index 2ef0054ce934..4b33d432a364 100644
--- a/arch/arm/mach-shmobile/smp-r8a7790.c
+++ b/arch/arm/mach-shmobile/smp-r8a7790.c
@@ -64,7 +64,7 @@ struct smp_operations r8a7790_smp_ops __initdata = {
64 .smp_prepare_cpus = r8a7790_smp_prepare_cpus, 64 .smp_prepare_cpus = r8a7790_smp_prepare_cpus,
65 .smp_boot_secondary = shmobile_smp_apmu_boot_secondary, 65 .smp_boot_secondary = shmobile_smp_apmu_boot_secondary,
66#ifdef CONFIG_HOTPLUG_CPU 66#ifdef CONFIG_HOTPLUG_CPU
67 .cpu_disable = shmobile_smp_cpu_disable, 67 .cpu_can_disable = shmobile_smp_cpu_can_disable,
68 .cpu_die = shmobile_smp_apmu_cpu_die, 68 .cpu_die = shmobile_smp_apmu_cpu_die,
69 .cpu_kill = shmobile_smp_apmu_cpu_kill, 69 .cpu_kill = shmobile_smp_apmu_cpu_kill,
70#endif 70#endif
diff --git a/arch/arm/mach-shmobile/smp-r8a7791.c b/arch/arm/mach-shmobile/smp-r8a7791.c
index 5e2d1db79afa..b2508c0d276b 100644
--- a/arch/arm/mach-shmobile/smp-r8a7791.c
+++ b/arch/arm/mach-shmobile/smp-r8a7791.c
@@ -58,7 +58,7 @@ struct smp_operations r8a7791_smp_ops __initdata = {
58 .smp_prepare_cpus = r8a7791_smp_prepare_cpus, 58 .smp_prepare_cpus = r8a7791_smp_prepare_cpus,
59 .smp_boot_secondary = r8a7791_smp_boot_secondary, 59 .smp_boot_secondary = r8a7791_smp_boot_secondary,
60#ifdef CONFIG_HOTPLUG_CPU 60#ifdef CONFIG_HOTPLUG_CPU
61 .cpu_disable = shmobile_smp_cpu_disable, 61 .cpu_can_disable = shmobile_smp_cpu_can_disable,
62 .cpu_die = shmobile_smp_apmu_cpu_die, 62 .cpu_die = shmobile_smp_apmu_cpu_die,
63 .cpu_kill = shmobile_smp_apmu_cpu_kill, 63 .cpu_kill = shmobile_smp_apmu_cpu_kill,
64#endif 64#endif
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index d03aa11fb46d..bc2824a036e1 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -60,7 +60,7 @@ struct smp_operations sh73a0_smp_ops __initdata = {
60 .smp_prepare_cpus = sh73a0_smp_prepare_cpus, 60 .smp_prepare_cpus = sh73a0_smp_prepare_cpus,
61 .smp_boot_secondary = sh73a0_boot_secondary, 61 .smp_boot_secondary = sh73a0_boot_secondary,
62#ifdef CONFIG_HOTPLUG_CPU 62#ifdef CONFIG_HOTPLUG_CPU
63 .cpu_disable = shmobile_smp_cpu_disable, 63 .cpu_can_disable = shmobile_smp_cpu_can_disable,
64 .cpu_die = shmobile_smp_scu_cpu_die, 64 .cpu_die = shmobile_smp_scu_cpu_die,
65 .cpu_kill = shmobile_smp_scu_cpu_kill, 65 .cpu_kill = shmobile_smp_scu_cpu_kill,
66#endif 66#endif
diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c
index 7557bede7ae6..780bd13cd7e3 100644
--- a/arch/arm/mach-ux500/cache-l2x0.c
+++ b/arch/arm/mach-ux500/cache-l2x0.c
@@ -8,6 +8,7 @@
8#include <linux/of.h> 8#include <linux/of.h>
9#include <linux/of_address.h> 9#include <linux/of_address.h>
10 10
11#include <asm/outercache.h>
11#include <asm/hardware/cache-l2x0.h> 12#include <asm/hardware/cache-l2x0.h>
12 13
13#include "db8500-regs.h" 14#include "db8500-regs.h"
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index ba708ce08616..f80560318c58 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -20,10 +20,10 @@
20#include <linux/mfd/dbx500-prcmu.h> 20#include <linux/mfd/dbx500-prcmu.h>
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#include <linux/perf/arm_pmu.h>
23#include <linux/regulator/machine.h> 24#include <linux/regulator/machine.h>
24#include <linux/random.h> 25#include <linux/random.h>
25 26
26#include <asm/pmu.h>
27#include <asm/mach/map.h> 27#include <asm/mach/map.h>
28 28
29#include "setup.h" 29#include "setup.h"
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7c6b976ab8d3..df7537f12469 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -883,6 +883,7 @@ config OUTER_CACHE
883 883
884config OUTER_CACHE_SYNC 884config OUTER_CACHE_SYNC
885 bool 885 bool
886 select ARM_HEAVY_MB
886 help 887 help
 887 The outer cache has an outer_cache_fns.sync function pointer 888 The outer cache has an outer_cache_fns.sync function pointer
888 that can be used to drain the write buffer of the outer cache. 889 that can be used to drain the write buffer of the outer cache.
@@ -1031,6 +1032,9 @@ config ARCH_HAS_BARRIERS
1031 This option allows the use of custom mandatory barriers 1032 This option allows the use of custom mandatory barriers
1032 included via the mach/barriers.h file. 1033 included via the mach/barriers.h file.
1033 1034
1035config ARM_HEAVY_MB
1036 bool
1037
1034config ARCH_SUPPORTS_BIG_ENDIAN 1038config ARCH_SUPPORTS_BIG_ENDIAN
1035 bool 1039 bool
1036 help 1040 help
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S
index 54473cd4aba9..b3b31e30cadd 100644
--- a/arch/arm/mm/abort-ev4.S
+++ b/arch/arm/mm/abort-ev4.S
@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
19 mrc p15, 0, r1, c5, c0, 0 @ get FSR 19 mrc p15, 0, r1, c5, c0, 0 @ get FSR
20 mrc p15, 0, r0, c6, c0, 0 @ get FAR 20 mrc p15, 0, r0, c6, c0, 0 @ get FAR
21 ldr r3, [r4] @ read aborted ARM instruction 21 ldr r3, [r4] @ read aborted ARM instruction
22 uaccess_disable ip @ disable userspace access
22 bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR 23 bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
23 tst r3, #1 << 20 @ L = 1 -> write? 24 tst r3, #1 << 20 @ L = 1 -> write?
24 orreq r1, r1, #1 << 11 @ yes. 25 orreq r1, r1, #1 << 11 @ yes.
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S
index a0908d4653a3..a6a381a6caa5 100644
--- a/arch/arm/mm/abort-ev5t.S
+++ b/arch/arm/mm/abort-ev5t.S
@@ -21,8 +21,10 @@ ENTRY(v5t_early_abort)
21 mrc p15, 0, r0, c6, c0, 0 @ get FAR 21 mrc p15, 0, r0, c6, c0, 0 @ get FAR
22 do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 22 do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
23 ldreq r3, [r4] @ read aborted ARM instruction 23 ldreq r3, [r4] @ read aborted ARM instruction
24 uaccess_disable ip @ disable user access
24 bic r1, r1, #1 << 11 @ clear bits 11 of FSR 25 bic r1, r1, #1 << 11 @ clear bits 11 of FSR
25 do_ldrd_abort tmp=ip, insn=r3 26 teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
27 beq do_DataAbort @ yes
26 tst r3, #1 << 20 @ check write 28 tst r3, #1 << 20 @ check write
27 orreq r1, r1, #1 << 11 29 orreq r1, r1, #1 << 11
28 b do_DataAbort 30 b do_DataAbort
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S
index 4006b7a61264..00ab011bef58 100644
--- a/arch/arm/mm/abort-ev5tj.S
+++ b/arch/arm/mm/abort-ev5tj.S
@@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort)
24 bne do_DataAbort 24 bne do_DataAbort
25 do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 25 do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
26 ldreq r3, [r4] @ read aborted ARM instruction 26 ldreq r3, [r4] @ read aborted ARM instruction
27 do_ldrd_abort tmp=ip, insn=r3 27 uaccess_disable ip @ disable userspace access
28 teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
29 beq do_DataAbort @ yes
28 tst r3, #1 << 20 @ L = 0 -> write 30 tst r3, #1 << 20 @ L = 0 -> write
29 orreq r1, r1, #1 << 11 @ yes. 31 orreq r1, r1, #1 << 11 @ yes.
30 b do_DataAbort 32 b do_DataAbort
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8c48c5c22a33..8801a15aa105 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,16 +26,18 @@ ENTRY(v6_early_abort)
26 ldr ip, =0x4107b36 26 ldr ip, =0x4107b36
27 mrc p15, 0, r3, c0, c0, 0 @ get processor id 27 mrc p15, 0, r3, c0, c0, 0 @ get processor id
28 teq ip, r3, lsr #4 @ r0 ARM1136? 28 teq ip, r3, lsr #4 @ r0 ARM1136?
29 bne do_DataAbort 29 bne 1f
30 tst r5, #PSR_J_BIT @ Java? 30 tst r5, #PSR_J_BIT @ Java?
31 tsteq r5, #PSR_T_BIT @ Thumb? 31 tsteq r5, #PSR_T_BIT @ Thumb?
32 bne do_DataAbort 32 bne 1f
33 bic r1, r1, #1 << 11 @ clear bit 11 of FSR 33 bic r1, r1, #1 << 11 @ clear bit 11 of FSR
34 ldr r3, [r4] @ read aborted ARM instruction 34 ldr r3, [r4] @ read aborted ARM instruction
35 ARM_BE8(rev r3, r3) 35 ARM_BE8(rev r3, r3)
36 36
37 do_ldrd_abort tmp=ip, insn=r3 37 teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
38 beq 1f @ yes
38 tst r3, #1 << 20 @ L = 0 -> write 39 tst r3, #1 << 20 @ L = 0 -> write
39 orreq r1, r1, #1 << 11 @ yes. 40 orreq r1, r1, #1 << 11 @ yes.
40#endif 41#endif
421: uaccess_disable ip @ disable userspace access
41 b do_DataAbort 43 b do_DataAbort
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 4812ad054214..e8d0e08c227f 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -15,6 +15,7 @@
15ENTRY(v7_early_abort) 15ENTRY(v7_early_abort)
16 mrc p15, 0, r1, c5, c0, 0 @ get FSR 16 mrc p15, 0, r1, c5, c0, 0 @ get FSR
17 mrc p15, 0, r0, c6, c0, 0 @ get FAR 17 mrc p15, 0, r0, c6, c0, 0 @ get FAR
18 uaccess_disable ip @ disable userspace access
18 19
19 /* 20 /*
20 * V6 code adjusts the returned DFSR. 21 * V6 code adjusts the returned DFSR.
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index f3982580c273..6d8e8e3365d1 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
26#endif 26#endif
27 bne .data_thumb_abort 27 bne .data_thumb_abort
28 ldr r8, [r4] @ read arm instruction 28 ldr r8, [r4] @ read arm instruction
29 uaccess_disable ip @ disable userspace access
29 tst r8, #1 << 20 @ L = 1 -> write? 30 tst r8, #1 << 20 @ L = 1 -> write?
30 orreq r1, r1, #1 << 11 @ yes. 31 orreq r1, r1, #1 << 11 @ yes.
31 and r7, r8, #15 << 24 32 and r7, r8, #15 << 24
@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
155 156
156.data_thumb_abort: 157.data_thumb_abort:
157 ldrh r8, [r4] @ read instruction 158 ldrh r8, [r4] @ read instruction
159 uaccess_disable ip @ disable userspace access
158 tst r8, #1 << 11 @ L = 1 -> write? 160 tst r8, #1 << 11 @ L = 1 -> write?
159 orreq r1, r1, #1 << 8 @ yes 161 orreq r1, r1, #1 << 8 @ yes
160 and r7, r8, #15 << 12 162 and r7, r8, #15 << 12
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 2cbf68ef0e83..4509bee4e081 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -13,6 +13,7 @@
13 tst \psr, #PSR_T_BIT 13 tst \psr, #PSR_T_BIT
14 beq not_thumb 14 beq not_thumb
15 ldrh \tmp, [\pc] @ Read aborted Thumb instruction 15 ldrh \tmp, [\pc] @ Read aborted Thumb instruction
16 uaccess_disable ip @ disable userspace access
16 and \tmp, \tmp, # 0xfe00 @ Mask opcode field 17 and \tmp, \tmp, # 0xfe00 @ Mask opcode field
17 cmp \tmp, # 0x5600 @ Is it ldrsb? 18 cmp \tmp, # 0x5600 @ Is it ldrsb?
18 orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes 19 orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes
@@ -29,12 +30,9 @@ not_thumb:
29 * [7:4] == 1101 30 * [7:4] == 1101
30 * [20] == 0 31 * [20] == 0
31 */ 32 */
32 .macro do_ldrd_abort, tmp, insn 33 .macro teq_ldrd, tmp, insn
33 tst \insn, #0x0e100000 @ [27:25,20] == 0 34 mov \tmp, #0x0e100000
34 bne not_ldrd 35 orr \tmp, #0x000000f0
35 and \tmp, \insn, #0x000000f0 @ [7:4] == 1101 36 and \tmp, \insn, \tmp
36 cmp \tmp, #0x000000d0 37 teq \tmp, #0x000000d0
37 beq do_DataAbort
38not_ldrd:
39 .endm 38 .endm
40
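
The rewritten macro builds the mask 0x0e1000f0 in a register, ANDs it with the instruction and compares against 0x000000d0, leaving the result in the condition flags for the caller to branch on instead of branching to do_DataAbort itself. A C rendering of the same predicate, for illustration only:

/*
 * LDRD has bits [27:25] == 0, bit [20] == 0 and bits [7:4] == 0b1101,
 * so a single mask-and-compare identifies it.
 */
static inline bool insn_is_ldrd(u32 insn)
{
	return (insn & 0x0e1000f0) == 0x000000d0;
}
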
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 097181e08c25..5c1b7a7b9af6 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -368,7 +368,6 @@ int __init feroceon_of_init(void)
368 struct device_node *node; 368 struct device_node *node;
369 void __iomem *base; 369 void __iomem *base;
370 bool l2_wt_override = false; 370 bool l2_wt_override = false;
371 struct resource res;
372 371
373#if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) 372#if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
374 l2_wt_override = true; 373 l2_wt_override = true;
@@ -376,10 +375,7 @@ int __init feroceon_of_init(void)
376 375
377 node = of_find_matching_node(NULL, feroceon_ids); 376 node = of_find_matching_node(NULL, feroceon_ids);
378 if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) { 377 if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
379 if (of_address_to_resource(node, 0, &res)) 378 base = of_iomap(node, 0);
380 return -ENODEV;
381
382 base = ioremap(res.start, resource_size(&res));
383 if (!base) 379 if (!base)
384 return -ENOMEM; 380 return -ENOMEM;
385 381
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 71b3d3309024..493692d838c6 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1171,6 +1171,11 @@ static void __init l2c310_of_parse(const struct device_node *np,
1171 } 1171 }
1172 } 1172 }
1173 1173
1174 if (of_property_read_bool(np, "arm,shared-override")) {
1175 *aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
1176 *aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
1177 }
1178
1174 prefetch = l2x0_saved_regs.prefetch_ctrl; 1179 prefetch = l2x0_saved_regs.prefetch_ctrl;
1175 1180
1176 ret = of_property_read_u32(np, "arm,double-linefill", &val); 1181 ret = of_property_read_u32(np, "arm,double-linefill", &val);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 3d3d6aa60c87..bf35abcc7d59 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -39,6 +39,7 @@
39#include <asm/system_info.h> 39#include <asm/system_info.h>
40#include <asm/dma-contiguous.h> 40#include <asm/dma-contiguous.h>
41 41
42#include "dma.h"
42#include "mm.h" 43#include "mm.h"
43 44
44/* 45/*
@@ -648,14 +649,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
648 size = PAGE_ALIGN(size); 649 size = PAGE_ALIGN(size);
649 want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs); 650 want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
650 651
651 if (is_coherent || nommu()) 652 if (nommu())
653 addr = __alloc_simple_buffer(dev, size, gfp, &page);
654 else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
655 addr = __alloc_from_contiguous(dev, size, prot, &page,
656 caller, want_vaddr);
657 else if (is_coherent)
652 addr = __alloc_simple_buffer(dev, size, gfp, &page); 658 addr = __alloc_simple_buffer(dev, size, gfp, &page);
653 else if (!(gfp & __GFP_WAIT)) 659 else if (!(gfp & __GFP_WAIT))
654 addr = __alloc_from_pool(size, &page); 660 addr = __alloc_from_pool(size, &page);
655 else if (!dev_get_cma_area(dev))
656 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
657 else 661 else
658 addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr); 662 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
663 caller, want_vaddr);
659 664
660 if (page) 665 if (page)
661 *handle = pfn_to_dma(dev, page_to_pfn(page)); 666 *handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -683,13 +688,12 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
683static void *arm_coherent_dma_alloc(struct device *dev, size_t size, 688static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
684 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) 689 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
685{ 690{
686 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
687 void *memory; 691 void *memory;
688 692
689 if (dma_alloc_from_coherent(dev, size, handle, &memory)) 693 if (dma_alloc_from_coherent(dev, size, handle, &memory))
690 return memory; 694 return memory;
691 695
692 return __dma_alloc(dev, size, handle, gfp, prot, true, 696 return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
693 attrs, __builtin_return_address(0)); 697 attrs, __builtin_return_address(0));
694} 698}
695 699
@@ -753,12 +757,12 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
753 757
754 size = PAGE_ALIGN(size); 758 size = PAGE_ALIGN(size);
755 759
756 if (is_coherent || nommu()) { 760 if (nommu()) {
757 __dma_free_buffer(page, size); 761 __dma_free_buffer(page, size);
758 } else if (__free_from_pool(cpu_addr, size)) { 762 } else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
759 return; 763 return;
760 } else if (!dev_get_cma_area(dev)) { 764 } else if (!dev_get_cma_area(dev)) {
761 if (want_vaddr) 765 if (want_vaddr && !is_coherent)
762 __dma_free_remap(cpu_addr, size); 766 __dma_free_remap(cpu_addr, size);
763 __dma_free_buffer(page, size); 767 __dma_free_buffer(page, size);
764 } else { 768 } else {
diff --git a/arch/arm/mm/dma.h b/arch/arm/mm/dma.h
new file mode 100644
index 000000000000..70ea6852f94e
--- /dev/null
+++ b/arch/arm/mm/dma.h
@@ -0,0 +1,32 @@
1#ifndef DMA_H
2#define DMA_H
3
4#include <asm/glue-cache.h>
5
6#ifndef MULTI_CACHE
7#define dmac_map_area __glue(_CACHE,_dma_map_area)
8#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
9
10/*
11 * These are private to the dma-mapping API. Do not use directly.
12 * Their sole purpose is to ensure that data held in the cache
13 * is visible to DMA, or data written by DMA to system memory is
14 * visible to the CPU.
15 */
16extern void dmac_map_area(const void *, size_t, int);
17extern void dmac_unmap_area(const void *, size_t, int);
18
19#else
20
21/*
22 * These are private to the dma-mapping API. Do not use directly.
23 * Their sole purpose is to ensure that data held in the cache
24 * is visible to DMA, or data written by DMA to system memory is
25 * visible to the CPU.
26 */
27#define dmac_map_area cpu_cache.dma_map_area
28#define dmac_unmap_area cpu_cache.dma_unmap_area
29
30#endif
31
32#endif
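
The single-cache branch of the new dma.h relies on the usual __glue token pasting from asm/glue.h to bind dmac_map_area to the CPU-specific routine at compile time. A self-contained illustration of the expansion; the _CACHE value v7 is just an example, and the two-level macro is what forces _CACHE to be expanded before pasting:

#define ____glue(name, fn)	name##fn
#define __glue(name, fn)	____glue(name, fn)	/* expand args first */

#define _CACHE			v7			/* illustrative value */
#define dmac_map_area		__glue(_CACHE, _dma_map_area)

/*
 * dmac_map_area now names v7_dma_map_area; with MULTI_CACHE set, the
 * call goes through the cpu_cache.dma_map_area pointer instead.
 */
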
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 34b66af516ea..1ec8e7590fc6 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -21,6 +21,21 @@
21 21
22#include "mm.h" 22#include "mm.h"
23 23
24#ifdef CONFIG_ARM_HEAVY_MB
25void (*soc_mb)(void);
26
27void arm_heavy_mb(void)
28{
29#ifdef CONFIG_OUTER_CACHE_SYNC
30 if (outer_cache.sync)
31 outer_cache.sync();
32#endif
33 if (soc_mb)
34 soc_mb();
35}
36EXPORT_SYMBOL(arm_heavy_mb);
37#endif
38
24#ifdef CONFIG_CPU_CACHE_VIPT 39#ifdef CONFIG_CPU_CACHE_VIPT
25 40
26static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) 41static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
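
The asm/barrier.h side of ARM_HEAVY_MB appears only in the diffstat, not in this excerpt, so the following is an assumption about how the new hook is consumed: with CONFIG_ARM_HEAVY_MB the mandatory barriers are expected to call arm_heavy_mb() after the dsb, which is what makes the outer-cache sync and soc_mb() above reachable from mb() and wmb().

/* Assumed shape of the asm/barrier.h change (not quoted from this diff): */
#ifdef CONFIG_ARM_HEAVY_MB
extern void arm_heavy_mb(void);
#define __arm_heavy_mb(x...)	do { dsb(x); arm_heavy_mb(); } while (0)
#else
#define __arm_heavy_mb(x...)	dsb(x)
#endif
/* mb()/wmb() would then expand to __arm_heavy_mb()/__arm_heavy_mb(st). */
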
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index ee8dfa793989..9df5f09585ca 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@ void *kmap_atomic(struct page *page)
79 79
80 type = kmap_atomic_idx_push(); 80 type = kmap_atomic_idx_push();
81 81
82 idx = type + KM_TYPE_NR * smp_processor_id(); 82 idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
83 vaddr = __fix_to_virt(idx); 83 vaddr = __fix_to_virt(idx);
84#ifdef CONFIG_DEBUG_HIGHMEM 84#ifdef CONFIG_DEBUG_HIGHMEM
85 /* 85 /*
@@ -106,7 +106,7 @@ void __kunmap_atomic(void *kvaddr)
106 106
107 if (kvaddr >= (void *)FIXADDR_START) { 107 if (kvaddr >= (void *)FIXADDR_START) {
108 type = kmap_atomic_idx(); 108 type = kmap_atomic_idx();
109 idx = type + KM_TYPE_NR * smp_processor_id(); 109 idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
110 110
111 if (cache_is_vivt()) 111 if (cache_is_vivt())
112 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); 112 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
@@ -138,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
138 return page_address(page); 138 return page_address(page);
139 139
140 type = kmap_atomic_idx_push(); 140 type = kmap_atomic_idx_push();
141 idx = type + KM_TYPE_NR * smp_processor_id(); 141 idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
142 vaddr = __fix_to_virt(idx); 142 vaddr = __fix_to_virt(idx);
143#ifdef CONFIG_DEBUG_HIGHMEM 143#ifdef CONFIG_DEBUG_HIGHMEM
144 BUG_ON(!pte_none(get_fixmap_pte(vaddr))); 144 BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
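
The only functional change in the highmem.c hunk is the FIX_KMAP_BEGIN offset: atomic kmap slots no longer start at fixmap index 0, so they cannot collide with the other fixed addresses (such as the early console mapping enabled from setup.c above). Illustrative arithmetic, assuming the asm-generic __fix_to_virt() definition:

static unsigned long example_kmap_slot(unsigned int cpu, unsigned int type)
{
	unsigned int idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * cpu;

	/*
	 * __fix_to_virt(idx) == FIXADDR_TOP - (idx << PAGE_SHIFT) with
	 * the asm-generic fixmap helpers (an assumption here).
	 */
	return __fix_to_virt(idx);
}
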
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 870838a46d52..7cd15143a507 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
291 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 291 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
292 L_PTE_RDONLY, 292 L_PTE_RDONLY,
293 .prot_l1 = PMD_TYPE_TABLE, 293 .prot_l1 = PMD_TYPE_TABLE,
294 .domain = DOMAIN_USER, 294 .domain = DOMAIN_VECTORS,
295 }, 295 },
296 [MT_HIGH_VECTORS] = { 296 [MT_HIGH_VECTORS] = {
297 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 297 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
298 L_PTE_USER | L_PTE_RDONLY, 298 L_PTE_USER | L_PTE_RDONLY,
299 .prot_l1 = PMD_TYPE_TABLE, 299 .prot_l1 = PMD_TYPE_TABLE,
300 .domain = DOMAIN_USER, 300 .domain = DOMAIN_VECTORS,
301 }, 301 },
302 [MT_MEMORY_RWX] = { 302 [MT_MEMORY_RWX] = {
303 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, 303 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
@@ -357,6 +357,47 @@ const struct mem_type *get_mem_type(unsigned int type)
357} 357}
358EXPORT_SYMBOL(get_mem_type); 358EXPORT_SYMBOL(get_mem_type);
359 359
360static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
361
362static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
363 __aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
364
365static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
366{
367 return &bm_pte[pte_index(addr)];
368}
369
370static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
371{
372 return pte_offset_kernel(dir, addr);
373}
374
375static inline pmd_t * __init fixmap_pmd(unsigned long addr)
376{
377 pgd_t *pgd = pgd_offset_k(addr);
378 pud_t *pud = pud_offset(pgd, addr);
379 pmd_t *pmd = pmd_offset(pud, addr);
380
381 return pmd;
382}
383
384void __init early_fixmap_init(void)
385{
386 pmd_t *pmd;
387
388 /*
389 * The early fixmap range spans multiple pmds, for which
390 * we are not prepared:
391 */
392 BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
393 != FIXADDR_TOP >> PMD_SHIFT);
394
395 pmd = fixmap_pmd(FIXADDR_TOP);
396 pmd_populate_kernel(&init_mm, pmd, bm_pte);
397
398 pte_offset_fixmap = pte_offset_early_fixmap;
399}
400
360/* 401/*
361 * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range(). 402 * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
362 * As a result, this can only be called with preemption disabled, as under 403 * As a result, this can only be called with preemption disabled, as under
@@ -365,7 +406,7 @@ EXPORT_SYMBOL(get_mem_type);
365void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) 406void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
366{ 407{
367 unsigned long vaddr = __fix_to_virt(idx); 408 unsigned long vaddr = __fix_to_virt(idx);
368 pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr); 409 pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
369 410
370 /* Make sure fixmap region does not exceed available allocation. */ 411 /* Make sure fixmap region does not exceed available allocation. */
371 BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) > 412 BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
@@ -855,7 +896,7 @@ static void __init create_mapping(struct map_desc *md)
855 } 896 }
856 897
857 if ((md->type == MT_DEVICE || md->type == MT_ROM) && 898 if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
858 md->virtual >= PAGE_OFFSET && 899 md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
859 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { 900 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
860 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n", 901 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
861 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); 902 (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -1219,10 +1260,10 @@ void __init arm_mm_memblock_reserve(void)
1219 1260
1220/* 1261/*
1221 * Set up the device mappings. Since we clear out the page tables for all 1262 * Set up the device mappings. Since we clear out the page tables for all
1222 * mappings above VMALLOC_START, we will remove any debug device mappings. 1263 * mappings above VMALLOC_START, except early fixmap, we might remove debug
1223 * This means you have to be careful how you debug this function, or any 1264 * device mappings. This means earlycon can be used to debug this function
1224 * called function. This means you can't use any function or debugging 1265 * Any other function or debugging method which may touch any device _will_
1225 * method which may touch any device, otherwise the kernel _will_ crash. 1266 * crash the kernel.
1226 */ 1267 */
1227static void __init devicemaps_init(const struct machine_desc *mdesc) 1268static void __init devicemaps_init(const struct machine_desc *mdesc)
1228{ 1269{
@@ -1237,7 +1278,10 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
1237 1278
1238 early_trap_init(vectors); 1279 early_trap_init(vectors);
1239 1280
1240 for (addr = VMALLOC_START; addr; addr += PMD_SIZE) 1281 /*
1282 * Clear page table except top pmd used by early fixmaps
1283 */
1284 for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
1241 pmd_clear(pmd_off_k(addr)); 1285 pmd_clear(pmd_off_k(addr));
1242 1286
1243 /* 1287 /*
@@ -1489,6 +1533,35 @@ void __init early_paging_init(const struct machine_desc *mdesc)
1489 1533
1490#endif 1534#endif
1491 1535
1536static void __init early_fixmap_shutdown(void)
1537{
1538 int i;
1539 unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
1540
1541 pte_offset_fixmap = pte_offset_late_fixmap;
1542 pmd_clear(fixmap_pmd(va));
1543 local_flush_tlb_kernel_page(va);
1544
1545 for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
1546 pte_t *pte;
1547 struct map_desc map;
1548
1549 map.virtual = fix_to_virt(i);
1550 pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
1551
1552 /* Only i/o device mappings are supported ATM */
1553 if (pte_none(*pte) ||
1554 (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
1555 continue;
1556
1557 map.pfn = pte_pfn(*pte);
1558 map.type = MT_DEVICE;
1559 map.length = PAGE_SIZE;
1560
1561 create_mapping(&map);
1562 }
1563}
1564
1492/* 1565/*
1493 * paging_init() sets up the page tables, initialises the zone memory 1566 * paging_init() sets up the page tables, initialises the zone memory
1494 * maps, and sets up the zero page, bad page and bad page tables. 1567 * maps, and sets up the zero page, bad page and bad page tables.
@@ -1502,6 +1575,7 @@ void __init paging_init(const struct machine_desc *mdesc)
1502 map_lowmem(); 1575 map_lowmem();
1503 memblock_set_current_limit(arm_lowmem_limit); 1576 memblock_set_current_limit(arm_lowmem_limit);
1504 dma_contiguous_remap(); 1577 dma_contiguous_remap();
1578 early_fixmap_shutdown();
1505 devicemaps_init(mdesc); 1579 devicemaps_init(mdesc);
1506 kmap_init(); 1580 kmap_init();
1507 tcm_init(); 1581 tcm_init();
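
Putting the mmu.c pieces together: early_fixmap_init() points the fixmap PMD at the static bm_pte[] table, __set_fixmap() resolves PTEs through pte_offset_fixmap, and early_fixmap_shutdown() replays any surviving device mappings into the real page tables just before devicemaps_init(). A hedged sketch of a consumer, along the lines of the earlycon support enabled from setup.c earlier in this series; the wrapper function is made up, and set_fixmap_io()/FIX_EARLYCON_MEM_BASE are assumed to come from the generic fixmap and earlycon code:

static void __iomem * __init example_early_map_uart(phys_addr_t paddr)
{
	/* installs a PTE in bm_pte[] long before paging_init() runs */
	set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr & PAGE_MASK);

	return (void __iomem *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
				(paddr & ~PAGE_MASK));
}
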
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3681f11dd9f..e683db1b90a3 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
84 if (!new_pte) 84 if (!new_pte)
85 goto no_pte; 85 goto no_pte;
86 86
87#ifndef CONFIG_ARM_LPAE
88 /*
89 * Modify the PTE pointer to have the correct domain. This
90 * needs to be the vectors domain to avoid the low vectors
91 * being unmapped.
92 */
93 pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
94 pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
95#endif
96
87 init_pud = pud_offset(init_pgd, 0); 97 init_pud = pud_offset(init_pgd, 0);
88 init_pmd = pmd_offset(init_pud, 0); 98 init_pmd = pmd_offset(init_pud, 0);
89 init_pte = pte_offset_map(init_pmd, 0); 99 init_pte = pte_offset_map(init_pmd, 0);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b7b9ceaa684a..51832ad33fa9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -20,6 +20,7 @@ config ARM64
20 select ARM_GIC_V2M if PCI_MSI 20 select ARM_GIC_V2M if PCI_MSI
21 select ARM_GIC_V3 21 select ARM_GIC_V3
22 select ARM_GIC_V3_ITS if PCI_MSI 22 select ARM_GIC_V3_ITS if PCI_MSI
23 select ARM_PSCI_FW
23 select BUILDTIME_EXTABLE_SORT 24 select BUILDTIME_EXTABLE_SORT
24 select CLONE_BACKWARDS 25 select CLONE_BACKWARDS
25 select COMMON_CLK 26 select COMMON_CLK
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 406485ed110a..208cec08a74f 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -12,11 +12,11 @@
12#ifndef _ASM_ACPI_H 12#ifndef _ASM_ACPI_H
13#define _ASM_ACPI_H 13#define _ASM_ACPI_H
14 14
15#include <linux/mm.h>
16#include <linux/irqchip/arm-gic-acpi.h> 15#include <linux/irqchip/arm-gic-acpi.h>
16#include <linux/mm.h>
17#include <linux/psci.h>
17 18
18#include <asm/cputype.h> 19#include <asm/cputype.h>
19#include <asm/psci.h>
20#include <asm/smp_plat.h> 20#include <asm/smp_plat.h>
21 21
22/* Macros for consistency checks of the GICC subtable of MADT */ 22/* Macros for consistency checks of the GICC subtable of MADT */
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
deleted file mode 100644
index 49d7e1aaebdc..000000000000
--- a/arch/arm64/include/asm/psci.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2013 ARM Limited
12 */
13
14#ifndef __ASM_PSCI_H
15#define __ASM_PSCI_H
16
17int __init psci_dt_init(void);
18
19#ifdef CONFIG_ACPI
20int __init psci_acpi_init(void);
21bool __init acpi_psci_present(void);
22bool __init acpi_psci_use_hvc(void);
23#else
24static inline int psci_acpi_init(void) { return 0; }
25static inline bool acpi_psci_present(void) { return false; }
26#endif
27
28#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 869f202748e8..51fd15a16461 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -18,23 +18,17 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/smp.h> 20#include <linux/smp.h>
21#include <linux/reboot.h>
22#include <linux/pm.h>
23#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/psci.h>
24#include <linux/slab.h> 23#include <linux/slab.h>
24
25#include <uapi/linux/psci.h> 25#include <uapi/linux/psci.h>
26 26
27#include <asm/compiler.h> 27#include <asm/compiler.h>
28#include <asm/cputype.h>
29#include <asm/cpu_ops.h> 28#include <asm/cpu_ops.h>
30#include <asm/errno.h> 29#include <asm/errno.h>
31#include <asm/psci.h>
32#include <asm/smp_plat.h> 30#include <asm/smp_plat.h>
33#include <asm/suspend.h> 31#include <asm/suspend.h>
34#include <asm/system_misc.h>
35
36#define PSCI_POWER_STATE_TYPE_STANDBY 0
37#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
38 32
39static bool psci_power_state_loses_context(u32 state) 33static bool psci_power_state_loses_context(u32 state)
40{ 34{
@@ -50,122 +44,8 @@ static bool psci_power_state_is_valid(u32 state)
         return !(state & ~valid_mask);
 }
 
-/*
- * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
- * calls to its resident CPU, so we must avoid issuing those. We never migrate
- * a Trusted OS even if it claims to be capable of migration -- doing so will
- * require cooperation with a Trusted OS driver.
- */
-static int resident_cpu = -1;
-
-struct psci_operations {
-        int (*cpu_suspend)(u32 state, unsigned long entry_point);
-        int (*cpu_off)(u32 state);
-        int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
-        int (*migrate)(unsigned long cpuid);
-        int (*affinity_info)(unsigned long target_affinity,
-                        unsigned long lowest_affinity_level);
-        int (*migrate_info_type)(void);
-};
-
-static struct psci_operations psci_ops;
-
-typedef unsigned long (psci_fn)(unsigned long, unsigned long,
-                                unsigned long, unsigned long);
-asmlinkage psci_fn __invoke_psci_fn_hvc;
-asmlinkage psci_fn __invoke_psci_fn_smc;
-static psci_fn *invoke_psci_fn;
-
-enum psci_function {
-        PSCI_FN_CPU_SUSPEND,
-        PSCI_FN_CPU_ON,
-        PSCI_FN_CPU_OFF,
-        PSCI_FN_MIGRATE,
-        PSCI_FN_MAX,
-};
-
 static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
 
-static u32 psci_function_id[PSCI_FN_MAX];
-
-static int psci_to_linux_errno(int errno)
-{
-        switch (errno) {
-        case PSCI_RET_SUCCESS:
-                return 0;
-        case PSCI_RET_NOT_SUPPORTED:
-                return -EOPNOTSUPP;
-        case PSCI_RET_INVALID_PARAMS:
-                return -EINVAL;
-        case PSCI_RET_DENIED:
-                return -EPERM;
-        };
-
-        return -EINVAL;
-}
-
-static u32 psci_get_version(void)
-{
-        return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
-}
-
-static int psci_cpu_suspend(u32 state, unsigned long entry_point)
-{
-        int err;
-        u32 fn;
-
-        fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
-        err = invoke_psci_fn(fn, state, entry_point, 0);
-        return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_off(u32 state)
-{
-        int err;
-        u32 fn;
-
-        fn = psci_function_id[PSCI_FN_CPU_OFF];
-        err = invoke_psci_fn(fn, state, 0, 0);
-        return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
-{
-        int err;
-        u32 fn;
-
-        fn = psci_function_id[PSCI_FN_CPU_ON];
-        err = invoke_psci_fn(fn, cpuid, entry_point, 0);
-        return psci_to_linux_errno(err);
-}
-
-static int psci_migrate(unsigned long cpuid)
-{
-        int err;
-        u32 fn;
-
-        fn = psci_function_id[PSCI_FN_MIGRATE];
-        err = invoke_psci_fn(fn, cpuid, 0, 0);
-        return psci_to_linux_errno(err);
-}
-
-static int psci_affinity_info(unsigned long target_affinity,
-                unsigned long lowest_affinity_level)
-{
-        return invoke_psci_fn(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity,
-                        lowest_affinity_level, 0);
-}
-
-static int psci_migrate_info_type(void)
-{
-        return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
-}
-
-static unsigned long psci_migrate_info_up_cpu(void)
-{
-        return invoke_psci_fn(PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, 0, 0, 0);
-}
-
 static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
 {
         int i, ret, count = 0;
@@ -230,238 +110,6 @@ free_mem:
         return ret;
 }
 
-static int get_set_conduit_method(struct device_node *np)
-{
-        const char *method;
-
-        pr_info("probing for conduit method from DT.\n");
-
-        if (of_property_read_string(np, "method", &method)) {
-                pr_warn("missing \"method\" property\n");
-                return -ENXIO;
-        }
-
-        if (!strcmp("hvc", method)) {
-                invoke_psci_fn = __invoke_psci_fn_hvc;
-        } else if (!strcmp("smc", method)) {
-                invoke_psci_fn = __invoke_psci_fn_smc;
-        } else {
-                pr_warn("invalid \"method\" property: %s\n", method);
-                return -EINVAL;
-        }
-        return 0;
-}
-
-static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
-{
-        invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
-}
-
-static void psci_sys_poweroff(void)
-{
-        invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
-}
-
-/*
- * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
- * return DENIED (which would be fatal).
- */
-static void __init psci_init_migrate(void)
-{
-        unsigned long cpuid;
-        int type, cpu;
-
-        type = psci_ops.migrate_info_type();
-
-        if (type == PSCI_0_2_TOS_MP) {
-                pr_info("Trusted OS migration not required\n");
-                return;
-        }
-
-        if (type == PSCI_RET_NOT_SUPPORTED) {
-                pr_info("MIGRATE_INFO_TYPE not supported.\n");
-                return;
-        }
-
-        if (type != PSCI_0_2_TOS_UP_MIGRATE &&
-            type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
-                pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
-                return;
-        }
-
-        cpuid = psci_migrate_info_up_cpu();
-        if (cpuid & ~MPIDR_HWID_BITMASK) {
-                pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
-                        cpuid);
-                return;
-        }
-
-        cpu = get_logical_index(cpuid);
-        resident_cpu = cpu >= 0 ? cpu : -1;
-
-        pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
-}
-
-static void __init psci_0_2_set_functions(void)
-{
-        pr_info("Using standard PSCI v0.2 function IDs\n");
-        psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
-        psci_ops.cpu_suspend = psci_cpu_suspend;
-
-        psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
-        psci_ops.cpu_off = psci_cpu_off;
-
-        psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
-        psci_ops.cpu_on = psci_cpu_on;
-
-        psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
-        psci_ops.migrate = psci_migrate;
-
-        psci_ops.affinity_info = psci_affinity_info;
-
-        psci_ops.migrate_info_type = psci_migrate_info_type;
-
-        arm_pm_restart = psci_sys_reset;
-
-        pm_power_off = psci_sys_poweroff;
-}
-
-/*
- * Probe function for PSCI firmware versions >= 0.2
- */
-static int __init psci_probe(void)
-{
-        u32 ver = psci_get_version();
-
-        pr_info("PSCIv%d.%d detected in firmware.\n",
-                        PSCI_VERSION_MAJOR(ver),
-                        PSCI_VERSION_MINOR(ver));
-
-        if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
-                pr_err("Conflicting PSCI version detected.\n");
-                return -EINVAL;
-        }
-
-        psci_0_2_set_functions();
-
-        psci_init_migrate();
-
-        return 0;
-}
-
-typedef int (*psci_initcall_t)(const struct device_node *);
-
-/*
- * PSCI init function for PSCI versions >=0.2
- *
- * Probe based on PSCI PSCI_VERSION function
- */
-static int __init psci_0_2_init(struct device_node *np)
-{
-        int err;
-
-        err = get_set_conduit_method(np);
-
-        if (err)
-                goto out_put_node;
-        /*
-         * Starting with v0.2, the PSCI specification introduced a call
-         * (PSCI_VERSION) that allows probing the firmware version, so
-         * that PSCI function IDs and version specific initialization
-         * can be carried out according to the specific version reported
-         * by firmware
-         */
-        err = psci_probe();
-
-out_put_node:
-        of_node_put(np);
-        return err;
-}
-
-/*
- * PSCI < v0.2 get PSCI Function IDs via DT.
- */
-static int __init psci_0_1_init(struct device_node *np)
-{
-        u32 id;
-        int err;
-
-        err = get_set_conduit_method(np);
-
-        if (err)
-                goto out_put_node;
-
-        pr_info("Using PSCI v0.1 Function IDs from DT\n");
-
-        if (!of_property_read_u32(np, "cpu_suspend", &id)) {
-                psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
-                psci_ops.cpu_suspend = psci_cpu_suspend;
-        }
-
-        if (!of_property_read_u32(np, "cpu_off", &id)) {
-                psci_function_id[PSCI_FN_CPU_OFF] = id;
-                psci_ops.cpu_off = psci_cpu_off;
-        }
-
-        if (!of_property_read_u32(np, "cpu_on", &id)) {
-                psci_function_id[PSCI_FN_CPU_ON] = id;
-                psci_ops.cpu_on = psci_cpu_on;
-        }
-
-        if (!of_property_read_u32(np, "migrate", &id)) {
-                psci_function_id[PSCI_FN_MIGRATE] = id;
-                psci_ops.migrate = psci_migrate;
-        }
-
-out_put_node:
-        of_node_put(np);
-        return err;
-}
-
-static const struct of_device_id psci_of_match[] __initconst = {
-        { .compatible = "arm,psci",     .data = psci_0_1_init},
-        { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
-        {},
-};
-
-int __init psci_dt_init(void)
-{
-        struct device_node *np;
-        const struct of_device_id *matched_np;
-        psci_initcall_t init_fn;
-
-        np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
-
-        if (!np)
-                return -ENODEV;
-
-        init_fn = (psci_initcall_t)matched_np->data;
-        return init_fn(np);
-}
-
-#ifdef CONFIG_ACPI
-/*
- * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
- * explicitly clarified in SBBR
- */
-int __init psci_acpi_init(void)
-{
-        if (!acpi_psci_present()) {
-                pr_info("is not implemented in ACPI.\n");
-                return -EOPNOTSUPP;
-        }
-
-        pr_info("probing for conduit method from ACPI.\n");
-
-        if (acpi_psci_use_hvc())
-                invoke_psci_fn = __invoke_psci_fn_hvc;
-        else
-                invoke_psci_fn = __invoke_psci_fn_smc;
-
-        return psci_probe();
-}
-#endif
-
 #ifdef CONFIG_SMP
 
 static int __init cpu_psci_cpu_init(unsigned int cpu)
@@ -489,11 +137,6 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static bool psci_tos_resident_on(int cpu)
-{
-        return cpu == resident_cpu;
-}
-
 static int cpu_psci_cpu_disable(unsigned int cpu)
 {
         /* Fail early if we don't have CPU_OFF support */
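
The arm64 code removed above follows one pattern throughout: PSCI function IDs live in a small table, every call goes through a single conduit invoker (__invoke_psci_fn_hvc or __invoke_psci_fn_smc), and the firmware's PSCI return code is translated into a Linux errno. What follows is a minimal, self-contained userspace sketch of that pattern, not kernel code: the conduit invoker is a stub standing in for the real SMC/HVC trap, and the printed call is only for demonstration.

/*
 * Minimal userspace sketch of the call pattern above: a function-ID
 * table, one conduit invoker, and PSCI-to-errno translation. The
 * invoker below is a stub standing in for the real SMC/HVC trap.
 */
#include <errno.h>
#include <stdio.h>

/* Return codes defined by PSCI v0.2. */
#define PSCI_RET_SUCCESS                 0
#define PSCI_RET_NOT_SUPPORTED          -1
#define PSCI_RET_INVALID_PARAMS         -2
#define PSCI_RET_DENIED                 -3

enum psci_function {
        PSCI_FN_CPU_SUSPEND,
        PSCI_FN_CPU_ON,
        PSCI_FN_CPU_OFF,
        PSCI_FN_MIGRATE,
        PSCI_FN_MAX,
};

static unsigned long psci_function_id[PSCI_FN_MAX];

/* Stub conduit: the kernel issues an SMC or HVC instruction here. */
static long invoke_psci_fn(unsigned long fn, unsigned long a0,
                           unsigned long a1, unsigned long a2)
{
        printf("firmware call 0x%lx(0x%lx, 0x%lx, 0x%lx)\n", fn, a0, a1, a2);
        return PSCI_RET_SUCCESS;
}

/* Same mapping as the removed psci_to_linux_errno(). */
static int psci_to_linux_errno(long err)
{
        switch (err) {
        case PSCI_RET_SUCCESS:
                return 0;
        case PSCI_RET_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        case PSCI_RET_INVALID_PARAMS:
                return -EINVAL;
        case PSCI_RET_DENIED:
                return -EPERM;
        }
        return -EINVAL;
}

/* Shape of the removed psci_cpu_on(): look up the ID, invoke, map errors. */
static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
{
        unsigned long fn = psci_function_id[PSCI_FN_CPU_ON];

        return psci_to_linux_errno(invoke_psci_fn(fn, cpuid, entry_point, 0));
}

int main(void)
{
        psci_function_id[PSCI_FN_CPU_ON] = 0xc4000003; /* PSCI_0_2_FN64_CPU_ON */
        return psci_cpu_on(1, 0x80000) ? 1 : 0;
}

The function-ID indirection exists because PSCI v0.1 firmware supplies the IDs via device-tree properties (see the removed psci_0_1_init()), whereas v0.2 uses the fixed standard IDs set up in psci_0_2_set_functions().
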
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 926ae8d9abc5..fdc11f05ac36 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -45,6 +45,7 @@
 #include <linux/of_platform.h>
 #include <linux/efi.h>
 #include <linux/personality.h>
+#include <linux/psci.h>
 
 #include <asm/acpi.h>
 #include <asm/fixmap.h>
@@ -60,7 +61,6 @@
 #include <asm/tlbflush.h>
 #include <asm/traps.h>
 #include <asm/memblock.h>
-#include <asm/psci.h>
 #include <asm/efi.h>
 #include <asm/virt.h>
 #include <asm/xen/hypervisor.h>
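
Both setup.c hunks simply track the header move from <asm/psci.h> to <linux/psci.h>. For reference, the conduit choice that moved out of psci.c keys off the device-tree "method" property; the sketch below mirrors that selection in a self-contained way, with the device-tree lookup stubbed by a fixed string (the node contents shown in the comment are illustrative).

/*
 * Minimal sketch of the conduit selection that moved out of psci.c:
 * the DT "method" property chooses HVC or SMC. The device-tree lookup
 * is stubbed so the sketch stays self-contained.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static long invoke_psci_fn_hvc(unsigned long fn, unsigned long a0,
                               unsigned long a1, unsigned long a2)
{
        printf("HVC 0x%lx\n", fn);
        return 0;
}

static long invoke_psci_fn_smc(unsigned long fn, unsigned long a0,
                               unsigned long a1, unsigned long a2)
{
        printf("SMC 0x%lx\n", fn);
        return 0;
}

static long (*invoke_psci_fn)(unsigned long, unsigned long,
                              unsigned long, unsigned long);

/* Stand-in for of_property_read_string(np, "method", &method). */
static const char *read_method_property(void)
{
        return "smc";   /* e.g. psci { compatible = "arm,psci-0.2"; method = "smc"; }; */
}

static int get_set_conduit_method(void)
{
        const char *method = read_method_property();

        if (!method)
                return -ENXIO;          /* missing "method" property */
        if (!strcmp(method, "hvc"))
                invoke_psci_fn = invoke_psci_fn_hvc;
        else if (!strcmp(method, "smc"))
                invoke_psci_fn = invoke_psci_fn_smc;
        else
                return -EINVAL;         /* invalid "method" property */
        return 0;
}

int main(void)
{
        if (get_set_conduit_method())
                return 1;
        invoke_psci_fn(0x84000000, 0, 0, 0);    /* PSCI_0_2_FN_PSCI_VERSION */
        return 0;
}
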