Diffstat (limited to 'arch/arm/kernel')
 arch/arm/kernel/armksyms.c       |  8 ++++++++
 arch/arm/kernel/entry-header.S   | 29 +++++++++++++++--------------
 arch/arm/kernel/irq.c            |  2 +-
 arch/arm/kernel/module.c         |  1 +
 arch/arm/kernel/perf_event_cpu.c | 14 ++++----------
 arch/arm/kernel/process.c        |  2 ++
 arch/arm/kernel/swp_emulate.c    | 15 ---------------
 arch/arm/kernel/thumbee.c        |  2 +-
 arch/arm/kernel/traps.c          | 17 +----------------
 9 files changed, 33 insertions(+), 57 deletions(-)
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index f7b450f97e68..a88671cfe1ff 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -98,6 +98,14 @@ EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
+
+#ifdef __ARMEB__
+EXPORT_SYMBOL(__get_user_64t_1);
+EXPORT_SYMBOL(__get_user_64t_2);
+EXPORT_SYMBOL(__get_user_64t_4);
+EXPORT_SYMBOL(__get_user_32t_8);
+#endif
 
 EXPORT_SYMBOL(__put_user_1);
 EXPORT_SYMBOL(__put_user_2);
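
These new exports back the 64-bit get_user() paths. A minimal sketch of module code that ends up referencing __get_user_8 (read_user_u64 is an illustrative name, not a kernel API):

    #include <linux/uaccess.h>

    /* On 32-bit ARM a 64-bit get_user() resolves to __get_user_8 (and, on
     * big-endian, to the __get_user_64t_1/2/4 and __get_user_32t_8 variants
     * for mixed widths), so a module doing this needs the exports above. */
    static int read_user_u64(const u64 __user *ptr, u64 *out)
    {
            return get_user(*out, ptr);
    }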
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 8db307d0954b..2fdf8679b46e 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -208,26 +208,21 @@
 #endif
 	.endif
 	msr	spsr_cxsf, \rpsr
-#if defined(CONFIG_CPU_V6)
-	ldr	r0, [sp]
-	strex	r1, r2, [sp]			@ clear the exclusive monitor
-	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
-#elif defined(CONFIG_CPU_32v6K)
-	clrex					@ clear the exclusive monitor
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
-#else
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
+	@ We must avoid clrex due to Cortex-A15 erratum #830321
+	sub	r0, sp, #4			@ uninhabited address
+	strex	r1, r2, [r0]			@ clear the exclusive monitor
 #endif
+	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 	.endm
 
 	.macro	restore_user_regs, fast = 0, offset = 0
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
-#if defined(CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
+	@ We must avoid clrex due to Cortex-A15 erratum #830321
 	strex	r1, r2, [sp]			@ clear the exclusive monitor
-#elif defined(CONFIG_CPU_32v6K)
-	clrex					@ clear the exclusive monitor
 #endif
 	.if	\fast
 	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
@@ -261,7 +256,10 @@
 	.endif
 	ldr	lr, [sp, #S_SP]			@ top of the stack
 	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
-	clrex					@ clear the exclusive monitor
+
+	@ We must avoid clrex due to Cortex-A15 erratum #830321
+	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor
+
 	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
 	ldmia	sp, {r0 - r12}
 	mov	sp, lr
@@ -282,13 +280,16 @@
 	.endm
 #else	/* ifdef CONFIG_CPU_V7M */
 	.macro	restore_user_regs, fast = 0, offset = 0
-	clrex					@ clear the exclusive monitor
 	mov	r2, sp
 	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [sp, #\offset + S_PC]	@ get pc
 	add	sp, sp, #\offset + S_SP
 	msr	spsr_cxsf, r1			@ save in spsr_svc
+
+	@ We must avoid clrex due to Cortex-A15 erratum #830321
+	strex	r1, r2, [sp]			@ clear the exclusive monitor
+
 	.if	\fast
 	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
 	.else
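
The rewritten sequences above avoid clrex because, under Cortex-A15 erratum #830321, clrex may fail to clear the local exclusive monitor, while a dummy strex always leaves the monitor in the Open state. A C-level sketch of the same trick (illustrative only; the kernel does this in assembly on the exception-return paths):

    /* Clear the local exclusive monitor with a throwaway STREX. */
    static inline void clear_exclusive_monitor(void)
    {
            unsigned long status, scratch;

            asm volatile("strex %0, %1, [%2]"
                         : "=&r" (status)
                         : "r" (0UL), "r" (&scratch)
                         : "memory");
    }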
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2c4257604513..5c4d38e32a51 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -175,7 +175,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	c = irq_data_get_irq_chip(d);
 	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
 		cpumask_copy(d->affinity, affinity);
 
 	return ret;
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 45e478157278..6a4dffefd357 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -91,6 +91,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			break;
 
 		case R_ARM_ABS32:
+		case R_ARM_TARGET1:
 			*(u32 *)loc += sym->st_value;
 			break;
 
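
For context: the ARM ELF specification leaves R_ARM_TARGET1 platform-defined as either R_ARM_ABS32 or R_ARM_REL32, and the module loader here picks ABS32. A hedged illustration of where such relocations commonly originate (compiler-generated .init_array pointer entries; exact toolchain behavior varies):

    /* A constructor like this (e.g. under CONFIG_GCOV_KERNEL) can land in
     * .init_array with an R_ARM_TARGET1 relocation, which the loader must
     * now fix up exactly like R_ARM_ABS32. */
    static void __attribute__((constructor)) mod_ctor(void)
    {
    }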
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index e6a6edbec613..4bf4cce759fe 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -76,21 +76,15 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 
 static void cpu_pmu_enable_percpu_irq(void *data)
 {
-	struct arm_pmu *cpu_pmu = data;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	int irq = platform_get_irq(pmu_device, 0);
+	int irq = *(int *)data;
 
 	enable_percpu_irq(irq, IRQ_TYPE_NONE);
-	cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
 }
 
 static void cpu_pmu_disable_percpu_irq(void *data)
 {
-	struct arm_pmu *cpu_pmu = data;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	int irq = platform_get_irq(pmu_device, 0);
+	int irq = *(int *)data;
 
-	cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
 	disable_percpu_irq(irq);
 }
 
@@ -103,7 +97,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 
 	irq = platform_get_irq(pmu_device, 0);
 	if (irq >= 0 && irq_is_percpu(irq)) {
-		on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
+		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
 		free_percpu_irq(irq, &percpu_pmu);
 	} else {
 		for (i = 0; i < irqs; ++i) {
@@ -138,7 +132,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 				irq);
 			return err;
 		}
-		on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
+		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
 	} else {
 		for (i = 0; i < irqs; ++i) {
 			err = 0;
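
The reworked callbacks receive the IRQ number directly through the cross-call argument instead of re-deriving it from the PMU's platform device on each CPU. A self-contained sketch of the pattern (enable_pmu_irq_everywhere is an illustrative name):

    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/smp.h>

    static void enable_one_percpu_irq(void *data)
    {
            int irq = *(int *)data;         /* unpack the shared argument */

            enable_percpu_irq(irq, IRQ_TYPE_NONE);
    }

    static void enable_pmu_irq_everywhere(int irq)
    {
            /* wait=1 keeps &irq on this stack live for every remote call */
            on_each_cpu(enable_one_percpu_irq, &irq, 1);
    }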
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 81ef686a91ca..a35f6ebbd2c2 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -334,6 +334,8 @@ void flush_thread(void)
 	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
 	memset(&thread->fpstate, 0, sizeof(union fp_state));
 
+	flush_tls();
+
 	thread_notify(THREAD_NOTIFY_FLUSH, thread);
 }
 
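
flush_tls() drops the previous image's TLS state on exec. A plausible minimal definition, assuming the set_tls() helper used elsewhere in this series (the real helper lives in asm/tls.h and may differ):

    static inline void flush_tls(void)
    {
            set_tls(0);     /* do not leak the old image's TLS pointer */
    }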
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 67ca8578c6d8..587fdfe1a72c 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -142,14 +142,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 	while (1) {
 		unsigned long temp;
 
-		/*
-		 * Barrier required between accessing protected resource and
-		 * releasing a lock for it. Legacy code might not have done
-		 * this, and we cannot determine that this is not the case
-		 * being emulated, so insert always.
-		 */
-		smp_mb();
-
 		if (type == TYPE_SWPB)
 			__user_swpb_asm(*data, address, res, temp);
 		else
@@ -162,13 +154,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 	}
 
 	if (res == 0) {
-		/*
-		 * Barrier also required between acquiring a lock for a
-		 * protected resource and accessing the resource. Inserted for
-		 * same reason as above.
-		 */
-		smp_mb();
-
 		if (type == TYPE_SWPB)
 			swpbcounter++;
 		else
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
index 7b8403b76666..80f0d69205e7 100644
--- a/arch/arm/kernel/thumbee.c
+++ b/arch/arm/kernel/thumbee.c
@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void
 
 	switch (cmd) {
 	case THREAD_NOTIFY_FLUSH:
-		thread->thumbee_state = 0;
+		teehbr_write(0);
 		break;
 	case THREAD_NOTIFY_SWITCH:
 		current_thread_info()->thumbee_state = teehbr_read();
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index c8e4bb714944..a964c9f40f87 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -581,7 +581,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
 asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 {
-	struct thread_info *thread = current_thread_info();
 	siginfo_t info;
 
 	if ((no >> 16) != (__ARM_NR_BASE>> 16))
@@ -632,21 +631,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		return regs->ARM_r0;
 
 	case NR(set_tls):
-		thread->tp_value[0] = regs->ARM_r0;
-		if (tls_emu)
-			return 0;
-		if (has_tls_reg) {
-			asm ("mcr p15, 0, %0, c13, c0, 3"
-				: : "r" (regs->ARM_r0));
-		} else {
-			/*
-			 * User space must never try to access this directly.
-			 * Expect your app to break eventually if you do so.
-			 * The user helper at 0xffff0fe0 must be used instead.
-			 * (see entry-armv.S for details)
-			 */
-			*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
-		}
+		set_tls(regs->ARM_r0);
 		return 0;
 
 #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
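
The open-coded sequence removed above is absorbed into a set_tls() helper. Reconstructed from the deleted lines, it amounts to something like this (the actual helper lives in asm/tls.h and may differ in detail):

    static inline void set_tls(unsigned long val)
    {
            current_thread_info()->tp_value[0] = val;

            if (tls_emu)
                    return;
            if (has_tls_reg) {
                    asm("mcr p15, 0, %0, c13, c0, 3" : : "r" (val));
            } else {
                    /*
                     * User space must never touch this word directly; the
                     * helper at 0xffff0fe0 must be used instead (see
                     * entry-armv.S for details).
                     */
                    *((unsigned int *)0xffff0ff0) = val;
            }
    }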