path: root/arch/arc/kernel
author	Thomas Gleixner <tglx@linutronix.de>	2016-01-12 05:01:12 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2016-01-12 05:01:12 -0500
commit	1f16f116b01c110db20ab808562c8b8bc3ee3d6e (patch)
tree	44db563f64cf5f8d62af8f99a61e2b248c44ea3a /arch/arc/kernel
parent	03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 (diff)
parent	f9eccf24615672896dc13251410c3f2f33a14f95 (diff)
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:
 - Fix the vt8500 timer, which could lock up the system when handling a
   too-small delta (Roman Volkov)
 - Select CLKSRC_MMIO when the fsl_ftm_timer is enabled with COMPILE_TEST
   (Daniel Lezcano)
 - Prevent timers that use the 'iomem' API from being compiled when the
   architecture does not have HAS_IOMEM set (Richard Weinberger)
Diffstat (limited to 'arch/arc/kernel')
-rw-r--r--	arch/arc/kernel/ctx_sw.c	2
-rw-r--r--	arch/arc/kernel/ctx_sw_asm.S	3
-rw-r--r--	arch/arc/kernel/intc-arcv2.c	15
-rw-r--r--	arch/arc/kernel/irq.c	33
-rw-r--r--	arch/arc/kernel/mcip.c	2
-rw-r--r--	arch/arc/kernel/perf_event.c	32
-rw-r--r--	arch/arc/kernel/process.c	9
-rw-r--r--	arch/arc/kernel/setup.c	1
-rw-r--r--	arch/arc/kernel/smp.c	8
-rw-r--r--	arch/arc/kernel/unwind.c	94
10 files changed, 93 insertions(+), 106 deletions(-)
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
index c14a5bea0c76..5d446df2c413 100644
--- a/arch/arc/kernel/ctx_sw.c
+++ b/arch/arc/kernel/ctx_sw.c
@@ -58,8 +58,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
58 "st sp, [r24] \n\t" 58 "st sp, [r24] \n\t"
59#endif 59#endif
60 60
61 "sync \n\t"
62
63 /* 61 /*
64 * setup _current_task with incoming tsk. 62 * setup _current_task with incoming tsk.
65 * optionally, set r25 to that as well 63 * optionally, set r25 to that as well
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
index e248594097e7..e6890b1f8650 100644
--- a/arch/arc/kernel/ctx_sw_asm.S
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -44,9 +44,6 @@ __switch_to:
 	 * don't need to do anything special to return it
 	 */
 
-	/* hardware memory barrier */
-	sync
-
 	/*
 	 * switch to new task, contained in r1
 	 * Temp reg r3 is required to get the ptr to store val
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 26c156827479..0394f9f61b46 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -106,10 +106,21 @@ static struct irq_chip arcv2_irq_chip = {
 static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
 			 irq_hw_number_t hw)
 {
-	if (irq == TIMER0_IRQ || irq == IPI_IRQ)
+	/*
+	 * core intc IRQs [16, 23]:
+	 * Statically assigned always private-per-core (Timers, WDT, IPI, PCT)
+	 */
+	if (hw < 24) {
+		/*
+		 * A subsequent request_percpu_irq() fails if percpu_devid is
+		 * not set. That in turn sets NOAUTOEN, meaning each core needs
+		 * to call enable_percpu_irq()
+		 */
+		irq_set_percpu_devid(irq);
 		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
-	else
+	} else {
 		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
+	}
 
 	return 0;
 }
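
Aside: the percpu_devid comment above encodes the genirq contract for per-cpu interrupts. A rough consumer-side sketch of the lifecycle the map function enables (my_hwirq, my_isr and my_pcpu_dev are illustrative names, not from this patch):

    static DEFINE_PER_CPU(int, my_pcpu_dev);

    static irqreturn_t my_isr(int irq, void *dev)
    {
    	/* dev points at the invoking CPU's my_pcpu_dev instance */
    	return IRQ_HANDLED;
    }

    /* once, typically on the boot CPU: register; NOAUTOEN keeps it off */
    request_percpu_irq(my_hwirq, my_isr, "my-percpu-dev", &my_pcpu_dev);

    /* then on each CPU that should take it, locally: */
    enable_percpu_irq(my_hwirq, IRQ_TYPE_NONE);

request_percpu_irq() fails unless irq_set_percpu_devid() was called first, which is exactly what the map function now guarantees for hwirq < 24.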
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 2ee226546c6a..ba17f85285cf 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -29,11 +29,11 @@ void __init init_IRQ(void)
 
 #ifdef CONFIG_SMP
 	/* a SMP H/w block could do IPI IRQ request here */
-	if (plat_smp_ops.init_irq_cpu)
-		plat_smp_ops.init_irq_cpu(smp_processor_id());
+	if (plat_smp_ops.init_per_cpu)
+		plat_smp_ops.init_per_cpu(smp_processor_id());
 
-	if (machine_desc->init_cpu_smp)
-		machine_desc->init_cpu_smp(smp_processor_id());
+	if (machine_desc->init_per_cpu)
+		machine_desc->init_per_cpu(smp_processor_id());
 #endif
 }
 
@@ -51,6 +51,18 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
+/*
+ * API called for requesting percpu interrupts - called by each CPU
+ *  - For boot CPU, actually request the IRQ with genirq core + enables it
+ *  - For subsequent callers, only the local enable is done
+ *
+ * Relies on being called by the boot cpu first (i.e. request called ahead
+ * of any enable), as expected by genirq. Hence suitable only for TIMER, IPI
+ * which are guaranteed to be set up on the boot core first.
+ * Late probed peripherals such as perf can't use this as there is no
+ * guarantee of being called on the boot CPU first.
+ */
+
 void arc_request_percpu_irq(int irq, int cpu,
 			    irqreturn_t (*isr)(int irq, void *dev),
 			    const char *irq_nm,
@@ -60,14 +72,17 @@ void arc_request_percpu_irq(int irq, int cpu,
 	if (!cpu) {
 		int rc;
 
+#ifdef CONFIG_ISA_ARCOMPACT
 		/*
-		 * These 2 calls are essential to making percpu IRQ APIs work
-		 * Ideally these details could be hidden in irq chip map function
-		 * but the issue is IPIs IRQs being static (non-DT) and platform
-		 * specific, so we can't identify them there.
+		 * A subsequent request_percpu_irq() fails if percpu_devid is
+		 * not set. That in turn sets NOAUTOEN, meaning each core needs
+		 * to call enable_percpu_irq()
+		 *
+		 * For ARCv2, this is done in the irq map function since we
+		 * know which irqs are strictly per cpu
 		 */
 		irq_set_percpu_devid(irq);
-		irq_modify_status(irq, IRQ_NOAUTOEN, 0);	/* @irq, @clr, @set */
+#endif
 
 		rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
 		if (rc)
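
Aside: the comment spells out the ordering this wrapper depends on: the boot CPU's call performs the request (and first enable), and every later caller only does the local enable, matching genirq's expectation that a per-cpu IRQ is requested exactly once before any enable. A sketch of a conforming caller, loosely modeled on the per-cpu timer setup path (handler and clockevent names are illustrative):

    static DEFINE_PER_CPU(struct clock_event_device, my_pcpu_clockevent);

    /* runs on every core as it is brought up, boot core first */
    void my_local_timer_setup(void)
    {
    	int cpu = smp_processor_id();

    	arc_request_percpu_irq(TIMER0_IRQ, cpu, timer_irq_handler,
    			       "Timer0 (per-cpu-tick)",
    			       &my_pcpu_clockevent);
    }

Perf cannot use this wrapper: its probe may execute on a non-boot CPU, so the perf_event.c change below switches to a bare request_percpu_irq() plus an on_each_cpu() enable instead.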
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 74a9b074ac3e..bd237acdf4f2 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -132,7 +132,7 @@ static void mcip_probe_n_setup(void)
 struct plat_smp_ops plat_smp_ops = {
 	.info		= smp_cpuinfo_buf,
 	.init_early_smp	= mcip_probe_n_setup,
-	.init_irq_cpu	= mcip_setup_per_cpu,
+	.init_per_cpu	= mcip_setup_per_cpu,
 	.ipi_send	= mcip_ipi_send,
 	.ipi_clear	= mcip_ipi_clear,
 };
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 0c08bb1ce15a..8b134cfe5e1f 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -428,12 +428,11 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 
 #endif /* CONFIG_ISA_ARCV2 */
 
-void arc_cpu_pmu_irq_init(void)
+static void arc_cpu_pmu_irq_init(void *data)
 {
-	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
+	int irq = *(int *)data;
 
-	arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr,
-			       "ARC perf counters", pmu_cpu);
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 
 	/* Clear all pending interrupt flags */
 	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
@@ -515,7 +514,6 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
 	if (has_interrupts) {
 		int irq = platform_get_irq(pdev, 0);
-		unsigned long flags;
 
 		if (irq < 0) {
 			pr_err("Cannot get IRQ number for the platform\n");
@@ -524,24 +522,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
 		arc_pmu->irq = irq;
 
-		/*
-		 * arc_cpu_pmu_irq_init() needs to be called on all cores for
-		 * their respective local PMU.
-		 * However we use opencoded on_each_cpu() to ensure it is called
-		 * on core0 first, so that arc_request_percpu_irq() sets up
-		 * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable
-		 * perf IRQ on non master cores.
-		 * see arc_request_percpu_irq()
-		 */
-		preempt_disable();
-		local_irq_save(flags);
-		arc_cpu_pmu_irq_init();
-		local_irq_restore(flags);
-		smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1);
-		preempt_enable();
-
-		/* Clean all pending interrupt flags */
-		write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
+		/* intc map function ensures irq_set_percpu_devid() called */
+		request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
+				   this_cpu_ptr(&arc_pmu_cpu));
+
+		on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
+
 	} else
 		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 
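
Aside: two details make the new flow work. First, the cross-call callback type is

    /* include/linux/smp.h */
    typedef void (*smp_call_func_t)(void *info);

so the old cast of a void (*)(void) function was type-unsafe; arc_cpu_pmu_irq_init() now has the matching prototype and receives &irq as info. Second, on_each_cpu(func, info, 1) runs func on every online CPU including the caller and waits for completion; the callback executes with interrupts disabled, so it is limited to non-sleeping work such as enable_percpu_irq() and the PCT register write.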
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 91d5a0f1f3f7..a3f750e76b68 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -44,11 +44,10 @@ SYSCALL_DEFINE0(arc_gettls)
 void arch_cpu_idle(void)
 {
 	/* sleep, but enable all interrupts before committing */
-	if (is_isa_arcompact()) {
-		__asm__("sleep 0x3");
-	} else {
-		__asm__("sleep 0x10");
-	}
+	__asm__ __volatile__(
+		"sleep %0	\n"
+		:
+		:"I"(ISA_SLEEP_ARG)); /* can't be "r", has to be embedded const */
 }
 
 asmlinkage void ret_from_fork(void);
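
Aside: the "I" constraint is the point of this rewrite. The sleep instruction encodes its argument as an immediate inside the opcode, so the operand must be a compile-time constant; an "r" constraint would ask the compiler to materialize ISA_SLEEP_ARG in a register, which the instruction cannot consume. A reduced sketch of the idiom (the 0x10 value mirrors the removed ARCv2 branch; the real ISA_SLEEP_ARG comes from an ARC header selected per ISA):

    #define ISA_SLEEP_ARG	0x10	/* e.g. the ARCv2 value */

    static inline void cpu_sleep(void)
    {
    	/* %0 must fold to a constant in the opcode, hence "I" not "r" */
    	__asm__ __volatile__("sleep %0	\n" : : "I"(ISA_SLEEP_ARG));
    }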
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index c33e77c0ad3e..e1b87444ea9a 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -429,7 +429,6 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 	arc_unwind_init();
-	arc_unwind_setup();
 }
 
 static int __init customize_machine(void)
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 580587805fa3..ef6e9e15b82a 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -132,11 +132,11 @@ void start_kernel_secondary(void)
 	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
 
 	/* Some SMP H/w setup - for each cpu */
-	if (plat_smp_ops.init_irq_cpu)
-		plat_smp_ops.init_irq_cpu(cpu);
+	if (plat_smp_ops.init_per_cpu)
+		plat_smp_ops.init_per_cpu(cpu);
 
-	if (machine_desc->init_cpu_smp)
-		machine_desc->init_cpu_smp(cpu);
+	if (machine_desc->init_per_cpu)
+		machine_desc->init_per_cpu(cpu);
 
 	arc_local_timer_setup();
 
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 93c6ea52b671..5eb707640e9c 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -170,6 +170,23 @@ static struct unwind_table *find_table(unsigned long pc)
 
 static unsigned long read_pointer(const u8 **pLoc,
 				  const void *end, signed ptrType);
+static void init_unwind_hdr(struct unwind_table *table,
+			    void *(*alloc) (unsigned long));
+
+/*
+ * wrappers for header alloc (vs. calling one or the other at call site)
+ * to elide section mismatch warnings
+ */
+static void *__init unw_hdr_alloc_early(unsigned long sz)
+{
+	return __alloc_bootmem_nopanic(sz, sizeof(unsigned int),
+				       MAX_DMA_ADDRESS);
+}
+
+static void *unw_hdr_alloc(unsigned long sz)
+{
+	return kmalloc(sz, GFP_KERNEL);
+}
 
 static void init_unwind_table(struct unwind_table *table, const char *name,
 			      const void *core_start, unsigned long core_size,
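
Aside: the allocator is passed in as a function pointer because init_unwind_hdr() runs both at boot (bootmem, before the slab allocator is up) and at module load (kmalloc, long after init memory is freed). Keeping the bootmem call inside an __init wrapper means no non-init code references an init-section symbol, which is what would otherwise trigger a modpost section-mismatch warning. A minimal sketch of the pattern (names illustrative):

    static void *__init alloc_early(unsigned long sz)	/* lives in .init.text */
    {
    	return my_bootmem_alloc(sz);	/* hypothetical early allocator */
    }

    static void *alloc_late(unsigned long sz)		/* regular .text */
    {
    	return kmalloc(sz, GFP_KERNEL);
    }

    /* shared code only sees the callback, never an __init symbol */
    static void *build_hdr(void *(*alloc)(unsigned long sz), unsigned long sz)
    {
    	return alloc(sz);
    }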
@@ -209,6 +226,8 @@ void __init arc_unwind_init(void)
 			  __start_unwind, __end_unwind - __start_unwind,
 			  NULL, 0);
 	  /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
+
+	init_unwind_hdr(&root_table, unw_hdr_alloc_early);
 }
 
 static const u32 bad_cie, not_fde;
@@ -241,8 +260,8 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
 	e2->fde = v;
 }
 
-static void __init setup_unwind_table(struct unwind_table *table,
-				      void *(*alloc) (unsigned long))
+static void init_unwind_hdr(struct unwind_table *table,
+			    void *(*alloc) (unsigned long))
 {
 	const u8 *ptr;
 	unsigned long tableSize = table->size, hdrSize;
@@ -277,10 +296,10 @@ static void __init setup_unwind_table(struct unwind_table *table,
 		if (cie == &not_fde)
 			continue;
 		if (cie == NULL || cie == &bad_cie)
-			return;
+			goto ret_err;
 		ptrType = fde_pointer_type(cie);
 		if (ptrType < 0)
-			return;
+			goto ret_err;
 
 		ptr = (const u8 *)(fde + 2);
 		if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
@@ -296,13 +315,15 @@ static void __init setup_unwind_table(struct unwind_table *table,
 	}
 
 	if (tableSize || !n)
-		return;
+		goto ret_err;
 
 	hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
 		+ 2 * n * sizeof(unsigned long);
+
 	header = alloc(hdrSize);
 	if (!header)
-		return;
+		goto ret_err;
+
 	header->version = 1;
 	header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
 	header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
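
Aside: the header being populated here follows the usual .eh_frame_hdr shape: a version byte, three DW_EH_PE_* encoding bytes, the frame pointer and FDE count, then a table of (initial PC, FDE address) pairs sorted by PC, which is what makes binary-search lookup possible. A sketch of the layout implied by the hdrSize arithmetic above (field names mirror the code; this is illustrative, not the struct verbatim):

    struct eh_frame_hdr {
    	u8  version;			/* 1 */
    	u8  eh_frame_ptr_enc;		/* DW_EH_PE_abs | DW_EH_PE_native */
    	u8  fde_count_enc;		/* DW_EH_PE_abs | DW_EH_PE_data4 */
    	u8  table_enc;			/* encoding of the entries below */
    	unsigned long eh_frame_ptr;
    	unsigned int  fde_count;
    	struct {
    		unsigned long start;	/* initial location covered by FDE */
    		unsigned long fde;	/* address of that FDE */
    	} table[];			/* sorted by start */
    };

which accounts for hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int) + 2 * n * sizeof(unsigned long).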
@@ -340,18 +361,10 @@ static void __init setup_unwind_table(struct unwind_table *table,
 	table->hdrsz = hdrSize;
 	smp_wmb();
 	table->header = (const void *)header;
-}
-
-static void *__init balloc(unsigned long sz)
-{
-	return __alloc_bootmem_nopanic(sz,
-				       sizeof(unsigned int),
-				       __pa(MAX_DMA_ADDRESS));
-}
+	return;
 
-void __init arc_unwind_setup(void)
-{
-	setup_unwind_table(&root_table, balloc);
+ret_err:
+	panic("Attention !!! Dwarf FDE parsing errors\n");
 }
 
 #ifdef CONFIG_MODULES
@@ -377,6 +390,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
 				  table_start, table_size,
 				  NULL, 0);
 
+	init_unwind_hdr(table, unw_hdr_alloc);
+
 #ifdef UNWIND_DEBUG
 	unw_debug("Table added for [%s] %lx %lx\n",
 		  module->name, table->core.pc, table->core.range);
@@ -439,6 +454,7 @@ void unwind_remove_table(void *handle, int init_only)
 	info.init_only = init_only;
 
 	unlink_table(&info); /* XXX: SMP */
+	kfree(table->header);
 	kfree(table);
 }
 
@@ -588,9 +604,6 @@ static signed fde_pointer_type(const u32 *cie)
 	const u8 *ptr = (const u8 *)(cie + 2);
 	unsigned version = *ptr;
 
-	if (version != 1)
-		return -1;	/* unsupported */
-
 	if (*++ptr) {
 		const char *aug;
 		const u8 *end = (const u8 *)(cie + 1) + *cie;
@@ -986,42 +999,13 @@ int arc_unwind(struct unwind_frame_info *frame)
 						       (const u8 *)(fde +
 								    1) +
 						       *fde, ptrType);
-				if (pc >= endLoc)
+				if (pc >= endLoc) {
 					fde = NULL;
-			} else
-				fde = NULL;
-		}
-		if (fde == NULL) {
-			for (fde = table->address, tableSize = table->size;
-			     cie = NULL, tableSize > sizeof(*fde)
-			     && tableSize - sizeof(*fde) >= *fde;
-			     tableSize -= sizeof(*fde) + *fde,
-			     fde += 1 + *fde / sizeof(*fde)) {
-				cie = cie_for_fde(fde, table);
-				if (cie == &bad_cie) {
 					cie = NULL;
-					break;
 				}
-				if (cie == NULL
-				    || cie == &not_fde
-				    || (ptrType = fde_pointer_type(cie)) < 0)
-					continue;
-				ptr = (const u8 *)(fde + 2);
-				startLoc = read_pointer(&ptr,
-							(const u8 *)(fde + 1) +
-							*fde, ptrType);
-				if (!startLoc)
-					continue;
-				if (!(ptrType & DW_EH_PE_indirect))
-					ptrType &=
-					    DW_EH_PE_FORM | DW_EH_PE_signed;
-				endLoc =
-				    startLoc + read_pointer(&ptr,
-							    (const u8 *)(fde +
-									 1) +
-							    *fde, ptrType);
-				if (pc >= startLoc && pc < endLoc)
-					break;
+			} else {
+				fde = NULL;
+				cie = NULL;
 			}
 		}
 	}
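
Aside: since init_unwind_hdr() now runs for every table (boot and modules alike), the sorted header table always exists, and the linear re-scan of all FDEs removed here was dead weight; a miss in the header lookup simply means no FDE covers this PC. The lookup this relies on is a predecessor binary search over the table sketched earlier, roughly:

    /* find the last entry with start <= pc; table is sorted by start */
    unsigned int lo = 0, hi = hdr->fde_count;

    while (lo < hi) {
    	unsigned int mid = lo + (hi - lo) / 2;

    	if (pc < hdr->table[mid].start)
    		hi = mid;
    	else
    		lo = mid + 1;
    }
    /* if lo > 0, hdr->table[lo - 1].fde is the candidate FDE for pc */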
@@ -1031,9 +1015,7 @@ int arc_unwind(struct unwind_frame_info *frame)
 			ptr = (const u8 *)(cie + 2);
 			end = (const u8 *)(cie + 1) + *cie;
 			frame->call_frame = 1;
-			if ((state.version = *ptr) != 1)
-				cie = NULL;	/* unsupported version */
-			else if (*++ptr) {
+			if (*++ptr) {
 				/* check if augmentation size is first (thus present) */
 				if (*ptr == 'z') {
 					while (++ptr < end && *ptr) {