Diffstat (limited to 'arch/arc/kernel')
 -rw-r--r--  arch/arc/kernel/Makefile       |   2
 -rw-r--r--  arch/arc/kernel/clk.c          |  21
 -rw-r--r--  arch/arc/kernel/ctx_sw.c       |  13
 -rw-r--r--  arch/arc/kernel/devtree.c      |  13
 -rw-r--r--  arch/arc/kernel/intc-arcv2.c   |  17
 -rw-r--r--  arch/arc/kernel/intc-compact.c |  17
 -rw-r--r--  arch/arc/kernel/irq.c          |  50
 -rw-r--r--  arch/arc/kernel/mcip.c         |   7
 -rw-r--r--  arch/arc/kernel/setup.c        |  17
 -rw-r--r--  arch/arc/kernel/smp.c          |  25
 -rw-r--r--  arch/arc/kernel/time.c         | 238
 11 files changed, 230 insertions, 190 deletions
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
index 1bc2036b19d7..cfcdedf52ff8 100644
--- a/arch/arc/kernel/Makefile
+++ b/arch/arc/kernel/Makefile
@@ -9,7 +9,7 @@
 CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 obj-y := arcksyms.o setup.o irq.o time.o reset.o ptrace.o process.o devtree.o
-obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o clk.o
+obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o
 obj-$(CONFIG_ISA_ARCOMPACT) += entry-compact.o intc-compact.o
 obj-$(CONFIG_ISA_ARCV2) += entry-arcv2.o intc-arcv2.o
 obj-$(CONFIG_PCI) += pcibios.o
diff --git a/arch/arc/kernel/clk.c b/arch/arc/kernel/clk.c
deleted file mode 100644
index 10c7b0b5a079..000000000000
--- a/arch/arc/kernel/clk.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <asm/clk.h>
-
-unsigned long core_freq = 80000000;
-
-/*
- * As of now we default to device-tree provided clock
- * In future we can determine this in early boot
- */
-int arc_set_core_freq(unsigned long freq)
-{
-        core_freq = freq;
-        return 0;
-}
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
index 5d446df2c413..6f4cb0dab1b9 100644
--- a/arch/arc/kernel/ctx_sw.c
+++ b/arch/arc/kernel/ctx_sw.c
@@ -16,6 +16,9 @@
 
 #include <asm/asm-offsets.h>
 #include <linux/sched.h>
+#ifdef CONFIG_ARC_PLAT_EZNPS
+#include <plat/ctop.h>
+#endif
 
 #define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
 
@@ -67,9 +70,16 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
 #ifndef CONFIG_SMP
         "st %2, [@_current_task] \n\t"
 #else
+#ifdef CONFIG_ARC_PLAT_EZNPS
+        "lr r24, [%4] \n\t"
+#ifndef CONFIG_EZNPS_MTM_EXT
+        "lsr r24, r24, 4 \n\t"
+#endif
+#else
         "lr r24, [identity] \n\t"
         "lsr r24, r24, 8 \n\t"
         "bmsk r24, r24, 7 \n\t"
+#endif
         "add2 r24, @_current_task, r24 \n\t"
         "st %2, [r24] \n\t"
 #endif
@@ -107,6 +117,9 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
 
         : "=r"(tmp)
         : "n"(KSP_WORD_OFF), "r"(next), "r"(prev)
+#ifdef CONFIG_ARC_PLAT_EZNPS
+        , "i"(CTOP_AUX_LOGIC_GLOBAL_ID)
+#endif
         : "blink"
         );
 
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index 7e844fd8213f..f1e07c2344f8 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -14,7 +14,6 @@
 #include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
-#include <asm/clk.h>
 #include <asm/mach_desc.h>
 
 #ifdef CONFIG_SERIAL_EARLYCON
@@ -28,14 +27,12 @@ unsigned int __init arc_early_base_baud(void)
 
 static void __init arc_set_early_base_baud(unsigned long dt_root)
 {
-        unsigned int core_clk = arc_get_core_freq();
-
         if (of_flat_dt_is_compatible(dt_root, "abilis,arc-tb10x"))
-                arc_base_baud = core_clk/3;
+                arc_base_baud = 166666666;      /* Fixed 166.6MHz clk (TB10x) */
         else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp"))
                 arc_base_baud = 33333333;       /* Fixed 33MHz clk (AXS10x) */
         else
-                arc_base_baud = core_clk;
+                arc_base_baud = 50000000;       /* Fixed default 50MHz */
 }
 #else
 #define arc_set_early_base_baud(dt_root)
@@ -65,8 +62,6 @@ const struct machine_desc * __init setup_machine_fdt(void *dt)
 {
         const struct machine_desc *mdesc;
         unsigned long dt_root;
-        const void *clk;
-        int len;
 
         if (!early_init_dt_scan(dt))
                 return NULL;
@@ -76,10 +71,6 @@ const struct machine_desc * __init setup_machine_fdt(void *dt)
                 machine_halt();
 
         dt_root = of_get_flat_dt_root();
-        clk = of_get_flat_dt_prop(dt_root, "clock-frequency", &len);
-        if (clk)
-                arc_set_core_freq(of_read_ulong(clk, len/4));
-
         arc_set_early_base_baud(dt_root);
 
         return mdesc;
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 942526322ae7..6c24faf48b16 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -137,23 +137,30 @@ static const struct irq_domain_ops arcv2_irq_ops = {
         .map = arcv2_irq_map,
 };
 
-static struct irq_domain *root_domain;
 
 static int __init
 init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
 {
+        struct irq_domain *root_domain;
+
         if (parent)
                 panic("DeviceTree incore intc not a root irq controller\n");
 
-        root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0,
-                                            &arcv2_irq_ops, NULL);
-
+        root_domain = irq_domain_add_linear(intc, NR_CPU_IRQS, &arcv2_irq_ops, NULL);
         if (!root_domain)
                 panic("root irq domain not avail\n");
 
-        /* with this we don't need to export root_domain */
+        /*
+         * Needed for primary domain lookup to succeed
+         * This is a primary irqchip, and can never have a parent
+         */
         irq_set_default_host(root_domain);
 
+#ifdef CONFIG_SMP
+        irq_create_mapping(root_domain, IPI_IRQ);
+#endif
+        irq_create_mapping(root_domain, SOFTIRQ_IRQ);
+
         return 0;
 }
 
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index 224d1c3aa9c4..c5cceca36118 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -14,6 +14,8 @@
 #include <linux/irqchip.h>
 #include <asm/irq.h>
 
+#define TIMER0_IRQ 3    /* Fixed by ISA */
+
 /*
  * Early Hardware specific Interrupt setup
  * -Platform independent, needed for each CPU (not foldable into init_IRQ)
@@ -79,8 +81,9 @@ static struct irq_chip onchip_intc = {
 static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
                                irq_hw_number_t hw)
 {
-        switch (irq) {
+        switch (hw) {
         case TIMER0_IRQ:
+                irq_set_percpu_devid(irq);
                 irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
                 break;
         default:
@@ -94,21 +97,23 @@ static const struct irq_domain_ops arc_intc_domain_ops = {
         .map = arc_intc_domain_map,
 };
 
-static struct irq_domain *root_domain;
-
 static int __init
 init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
 {
+        struct irq_domain *root_domain;
+
         if (parent)
                 panic("DeviceTree incore intc not a root irq controller\n");
 
-        root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0,
+        root_domain = irq_domain_add_linear(intc, NR_CPU_IRQS,
                                             &arc_intc_domain_ops, NULL);
-
         if (!root_domain)
                 panic("root irq domain not avail\n");
 
-        /* with this we don't need to export root_domain */
+        /*
+         * Needed for primary domain lookup to succeed
+         * This is a primary irqchip, and can never have a parent
+         */
         irq_set_default_host(root_domain);
 
         return 0;
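Both intc drivers above move from irq_domain_add_legacy() to irq_domain_add_linear(), so Linux IRQ numbers are no longer pre-allocated 1:1 with hardware lines; a mapping is created on demand and ends up in the domain's .map hook (arc_intc_domain_map() above), which is why the per-cpu marking for TIMER0 now lives there. A minimal sketch of the two usual ways such a mapping comes into existence; this is not code from the patch, and "root_domain"/"dev_node" are placeholders:

#include <linux/irqdomain.h>
#include <linux/of_irq.h>

/* Sketch only: obtaining a virq for a hwirq once the controller uses a linear domain */
static unsigned int example_get_virq(struct irq_domain *root_domain,
                                     irq_hw_number_t hwirq,
                                     struct device_node *dev_node)
{
        /* explicit creation, as done above for IPI_IRQ / SOFTIRQ_IRQ */
        unsigned int virq = irq_create_mapping(root_domain, hwirq);

        /* or resolved lazily from a consumer's DT "interrupts" property */
        if (!virq && dev_node)
                virq = irq_of_parse_and_map(dev_node, 0);

        return virq;    /* 0 means no mapping could be created */
}

Either path invokes the domain's .map callback the first time a given hardware line is mapped, so per-line setup (handler type, per-cpu marking) belongs there rather than in a one-shot init function.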
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index ba17f85285cf..538b36afe89e 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -41,53 +41,7 @@ void __init init_IRQ(void)
  * "C" Entry point for any ARC ISR, called from low level vector handler
  * @irq is the vector number read from ICAUSE reg of on-chip intc
  */
-void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
+void arch_do_IRQ(unsigned int hwirq, struct pt_regs *regs)
 {
-        struct pt_regs *old_regs = set_irq_regs(regs);
-
-        irq_enter();
-        generic_handle_irq(irq);
-        irq_exit();
-        set_irq_regs(old_regs);
-}
-
-/*
- * API called for requesting percpu interrupts - called by each CPU
- * - For boot CPU, actually request the IRQ with genirq core + enables
- * - For subsequent callers only enable called locally
- *
- * Relies on being called by boot cpu first (i.e. request called ahead) of
- * any enable as expected by genirq. Hence Suitable only for TIMER, IPI
- * which are guaranteed to be setup on boot core first.
- * Late probed peripherals such as perf can't use this as there no guarantee
- * of being called on boot CPU first.
- */
-
-void arc_request_percpu_irq(int irq, int cpu,
-                            irqreturn_t (*isr)(int irq, void *dev),
-                            const char *irq_nm,
-                            void *percpu_dev)
-{
-        /* Boot cpu calls request, all call enable */
-        if (!cpu) {
-                int rc;
-
-#ifdef CONFIG_ISA_ARCOMPACT
-                /*
-                 * A subsequent request_percpu_irq() fails if percpu_devid is
-                 * not set. That in turns sets NOAUTOEN, meaning each core needs
-                 * to call enable_percpu_irq()
-                 *
-                 * For ARCv2, this is done in irq map function since we know
-                 * which irqs are strictly per cpu
-                 */
-                irq_set_percpu_devid(irq);
-#endif
-
-                rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
-                if (rc)
-                        panic("Percpu IRQ request failed for %d\n", irq);
-        }
-
-        enable_percpu_irq(irq, 0);
+        handle_domain_irq(NULL, hwirq, regs);
 }
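handle_domain_irq(NULL, hwirq, regs) bundles the bookkeeping that arch_do_IRQ() used to open-code, plus the hwirq-to-virq lookup in the default domain installed by irq_set_default_host() above. Roughly, and only as a simplified sketch of the generic helper rather than code from this patch:

#include <linux/hardirq.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <asm/irq_regs.h>

/* Simplified sketch of what the generic helper does when passed a NULL domain */
static void example_handle_domain_irq(unsigned int hwirq, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int virq;

        irq_enter();

        /* a NULL domain falls back to the default domain (irq_set_default_host) */
        virq = irq_find_mapping(NULL, hwirq);
        if (virq)
                generic_handle_irq(virq);

        irq_exit();
        set_irq_regs(old_regs);
}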
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index c41c364b926c..72f9179b1a24 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -15,9 +15,6 @@
 #include <asm/mcip.h>
 #include <asm/setup.h>
 
-#define IPI_IRQ 19
-#define SOFTIRQ_IRQ 21
-
 static char smp_cpuinfo_buf[128];
 static int idu_detected;
 
@@ -116,15 +113,13 @@ static void mcip_probe_n_setup(void)
                 IS_AVAIL1(mp.dbg, "DEBUG "),
                 IS_AVAIL1(mp.gfrc, "GFRC"));
 
+        cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
         idu_detected = mp.idu;
 
         if (mp.dbg) {
                 __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
                 __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
         }
-
-        if (IS_ENABLED(CONFIG_ARC_HAS_GFRC) && !mp.gfrc)
-                panic("kernel trying to use non-existent GFRC\n");
 }
 
 struct plat_smp_ops plat_smp_ops = {
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 151acf0c9383..f63b8bfefb0c 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -13,7 +13,6 @@
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/cpu.h>
-#include <linux/clk-provider.h>
 #include <linux/of_fdt.h>
 #include <linux/of_platform.h>
 #include <linux/cache.h>
@@ -24,7 +23,6 @@
 #include <asm/page.h>
 #include <asm/irq.h>
 #include <asm/unwind.h>
-#include <asm/clk.h>
 #include <asm/mach_desc.h>
 #include <asm/smp.h>
 
@@ -220,10 +218,6 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
         if (tbl->info.id == 0)
                 n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
 
-        n += scnprintf(buf + n, len - n, "CPU speed\t: %u.%02u Mhz\n",
-                       (unsigned int)(arc_get_core_freq() / 1000000),
-                       (unsigned int)(arc_get_core_freq() / 10000) % 100);
-
         n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
                        IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
                        IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
@@ -314,9 +308,6 @@ static void arc_chk_core_config(void)
         if (!cpu->extn.timer1)
                 panic("Timer1 is not present!\n");
 
-        if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->extn.rtc)
-                panic("RTC is not present\n");
-
 #ifdef CONFIG_ARC_HAS_DCCM
         /*
          * DCCM can be arbit placed in hardware.
@@ -444,7 +435,6 @@ void __init setup_arch(char **cmdline_p)
 
 static int __init customize_machine(void)
 {
-        of_clk_init(NULL);
         /*
          * Traverses flattened DeviceTree - registering platform devices
          * (if any) complete with their resources
@@ -477,6 +467,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 {
         char *str;
         int cpu_id = ptr_to_cpu(v);
+        struct device_node *core_clk = of_find_node_by_name(NULL, "core_clk");
+        u32 freq = 0;
 
         if (!cpu_online(cpu_id)) {
                 seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
@@ -489,6 +481,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
         seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
 
+        of_property_read_u32(core_clk, "clock-frequency", &freq);
+        if (freq)
+                seq_printf(m, "CPU speed\t: %u.%02u Mhz\n",
+                           freq / 1000000, (freq / 10000) % 100);
+
         seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
                    loops_per_jiffy / (500000 / HZ),
                    (loops_per_jiffy / (5000 / HZ)) % 100);
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 4cb3add77c75..f183cc648851 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -126,11 +126,6 @@ void start_kernel_secondary(void)
         current->active_mm = mm;
         cpumask_set_cpu(cpu, mm_cpumask(mm));
 
-        notify_cpu_starting(cpu);
-        set_cpu_online(cpu, true);
-
-        pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
-
         /* Some SMP H/w setup - for each cpu */
         if (plat_smp_ops.init_per_cpu)
                 plat_smp_ops.init_per_cpu(cpu);
@@ -138,7 +133,10 @@ void start_kernel_secondary(void)
         if (machine_desc->init_per_cpu)
                 machine_desc->init_per_cpu(cpu);
 
-        arc_local_timer_setup();
+        notify_cpu_starting(cpu);
+        set_cpu_online(cpu, true);
+
+        pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
 
         local_irq_enable();
         preempt_disable();
@@ -346,6 +344,10 @@ irqreturn_t do_IPI(int irq, void *dev_id)
 
 /*
  * API called by platform code to hookup arch-common ISR to their IPI IRQ
+ *
+ * Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
+ * function needs to call call irq_set_percpu_devid() for IPI IRQ, otherwise
+ * request_percpu_irq() below will fail
  */
 static DEFINE_PER_CPU(int, ipi_dev);
 
@@ -353,7 +355,16 @@ int smp_ipi_irq_setup(int cpu, int irq)
 {
         int *dev = per_cpu_ptr(&ipi_dev, cpu);
 
-        arc_request_percpu_irq(irq, cpu, do_IPI, "IPI Interrupt", dev);
+        /* Boot cpu calls request, all call enable */
+        if (!cpu) {
+                int rc;
+
+                rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
+                if (rc)
+                        panic("Percpu IRQ request failed for %d\n", irq);
+        }
+
+        enable_percpu_irq(irq, 0);
 
         return 0;
 }
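The comment added above spells out the contract that used to be hidden inside arc_request_percpu_irq(): the interrupt controller's map hook must mark the IPI line per-cpu before request_percpu_irq() in smp_ipi_irq_setup() can succeed. A hypothetical platform intc map callback satisfying that contract could look roughly like this; the names and hwirq number are placeholders, not part of this patch:

#include <linux/irq.h>
#include <linux/irqdomain.h>

#define EXAMPLE_IPI_HWIRQ 5                     /* hypothetical IPI line */

static struct irq_chip example_intc_chip;       /* hypothetical chip, ops elided */

static int example_intc_map(struct irq_domain *d, unsigned int virq,
                            irq_hw_number_t hw)
{
        if (hw == EXAMPLE_IPI_HWIRQ) {
                /* allows the later request_percpu_irq() in smp_ipi_irq_setup() */
                irq_set_percpu_devid(virq);
                irq_set_chip_and_handler(virq, &example_intc_chip,
                                         handle_percpu_irq);
        } else {
                irq_set_chip_and_handler(virq, &example_intc_chip,
                                         handle_level_irq);
        }

        return 0;
}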
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 7d9a736fc7e5..4549ab255dd1 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -29,21 +29,16 @@
  * which however is currently broken
  */
 
-#include <linux/spinlock.h>
 #include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-#include <linux/profile.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
 #include <asm/irq.h>
 #include <asm/arcregs.h>
-#include <asm/clk.h>
-#include <asm/mach_desc.h>
 
 #include <asm/mcip.h>
 
@@ -60,16 +55,35 @@
 
 #define ARC_TIMER_MAX 0xFFFFFFFF
 
-/********** Clock Source Device *********/
-
-#ifdef CONFIG_ARC_HAS_GFRC
+static unsigned long arc_timer_freq;
 
-static int arc_counter_setup(void)
+static int noinline arc_get_timer_clk(struct device_node *node)
 {
-        return 1;
+        struct clk *clk;
+        int ret;
+
+        clk = of_clk_get(node, 0);
+        if (IS_ERR(clk)) {
+                pr_err("timer missing clk");
+                return PTR_ERR(clk);
+        }
+
+        ret = clk_prepare_enable(clk);
+        if (ret) {
+                pr_err("Couldn't enable parent clk\n");
+                return ret;
+        }
+
+        arc_timer_freq = clk_get_rate(clk);
+
+        return 0;
 }
 
-static cycle_t arc_counter_read(struct clocksource *cs)
+/********** Clock Source Device *********/
+
+#ifdef CONFIG_ARC_HAS_GFRC
+
+static cycle_t arc_read_gfrc(struct clocksource *cs)
 {
         unsigned long flags;
         union {
@@ -94,15 +108,31 @@ static cycle_t arc_counter_read(struct clocksource *cs)
         return stamp.full;
 }
 
-static struct clocksource arc_counter = {
+static struct clocksource arc_counter_gfrc = {
         .name = "ARConnect GFRC",
         .rating = 400,
-        .read = arc_counter_read,
+        .read = arc_read_gfrc,
         .mask = CLOCKSOURCE_MASK(64),
         .flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-#else
+static void __init arc_cs_setup_gfrc(struct device_node *node)
+{
+        int exists = cpuinfo_arc700[0].extn.gfrc;
+        int ret;
+
+        if (WARN(!exists, "Global-64-bit-Ctr clocksource not detected"))
+                return;
+
+        ret = arc_get_timer_clk(node);
+        if (ret)
+                return;
+
+        clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
+}
+CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
+
+#endif
 
 #ifdef CONFIG_ARC_HAS_RTC
 
@@ -110,15 +140,7 @@ static struct clocksource arc_counter = {
 #define AUX_RTC_LOW 0x104
 #define AUX_RTC_HIGH 0x105
 
-int arc_counter_setup(void)
-{
-        write_aux_reg(AUX_RTC_CTRL, 1);
-
-        /* Not usable in SMP */
-        return !IS_ENABLED(CONFIG_SMP);
-}
-
-static cycle_t arc_counter_read(struct clocksource *cs)
+static cycle_t arc_read_rtc(struct clocksource *cs)
 {
         unsigned long status;
         union {
@@ -142,47 +164,78 @@ static cycle_t arc_counter_read(struct clocksource *cs)
         return stamp.full;
 }
 
-static struct clocksource arc_counter = {
+static struct clocksource arc_counter_rtc = {
         .name = "ARCv2 RTC",
         .rating = 350,
-        .read = arc_counter_read,
+        .read = arc_read_rtc,
         .mask = CLOCKSOURCE_MASK(64),
         .flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-#else /* !CONFIG_ARC_HAS_RTC */
-
-/*
- * set 32bit TIMER1 to keep counting monotonically and wraparound
- */
-int arc_counter_setup(void)
+static void __init arc_cs_setup_rtc(struct device_node *node)
 {
-        write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
-        write_aux_reg(ARC_REG_TIMER1_CNT, 0);
-        write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+        int exists = cpuinfo_arc700[smp_processor_id()].extn.rtc;
+        int ret;
+
+        if (WARN(!exists, "Local-64-bit-Ctr clocksource not detected"))
+                return;
+
+        /* Local to CPU hence not usable in SMP */
+        if (WARN(IS_ENABLED(CONFIG_SMP), "Local-64-bit-Ctr not usable in SMP"))
+                return;
+
+        ret = arc_get_timer_clk(node);
+        if (ret)
+                return;
+
+        write_aux_reg(AUX_RTC_CTRL, 1);
 
-        /* Not usable in SMP */
-        return !IS_ENABLED(CONFIG_SMP);
+        clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
 }
+CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
 
-static cycle_t arc_counter_read(struct clocksource *cs)
+#endif
+
+/*
+ * 32bit TIMER1 to keep counting monotonically and wraparound
+ */
+
+static cycle_t arc_read_timer1(struct clocksource *cs)
 {
         return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
 }
 
-static struct clocksource arc_counter = {
+static struct clocksource arc_counter_timer1 = {
         .name = "ARC Timer1",
         .rating = 300,
-        .read = arc_counter_read,
+        .read = arc_read_timer1,
         .mask = CLOCKSOURCE_MASK(32),
         .flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-#endif
-#endif
+static void __init arc_cs_setup_timer1(struct device_node *node)
+{
+        int ret;
+
+        /* Local to CPU hence not usable in SMP */
+        if (IS_ENABLED(CONFIG_SMP))
+                return;
+
+        ret = arc_get_timer_clk(node);
+        if (ret)
+                return;
+
+        write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
+        write_aux_reg(ARC_REG_TIMER1_CNT, 0);
+        write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+
+        clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
+}
 
 /********** Clock Event Device *********/
 
+static int arc_timer_irq;
+
 /*
  * Arm the timer to interrupt after @cycles
  * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
@@ -209,7 +262,7 @@ static int arc_clkevent_set_periodic(struct clock_event_device *dev)
          * At X Hz, 1 sec = 1000ms -> X cycles;
          * 10ms -> X / 100 cycles
          */
-        arc_timer_event_setup(arc_get_core_freq() / HZ);
+        arc_timer_event_setup(arc_timer_freq / HZ);
         return 0;
 }
 
@@ -218,7 +271,6 @@ static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
         .features = CLOCK_EVT_FEAT_ONESHOT |
                     CLOCK_EVT_FEAT_PERIODIC,
         .rating = 300,
-        .irq = TIMER0_IRQ,      /* hardwired, no need for resources */
         .set_next_event = arc_clkevent_set_next_event,
         .set_state_periodic = arc_clkevent_set_periodic,
 };
@@ -244,45 +296,81 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
         return IRQ_HANDLED;
 }
 
+static int arc_timer_cpu_notify(struct notifier_block *self,
+                                unsigned long action, void *hcpu)
+{
+        struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+
+        evt->cpumask = cpumask_of(smp_processor_id());
+
+        switch (action & ~CPU_TASKS_FROZEN) {
+        case CPU_STARTING:
+                clockevents_config_and_register(evt, arc_timer_freq,
+                                                0, ULONG_MAX);
+                enable_percpu_irq(arc_timer_irq, 0);
+                break;
+        case CPU_DYING:
+                disable_percpu_irq(arc_timer_irq);
+                break;
+        }
+
+        return NOTIFY_OK;
+}
+
+static struct notifier_block arc_timer_cpu_nb = {
+        .notifier_call = arc_timer_cpu_notify,
+};
+
 /*
- * Setup the local event timer for @cpu
+ * clockevent setup for boot CPU
  */
-void arc_local_timer_setup()
+static void __init arc_clockevent_setup(struct device_node *node)
 {
         struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
-        int cpu = smp_processor_id();
+        int ret;
+
+        register_cpu_notifier(&arc_timer_cpu_nb);
 
-        evt->cpumask = cpumask_of(cpu);
-        clockevents_config_and_register(evt, arc_get_core_freq(),
+        arc_timer_irq = irq_of_parse_and_map(node, 0);
+        if (arc_timer_irq <= 0)
+                panic("clockevent: missing irq");
+
+        ret = arc_get_timer_clk(node);
+        if (ret)
+                panic("clockevent: missing clk");
+
+        evt->irq = arc_timer_irq;
+        evt->cpumask = cpumask_of(smp_processor_id());
+        clockevents_config_and_register(evt, arc_timer_freq,
                                         0, ARC_TIMER_MAX);
 
-        /* setup the per-cpu timer IRQ handler - for all cpus */
-        arc_request_percpu_irq(TIMER0_IRQ, cpu, timer_irq_handler,
+        /* Needs apriori irq_set_percpu_devid() done in intc map function */
+        ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
                                  "Timer0 (per-cpu-tick)", evt);
+        if (ret)
+                panic("clockevent: unable to request irq\n");
+
+        enable_percpu_irq(arc_timer_irq, 0);
 }
 
+static void __init arc_of_timer_init(struct device_node *np)
+{
+        static int init_count = 0;
+
+        if (!init_count) {
+                init_count = 1;
+                arc_clockevent_setup(np);
+        } else {
+                arc_cs_setup_timer1(np);
+        }
+}
+CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
+
 /*
  * Called from start_kernel() - boot CPU only
- *
- * -Sets up h/w timers as applicable on boot cpu
- * -Also sets up any global state needed for timer subsystem:
- *  - for "counting" timer, registers a clocksource, usable across CPUs
- *    (provided that underlying counter h/w is synchronized across cores)
- *  - for "event" timer, sets up TIMER0 IRQ (as that is platform agnostic)
  */
 void __init time_init(void)
 {
-        /*
-         * sets up the timekeeping free-flowing counter which also returns
-         * whether the counter is usable as clocksource
-         */
-        if (arc_counter_setup())
-                /*
-                 * CLK upto 4.29 GHz can be safely represented in 32 bits
-                 * because Max 32 bit number is 4,294,967,295
-                 */
-                clocksource_register_hz(&arc_counter, arc_get_core_freq());
-
-        /* sets up the periodic event timer */
-        arc_local_timer_setup();
+        of_clk_init(NULL);
+        clocksource_probe();
 }
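The net effect in time.c is that every timer becomes a DT-probed driver: each CLOCKSOURCE_OF_DECLARE() entry above is keyed on a compatible string, of_clk_init(NULL) registers the clocks first, and clocksource_probe() then walks the device tree and calls the matching init hook for every timer node it finds. A stripped-down sketch of that registration pattern, using placeholder names and the void-returning init hook this kernel version expects, not an actual ARC driver:

#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/printk.h>

static unsigned long example_timer_rate;

/* called by clocksource_probe() for each DT node matching the compatible below */
static void __init example_timer_init(struct device_node *np)
{
        struct clk *clk = of_clk_get(np, 0);

        if (IS_ERR(clk) || clk_prepare_enable(clk)) {
                pr_err("example-timer: missing or unusable clk\n");
                return;
        }

        example_timer_rate = clk_get_rate(clk);
        /* ... register a clocksource/clockevent using example_timer_rate ... */
}
CLOCKSOURCE_OF_DECLARE(example_timer, "vendor,example-timer", example_timer_init);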