author     Linus Torvalds <torvalds@linux-foundation.org>   2014-08-09 12:33:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-08-09 12:33:18 -0400
commit     e853ccf08b9ac32ce731600de9618c1a462e8b70 (patch)
tree       9abe6729a914aee5621297c49066c58c854c809e
parent     d7b1fd9140c266c956bf1b4a2c3329aff8da5323 (diff)
parent     2a5e95d4181c3f177a41b7c141a816859478c4d7 (diff)
Merge tag 'arc-v3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC changes from Vineet Gupta:
 "Mostly cleanup/refactoring in core intc, cache flush, IPI send..."

* tag 'arc-v3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  mm, arc: remove obsolete pagefault oom killer comment
  ARC: help gcc elide icache helper for !SMP
  ARC: move common ops for line/full cache into helpers
  ARC: cache boot reporting updates
  ARC: [intc] mask/unmask can be hidden again
  ARC: [plat-arcfpga] No need for init_irq hack
  ARC: [intc] don't mask all IRQ by default
  ARC: prune extra header includes from smp.c
  ARC: update some comments
  ARC: [SMP] unify cpu private IRQ requests (TIMER/IPI)
-rw-r--r--   arch/arc/include/asm/arcregs.h              2
-rw-r--r--   arch/arc/include/asm/irq.h                  4
-rw-r--r--   arch/arc/include/asm/irqflags.h            18
-rw-r--r--   arch/arc/kernel/irq.c                      53
-rw-r--r--   arch/arc/kernel/smp.c                      23
-rw-r--r--   arch/arc/kernel/time.c                     28
-rw-r--r--   arch/arc/mm/cache_arc700.c                168
-rw-r--r--   arch/arc/mm/fault.c                         1
-rw-r--r--   arch/arc/mm/tlbex.S                         4
-rw-r--r--   arch/arc/plat-arcfpga/Makefile              2
-rw-r--r--   arch/arc/plat-arcfpga/include/plat/irq.h    2
-rw-r--r--   arch/arc/plat-arcfpga/irq.c                25
-rw-r--r--   arch/arc/plat-arcfpga/platform.c            3
13 files changed, 159 insertions(+), 174 deletions(-)
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 355cb470c2a4..372466b371bf 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -296,7 +296,7 @@ struct cpuinfo_arc_mmu {
 };
 
 struct cpuinfo_arc_cache {
-        unsigned int sz, line_len, assoc, ver;
+        unsigned int sz_k:8, line_len:8, assoc:4, ver:4, alias:1, vipt:1, pad:6;
 };
 
 struct cpuinfo_arc_ccm {
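The reworked cpuinfo_arc_cache above packs the cache description into bitfields totalling 8+8+4+4+1+1+6 = 32 bits. A standalone sanity sketch, not part of the patch and assuming the usual 32-bit unsigned int, that checks the packing:

#include <stdio.h>

struct cpuinfo_arc_cache {
        unsigned int sz_k:8, line_len:8, assoc:4, ver:4, alias:1, vipt:1, pad:6;
};

int main(void)
{
        /* 8 + 8 + 4 + 4 + 1 + 1 + 6 bits = 32 bits -> expect one 4-byte word */
        printf("sizeof(struct cpuinfo_arc_cache) = %zu\n",
               sizeof(struct cpuinfo_arc_cache));
        return 0;
}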
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index fb4efb648971..f38652fb2ed7 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -16,9 +16,13 @@
 #define TIMER0_IRQ      3
 #define TIMER1_IRQ      4
 
+#include <linux/interrupt.h>
 #include <asm-generic/irq.h>
 
 extern void arc_init_IRQ(void);
 void arc_local_timer_setup(void);
+void arc_request_percpu_irq(int irq, int cpu,
+                            irqreturn_t (*isr)(int irq, void *dev),
+                            const char *irq_nm, void *percpu_dev);
 
 #endif
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index cb7efc29f16f..587df8236e8b 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -131,24 +131,6 @@ static inline int arch_irqs_disabled(void)
         return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 
-static inline void arch_mask_irq(unsigned int irq)
-{
-        unsigned int ienb;
-
-        ienb = read_aux_reg(AUX_IENABLE);
-        ienb &= ~(1 << irq);
-        write_aux_reg(AUX_IENABLE, ienb);
-}
-
-static inline void arch_unmask_irq(unsigned int irq)
-{
-        unsigned int ienb;
-
-        ienb = read_aux_reg(AUX_IENABLE);
-        ienb |= (1 << irq);
-        write_aux_reg(AUX_IENABLE, ienb);
-}
-
 #else
 
 #ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 7d653c0d0773..620ec2fe32a9 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -19,21 +19,16 @@
 
 /*
  * Early Hardware specific Interrupt setup
+ * -Platform independent, needed for each CPU (not foldable into init_IRQ)
  * -Called very early (start_kernel -> setup_arch -> setup_processor)
- * -Platform Independent (must for any ARC700)
- * -Needed for each CPU (hence not foldable into init_IRQ)
  *
  * what it does ?
- * -Disable all IRQs (on CPU side)
  * -Optionally, setup the High priority Interrupts as Level 2 IRQs
  */
 void arc_init_IRQ(void)
 {
         int level_mask = 0;
 
-        /* Disable all IRQs: enable them as devices request */
-        write_aux_reg(AUX_IENABLE, 0);
-
         /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
         level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
         level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
@@ -60,20 +55,28 @@ void arc_init_IRQ(void)
  * below, per IRQ.
  */
 
-static void arc_mask_irq(struct irq_data *data)
+static void arc_irq_mask(struct irq_data *data)
 {
-        arch_mask_irq(data->irq);
+        unsigned int ienb;
+
+        ienb = read_aux_reg(AUX_IENABLE);
+        ienb &= ~(1 << data->irq);
+        write_aux_reg(AUX_IENABLE, ienb);
 }
 
-static void arc_unmask_irq(struct irq_data *data)
+static void arc_irq_unmask(struct irq_data *data)
 {
-        arch_unmask_irq(data->irq);
+        unsigned int ienb;
+
+        ienb = read_aux_reg(AUX_IENABLE);
+        ienb |= (1 << data->irq);
+        write_aux_reg(AUX_IENABLE, ienb);
 }
 
 static struct irq_chip onchip_intc = {
         .name           = "ARC In-core Intc",
-        .irq_mask       = arc_mask_irq,
-        .irq_unmask     = arc_unmask_irq,
+        .irq_mask       = arc_irq_mask,
+        .irq_unmask     = arc_irq_unmask,
 };
 
 static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
@@ -150,6 +153,32 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
         set_irq_regs(old_regs);
 }
 
+void arc_request_percpu_irq(int irq, int cpu,
+                            irqreturn_t (*isr)(int irq, void *dev),
+                            const char *irq_nm,
+                            void *percpu_dev)
+{
+        /* Boot cpu calls request, all call enable */
+        if (!cpu) {
+                int rc;
+
+                /*
+                 * These 2 calls are essential to making percpu IRQ APIs work
+                 * Ideally these details could be hidden in irq chip map function
+                 * but the issue is IPIs IRQs being static (non-DT) and platform
+                 * specific, so we can't identify them there.
+                 */
+                irq_set_percpu_devid(irq);
+                irq_modify_status(irq, IRQ_NOAUTOEN, 0);  /* @irq, @clr, @set */
+
+                rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
+                if (rc)
+                        panic("Percpu IRQ request failed for %d\n", irq);
+        }
+
+        enable_percpu_irq(irq, 0);
+}
+
 /*
  * arch_local_irq_enable - Enable interrupts.
  *
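A hedged sketch of how a caller would use the new helper, modeled on the timer and IPI users later in this series; my_percpu_isr, my_dev and the IRQ number are hypothetical placeholders, not part of the patch:

#include <linux/interrupt.h>
#include <asm/irq.h>

/* hypothetical per-cpu device handler */
static irqreturn_t my_percpu_isr(int irq, void *dev)
{
        return IRQ_HANDLED;
}

static void my_cpu_local_setup(int cpu, void *my_dev)
{
        /*
         * cpu == 0: the helper registers the handler once via
         * request_percpu_irq() and clears IRQ_NOAUTOEN; every cpu (boot
         * and secondaries) then enables its own copy through
         * enable_percpu_irq() inside the helper.
         */
        arc_request_percpu_irq(42 /* hypothetical IRQ */, cpu,
                               my_percpu_isr, "my-percpu-irq", my_dev);
}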
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index c802bb500602..dcd317c47d09 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -12,23 +12,15 @@
  * -- Initial Write (Borrowed heavily from ARM)
  */
 
-#include <linux/module.h>
-#include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/profile.h>
-#include <linux/errno.h>
-#include <linux/err.h>
 #include <linux/mm.h>
 #include <linux/cpu.h>
-#include <linux/smp.h>
 #include <linux/irq.h>
-#include <linux/delay.h>
 #include <linux/atomic.h>
-#include <linux/percpu.h>
 #include <linux/cpumask.h>
-#include <linux/spinlock_types.h>
 #include <linux/reboot.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
@@ -136,7 +128,7 @@ void start_kernel_secondary(void)
         pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
 
         if (machine_desc->init_smp)
-                machine_desc->init_smp(smp_processor_id());
+                machine_desc->init_smp(cpu);
 
         arc_local_timer_setup();
 
@@ -338,18 +330,11 @@ irqreturn_t do_IPI(int irq, void *dev_id)
  */
 static DEFINE_PER_CPU(int, ipi_dev);
 
-static struct irqaction arc_ipi_irq = {
-        .name    = "IPI Interrupt",
-        .flags   = IRQF_PERCPU,
-        .handler = do_IPI,
-};
-
 int smp_ipi_irq_setup(int cpu, int irq)
 {
-        if (!cpu)
-                return setup_irq(irq, &arc_ipi_irq);
-        else
-                arch_unmask_irq(irq);
+        int *dev = per_cpu_ptr(&ipi_dev, cpu);
+
+        arc_request_percpu_irq(irq, cpu, do_IPI, "IPI Interrupt", dev);
 
         return 0;
 }
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 36c2aa99436f..dbe74f418019 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -144,12 +144,12 @@ static struct clocksource arc_counter = {
 /********** Clock Event Device *********/
 
 /*
- * Arm the timer to interrupt after @limit cycles
+ * Arm the timer to interrupt after @cycles
  * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
  */
-static void arc_timer_event_setup(unsigned int limit)
+static void arc_timer_event_setup(unsigned int cycles)
 {
-        write_aux_reg(ARC_REG_TIMER0_LIMIT, limit);
+        write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
         write_aux_reg(ARC_REG_TIMER0_CNT, 0);   /* start from 0 */
 
         write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
@@ -168,6 +168,10 @@ static void arc_clkevent_set_mode(enum clock_event_mode mode,
 {
         switch (mode) {
         case CLOCK_EVT_MODE_PERIODIC:
+                /*
+                 * At X Hz, 1 sec = 1000ms -> X cycles;
+                 * 10ms -> X / 100 cycles
+                 */
                 arc_timer_event_setup(arc_get_core_freq() / HZ);
                 break;
         case CLOCK_EVT_MODE_ONESHOT:
@@ -210,12 +214,6 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
         return IRQ_HANDLED;
 }
 
-static struct irqaction arc_timer_irq = {
-        .name    = "Timer0 (clock-evt-dev)",
-        .flags   = IRQF_TIMER | IRQF_PERCPU,
-        .handler = timer_irq_handler,
-};
-
 /*
  * Setup the local event timer for @cpu
  */
@@ -228,15 +226,9 @@ void arc_local_timer_setup()
         clockevents_config_and_register(evt, arc_get_core_freq(),
                                         0, ARC_TIMER_MAX);
 
-        /*
-         * setup the per-cpu timer IRQ handler - for all cpus
-         * For non boot CPU explicitly unmask at intc
-         * setup_irq() -> .. -> irq_startup() already does this on boot-cpu
-         */
-        if (!cpu)
-                setup_irq(TIMER0_IRQ, &arc_timer_irq);
-        else
-                arch_unmask_irq(TIMER0_IRQ);
+        /* setup the per-cpu timer IRQ handler - for all cpus */
+        arc_request_percpu_irq(TIMER0_IRQ, cpu, timer_irq_handler,
+                               "Timer0 (per-cpu-tick)", evt);
 }
 
 /*
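The periodic case above programs one timer period as core-clock cycles per tick, i.e. arc_get_core_freq() / HZ. A purely illustrative calculation; the 80 MHz clock and HZ=100 are assumed values, not taken from the patch:

#include <stdio.h>

int main(void)
{
        unsigned int core_freq = 80000000;   /* e.g. an 80 MHz ARC core */
        unsigned int hz = 100;               /* CONFIG_HZ=100 -> 10 ms tick */

        /* value programmed into ARC_REG_TIMER0_LIMIT for periodic mode */
        unsigned int cycles = core_freq / hz;

        printf("%u cycles per tick (~%u ms)\n", cycles, 1000 / hz);
        return 0;   /* prints: 800000 cycles per tick (~10 ms) */
}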
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 353b202c37c9..4670afc3b971 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -77,21 +77,19 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
 {
         int n = 0;
 
-#define PR_CACHE(p, enb, str) \
-{ \
+#define PR_CACHE(p, cfg, str) \
         if (!(p)->ver) \
                 n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
         else \
                 n += scnprintf(buf + n, len - n, \
-                        str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
-                        TO_KB((p)->sz), (p)->assoc, (p)->line_len, \
-                        enb ? "" : "DISABLED (kernel-build)"); \
-}
+                        str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \
+                        (p)->sz_k, (p)->assoc, (p)->line_len, \
+                        (p)->vipt ? "VIPT" : "PIPT", \
+                        (p)->alias ? " aliasing" : "", \
+                        IS_ENABLED(cfg) ? "" : " (not used)");
 
-        PR_CACHE(&cpuinfo_arc700[c].icache, IS_ENABLED(CONFIG_ARC_HAS_ICACHE),
-                "I-Cache");
-        PR_CACHE(&cpuinfo_arc700[c].dcache, IS_ENABLED(CONFIG_ARC_HAS_DCACHE),
-                "D-Cache");
+        PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
+        PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
 
         return buf;
 }
@@ -116,20 +114,31 @@ void read_decode_cache_bcr(void)
         p_ic = &cpuinfo_arc700[cpu].icache;
         READ_BCR(ARC_REG_IC_BCR, ibcr);
 
+        if (!ibcr.ver)
+                goto dc_chk;
+
         BUG_ON(ibcr.config != 3);
         p_ic->assoc = 2;                /* Fixed to 2w set assoc */
         p_ic->line_len = 8 << ibcr.line_len;
-        p_ic->sz = 0x200 << ibcr.sz;
+        p_ic->sz_k = 1 << (ibcr.sz - 1);
         p_ic->ver = ibcr.ver;
+        p_ic->vipt = 1;
+        p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
 
+dc_chk:
         p_dc = &cpuinfo_arc700[cpu].dcache;
         READ_BCR(ARC_REG_DC_BCR, dbcr);
 
+        if (!dbcr.ver)
+                return;
+
         BUG_ON(dbcr.config != 2);
         p_dc->assoc = 4;                /* Fixed to 4w set assoc */
         p_dc->line_len = 16 << dbcr.line_len;
-        p_dc->sz = 0x200 << dbcr.sz;
+        p_dc->sz_k = 1 << (dbcr.sz - 1);
         p_dc->ver = dbcr.ver;
+        p_dc->vipt = 1;
+        p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
 }
 
 /*
@@ -142,14 +151,16 @@ void read_decode_cache_bcr(void)
 void arc_cache_init(void)
 {
         unsigned int __maybe_unused cpu = smp_processor_id();
-        struct cpuinfo_arc_cache __maybe_unused *ic, __maybe_unused *dc;
         char str[256];
 
         printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
 
-#ifdef CONFIG_ARC_HAS_ICACHE
-        ic = &cpuinfo_arc700[cpu].icache;
-        if (ic->ver) {
+        if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
+                struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
+
+                if (!ic->ver)
+                        panic("cache support enabled but non-existent cache\n");
+
                 if (ic->line_len != L1_CACHE_BYTES)
                         panic("ICache line [%d] != kernel Config [%d]",
                               ic->line_len, L1_CACHE_BYTES);
@@ -158,26 +169,26 @@ void arc_cache_init(void)
                         panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
                               ic->ver, CONFIG_ARC_MMU_VER);
         }
-#endif
 
-#ifdef CONFIG_ARC_HAS_DCACHE
-        dc = &cpuinfo_arc700[cpu].dcache;
-        if (dc->ver) {
-                unsigned int dcache_does_alias;
+        if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
+                struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
+                int handled;
+
+                if (!dc->ver)
+                        panic("cache support enabled but non-existent cache\n");
 
                 if (dc->line_len != L1_CACHE_BYTES)
                         panic("DCache line [%d] != kernel Config [%d]",
                               dc->line_len, L1_CACHE_BYTES);
 
                 /* check for D-Cache aliasing */
-                dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE;
+                handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
 
-                if (dcache_does_alias && !cache_is_vipt_aliasing())
+                if (dc->alias && !handled)
                         panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
-                else if (!dcache_does_alias && cache_is_vipt_aliasing())
+                else if (!dc->alias && handled)
                         panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
         }
-#endif
 }
 
 #define OP_INV          0x1
@@ -255,10 +266,32 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
  * Machine specific helpers for Entire D-Cache or Per Line ops
  */
 
-static inline void wait_for_flush(void)
+static unsigned int __before_dc_op(const int op)
+{
+        unsigned int reg = reg;
+
+        if (op == OP_FLUSH_N_INV) {
+                /* Dcache provides 2 cmd: FLUSH or INV
+                 * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
+                 * flush-n-inv is achieved by INV cmd but with IM=1
+                 * So toggle INV sub-mode depending on op request and default
+                 */
+                reg = read_aux_reg(ARC_REG_DC_CTRL);
+                write_aux_reg(ARC_REG_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH)
+                        ;
+        }
+
+        return reg;
+}
+
+static void __after_dc_op(const int op, unsigned int reg)
 {
-        while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
-                ;
+        if (op & OP_FLUSH)      /* flush / flush-n-inv both wait */
+                while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
+
+        /* Switch back to default Invalidate mode */
+        if (op == OP_FLUSH_N_INV)
+                write_aux_reg(ARC_REG_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
 }
 
 /*
@@ -269,18 +302,10 @@ static inline void wait_for_flush(void)
  */
 static inline void __dc_entire_op(const int cacheop)
 {
-        unsigned int tmp = tmp;
+        unsigned int ctrl_reg;
         int aux;
 
-        if (cacheop == OP_FLUSH_N_INV) {
-                /* Dcache provides 2 cmd: FLUSH or INV
-                 * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
-                 * flush-n-inv is achieved by INV cmd but with IM=1
-                 * Default INV sub-mode is DISCARD, which needs to be toggled
-                 */
-                tmp = read_aux_reg(ARC_REG_DC_CTRL);
-                write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
-        }
+        ctrl_reg = __before_dc_op(cacheop);
 
         if (cacheop & OP_INV)   /* Inv or flush-n-inv use same cmd reg */
                 aux = ARC_REG_DC_IVDC;
@@ -289,12 +314,7 @@ static inline void __dc_entire_op(const int cacheop)
 
         write_aux_reg(aux, 0x1);
 
-        if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
-                wait_for_flush();
-
-        /* Switch back the DISCARD ONLY Invalidate mode */
-        if (cacheop == OP_FLUSH_N_INV)
-                write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
+        __after_dc_op(cacheop, ctrl_reg);
 }
 
 /* For kernel mappings cache operation: index is same as paddr */
@@ -306,29 +326,16 @@ static inline void __dc_entire_op(const int cacheop)
 static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
                                 unsigned long sz, const int cacheop)
 {
-        unsigned long flags, tmp = tmp;
+        unsigned long flags;
+        unsigned int ctrl_reg;
 
         local_irq_save(flags);
 
-        if (cacheop == OP_FLUSH_N_INV) {
-                /*
-                 * Dcache provides 2 cmd: FLUSH or INV
-                 * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
-                 * flush-n-inv is achieved by INV cmd but with IM=1
-                 * Default INV sub-mode is DISCARD, which needs to be toggled
-                 */
-                tmp = read_aux_reg(ARC_REG_DC_CTRL);
-                write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
-        }
+        ctrl_reg = __before_dc_op(cacheop);
 
         __cache_line_loop(paddr, vaddr, sz, cacheop);
 
-        if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
-                wait_for_flush();
-
-        /* Switch back the DISCARD ONLY Invalidate mode */
-        if (cacheop == OP_FLUSH_N_INV)
-                write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
+        __after_dc_op(cacheop, ctrl_reg);
 
         local_irq_restore(flags);
 }
@@ -389,8 +396,16 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
 /***********************************************************
  * Machine specific helper for per line I-Cache invalidate.
  */
-static void __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
-                                unsigned long sz)
+
+static inline void __ic_entire_inv(void)
+{
+        write_aux_reg(ARC_REG_IC_IVIC, 1);
+        read_aux_reg(ARC_REG_IC_CTRL);  /* blocks */
+}
+
+static inline void
+__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
+                          unsigned long sz)
 {
         unsigned long flags;
 
@@ -399,30 +414,39 @@ static void __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
         local_irq_restore(flags);
 }
 
-static inline void __ic_entire_inv(void)
-{
-        write_aux_reg(ARC_REG_IC_IVIC, 1);
-        read_aux_reg(ARC_REG_IC_CTRL);  /* blocks */
-}
-
-struct ic_line_inv_vaddr_ipi {
+#ifndef CONFIG_SMP
+
+#define __ic_line_inv_vaddr(p, v, s)    __ic_line_inv_vaddr_local(p, v, s)
+
+#else
+
+struct ic_inv_args {
         unsigned long paddr, vaddr;
         int sz;
 };
 
 static void __ic_line_inv_vaddr_helper(void *info)
 {
-        struct ic_line_inv_vaddr_ipi *ic_inv = (struct ic_line_inv_vaddr_ipi*) info;
+        struct ic_inv_args *ic_inv = (struct ic_inv_args *) info;
+
         __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
 }
 
 static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
                                 unsigned long sz)
 {
-        struct ic_line_inv_vaddr_ipi ic_inv = { paddr, vaddr , sz};
+        struct ic_inv_args ic_inv = {
+                .paddr = paddr,
+                .vaddr = vaddr,
+                .sz    = sz
+        };
+
         on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
 }
-#else
+
+#endif  /* CONFIG_SMP */
+
+#else   /* !CONFIG_ARC_HAS_ICACHE */
 
 #define __ic_entire_inv()
 #define __ic_line_inv_vaddr(pstart, vstart, sz)
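The alias flag computed in read_decode_cache_bcr() above marks a VIPT cache whose per-way size spans more than one page. A purely illustrative check of that rule; the cache geometries below are assumed, not read from hardware:

#include <stdio.h>

#define TO_KB(bytes)    ((bytes) >> 10)

static int cache_aliases(unsigned int sz_k, unsigned int assoc,
                         unsigned int page_sz)
{
        /* way size (sz/assoc) covering more than one page => the same
         * physical line can land at more than one cache index => aliasing
         */
        return sz_k / assoc / TO_KB(page_sz) > 1;
}

int main(void)
{
        printf("%d\n", cache_aliases(32, 2, 8192));  /* 32/2/8 = 2 -> 1 (aliasing) */
        printf("%d\n", cache_aliases(16, 4, 8192));  /* 16/4/8 = 0 -> 0 (no aliasing) */
        return 0;
}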
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 9c69552350c4..6f7e3a68803a 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -159,7 +159,6 @@ good_area:
                 return;
         }
 
-        /* TBD: switch to pagefault_out_of_memory() */
         if (fault & VM_FAULT_OOM)
                 goto out_of_memory;
         else if (fault & VM_FAULT_SIGBUS)
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 79bfc81358c9..d572f1c2c724 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -220,9 +220,9 @@ ex_saved_reg1:
 
 .macro CONV_PTE_TO_TLB
         and r3, r0, PTE_BITS_RWX       ;          r w x
-        lsl r2, r3, 3                  ; r w x 0 0 0
+        lsl r2, r3, 3                  ; r w x 0 0 0 (GLOBAL, kernel only)
         and.f 0,  r0, _PAGE_GLOBAL
-        or.z r2, r2, r3                ; r w x r w x
+        or.z r2, r2, r3                ; r w x r w x (!GLOBAL, user page)
 
         and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
         or  r3, r3, r2
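The two annotated instructions pack the PTE's r/w/x bits into the TLB permission field twice over: shifted left by 3 for the kernel-mode slot, and OR'ed in unshifted only when the page is not _PAGE_GLOBAL (i.e. a user page). A hedged C model of just that bit shuffle; the mask values are placeholders, not the real ARC definitions:

#include <stdio.h>

#define PTE_BITS_RWX    0x7u            /* placeholder: r w x in bits 2..0 */
#define _PAGE_GLOBAL    0x100u          /* placeholder */

static unsigned int conv_pte_perms(unsigned int pte)
{
        unsigned int rwx = pte & PTE_BITS_RWX;
        unsigned int perms = rwx << 3;  /* r w x 0 0 0 : kernel-mode slot */

        if (!(pte & _PAGE_GLOBAL))      /* mirrors "and.f 0, r0, _PAGE_GLOBAL; or.z" */
                perms |= rwx;           /* r w x r w x : user page gets user slot too */

        return perms;
}

int main(void)
{
        printf("global rwx -> 0x%02x\n", conv_pte_perms(0x7u | _PAGE_GLOBAL)); /* 0x38 */
        printf("user   rwx -> 0x%02x\n", conv_pte_perms(0x7u));                /* 0x3f */
        return 0;
}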
diff --git a/arch/arc/plat-arcfpga/Makefile b/arch/arc/plat-arcfpga/Makefile
index 4d1bddc34b5b..66fd0ecd68b3 100644
--- a/arch/arc/plat-arcfpga/Makefile
+++ b/arch/arc/plat-arcfpga/Makefile
@@ -8,5 +8,5 @@
 
 KBUILD_CFLAGS   += -Iarch/arc/plat-arcfpga/include
 
-obj-y := platform.o irq.o
+obj-y := platform.o
 obj-$(CONFIG_ISS_SMP_EXTN) += smp.o
diff --git a/arch/arc/plat-arcfpga/include/plat/irq.h b/arch/arc/plat-arcfpga/include/plat/irq.h
index 6adbc53c3a5b..2c9dea690ac4 100644
--- a/arch/arc/plat-arcfpga/include/plat/irq.h
+++ b/arch/arc/plat-arcfpga/include/plat/irq.h
@@ -24,6 +24,4 @@
 #define IDU_INTERRUPT_0    16
 #endif
 
-extern void __init plat_fpga_init_IRQ(void);
-
 #endif
diff --git a/arch/arc/plat-arcfpga/irq.c b/arch/arc/plat-arcfpga/irq.c
deleted file mode 100644
index d2215fd889c2..000000000000
--- a/arch/arc/plat-arcfpga/irq.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * ARC FPGA Platform IRQ hookups
- *
- * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/interrupt.h>
-#include <plat/irq.h>
-
-void __init plat_fpga_init_IRQ(void)
-{
-        /*
-         * SMP Hack because UART IRQ hardwired to cpu0 (boot-cpu) but if the
-         * request_irq() comes from any other CPU, the low level IRQ unamsking
-         * essential for getting Interrupts won't be enabled on cpu0, locking
-         * up the UART state machine.
-         */
-#ifdef CONFIG_SMP
-        arch_unmask_irq(UART0_IRQ);
-#endif
-}
diff --git a/arch/arc/plat-arcfpga/platform.c b/arch/arc/plat-arcfpga/platform.c
index b8d0d456627f..1038949a99a1 100644
--- a/arch/arc/plat-arcfpga/platform.c
+++ b/arch/arc/plat-arcfpga/platform.c
@@ -57,7 +57,6 @@ MACHINE_START(ANGEL4, "angel4")
         .dt_compat      = aa4_compat,
         .init_early     = plat_fpga_early_init,
         .init_machine   = plat_fpga_populate_dev,
-        .init_irq       = plat_fpga_init_IRQ,
 #ifdef CONFIG_ISS_SMP_EXTN
         .init_smp       = iss_model_init_smp,
 #endif
@@ -72,7 +71,6 @@ MACHINE_START(ML509, "ml509")
         .dt_compat      = ml509_compat,
         .init_early     = plat_fpga_early_init,
         .init_machine   = plat_fpga_populate_dev,
-        .init_irq       = plat_fpga_init_IRQ,
 #ifdef CONFIG_SMP
         .init_smp       = iss_model_init_smp,
 #endif
@@ -87,5 +85,4 @@ MACHINE_START(NSIMOSCI, "nsimosci")
         .dt_compat      = nsimosci_compat,
         .init_early     = NULL,
         .init_machine   = plat_fpga_populate_dev,
-        .init_irq       = NULL,
 MACHINE_END