aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorVineet Gupta <vgupta@synopsys.com>2013-07-10 02:10:27 -0400
committerVineet Gupta <vgupta@synopsys.com>2013-08-29 08:21:15 -0400
commit4b06ff35fb1dcafbcbdcbe9ce794ab0770f2a843 (patch)
tree711b71aa5658cecbba27dd18562b5ac4d1a7ecc0 /arch
parentfce16bc35ae4a45634f3dc348d8d297a25c277cf (diff)
ARC: Code cosmetics (Nothing semantical)
* reduce editor lines taken by pt_regs * ARCompact ISA specific part of TLB Miss handlers clubbed together * cleanup some comments Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/arc/include/asm/ptrace.h36
-rw-r--r--arch/arc/include/asm/spinlock_types.h6
-rw-r--r--arch/arc/mm/cache_arc700.c8
-rw-r--r--arch/arc/mm/tlbex.S131
4 files changed, 77 insertions, 104 deletions
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index c9938e7a7dbd..1bfeec2c0558 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -20,27 +20,17 @@ struct pt_regs {
20 20
21 /* Real registers */ 21 /* Real registers */
22 long bta; /* bta_l1, bta_l2, erbta */ 22 long bta; /* bta_l1, bta_l2, erbta */
23 long lp_start; 23
24 long lp_end; 24 long lp_start, lp_end, lp_count;
25 long lp_count; 25
26 long status32; /* status32_l1, status32_l2, erstatus */ 26 long status32; /* status32_l1, status32_l2, erstatus */
27 long ret; /* ilink1, ilink2 or eret */ 27 long ret; /* ilink1, ilink2 or eret */
28 long blink; 28 long blink;
29 long fp; 29 long fp;
30 long r26; /* gp */ 30 long r26; /* gp */
31 long r12; 31
32 long r11; 32 long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
33 long r10; 33
34 long r9;
35 long r8;
36 long r7;
37 long r6;
38 long r5;
39 long r4;
40 long r3;
41 long r2;
42 long r1;
43 long r0;
44 long sp; /* user/kernel sp depending on where we came from */ 34 long sp; /* user/kernel sp depending on where we came from */
45 long orig_r0; 35 long orig_r0;
46 36
@@ -70,19 +60,7 @@ struct pt_regs {
70/* Callee saved registers - need to be saved only when you are scheduled out */ 60/* Callee saved registers - need to be saved only when you are scheduled out */
71 61
72struct callee_regs { 62struct callee_regs {
73 long r25; 63 long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
74 long r24;
75 long r23;
76 long r22;
77 long r21;
78 long r20;
79 long r19;
80 long r18;
81 long r17;
82 long r16;
83 long r15;
84 long r14;
85 long r13;
86}; 64};
87 65
88#define instruction_pointer(regs) ((regs)->ret) 66#define instruction_pointer(regs) ((regs)->ret)
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
index 8276bfd61704..662627ced4f2 100644
--- a/arch/arc/include/asm/spinlock_types.h
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -20,9 +20,9 @@ typedef struct {
20#define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ } 20#define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ }
21 21
22/* 22/*
23 * Unlocked: 0x01_00_00_00 23 * Unlocked : 0x0100_0000
24 * Read lock(s): 0x00_FF_00_00 to say 0x01 24 * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
25 * Write lock: 0x0, but only possible if prior value "unlocked" 0x0100_0000 25 * Write lock : 0x0, but only if prior value is "unlocked" 0x0100_0000
26 */ 26 */
27typedef struct { 27typedef struct {
28 volatile unsigned int counter; 28 volatile unsigned int counter;
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index f415d851b765..5a1259cd948c 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -622,12 +622,12 @@ void flush_icache_range(unsigned long kstart, unsigned long kend)
622/* 622/*
623 * General purpose helper to make I and D cache lines consistent. 623 * General purpose helper to make I and D cache lines consistent.
624 * @paddr is phy addr of region 624 * @paddr is phy addr of region
625 * @vaddr is typically user or kernel vaddr (vmalloc) 625 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
626 * Howver in one instance, flush_icache_range() by kprobe (for a breakpt in 626 * However in one instance, when called by kprobe (for a breakpt in
627 * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will 627 * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
628 * use a paddr to index the cache (despite VIPT). This is fine since a 628 * use a paddr to index the cache (despite VIPT). This is fine since a
629 * built-in kernel page will not have any virtual mappings (not even kernel) 629 * builtin kernel page will not have any virtual mappings.
630 * kprobe on loadable module is different as it will have kvaddr. 630 * kprobe on loadable module will be kernel vaddr.
631 */ 631 */
632void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len) 632void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
633{ 633{
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index fc34ebc103bc..9cce00e94b43 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -44,17 +44,36 @@
44#include <asm/arcregs.h> 44#include <asm/arcregs.h>
45#include <asm/cache.h> 45#include <asm/cache.h>
46#include <asm/processor.h> 46#include <asm/processor.h>
47#if (CONFIG_ARC_MMU_VER == 1)
48#include <asm/tlb-mmu1.h> 47#include <asm/tlb-mmu1.h>
49#endif
50 48
51;-------------------------------------------------------------------------- 49;-----------------------------------------------------------------
52; scratch memory to save the registers (r0-r3) used to code TLB refill Handler 50; ARC700 Exception Handling doesn't auto-switch stack and it only provides
53; For details refer to comments before TLBMISS_FREEUP_REGS below 51; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
52;
53; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
54; "global" is used to free-up FIRST core reg to be able to code the rest of
55; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
56; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
57; need to be saved as well by extending the "global" to be 4 words. Hence
58; ".size ex_saved_reg1, 16"
59; [All of this dance is to avoid stack switching for each TLB Miss, since we
60; only need to save a handful of regs, as opposed to complete reg file]
61;
62; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
63; core reg as it will not be SMP safe.
64; Thus scratch AUX reg is used (and no longer used to cache task PGD).
65; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
66; Epilogue thus has to locate the "per-cpu" storage for regs.
67; To avoid cache line bouncing the per-cpu global is aligned/sized per
68; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
69; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
70
71; As simple as that....
54;-------------------------------------------------------------------------- 72;--------------------------------------------------------------------------
55 73
74; scratch memory to save [r0-r3] used to code TLB refill Handler
56ARCFP_DATA ex_saved_reg1 75ARCFP_DATA ex_saved_reg1
57 .align 1 << L1_CACHE_SHIFT ; IMP: Must be Cache Line aligned 76 .align 1 << L1_CACHE_SHIFT
58 .type ex_saved_reg1, @object 77 .type ex_saved_reg1, @object
59#ifdef CONFIG_SMP 78#ifdef CONFIG_SMP
60 .size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT) 79 .size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
@@ -66,6 +85,44 @@ ex_saved_reg1:
66 .zero 16 85 .zero 16
67#endif 86#endif
68 87
88.macro TLBMISS_FREEUP_REGS
89#ifdef CONFIG_SMP
90 sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
91 GET_CPU_ID r0 ; get to per cpu scratch mem,
92 lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
93 add r0, @ex_saved_reg1, r0
94#else
95 st r0, [@ex_saved_reg1]
96 mov_s r0, @ex_saved_reg1
97#endif
98 st_s r1, [r0, 4]
99 st_s r2, [r0, 8]
100 st_s r3, [r0, 12]
101
102 ; VERIFY if the ASID in MMU-PID Reg is same as
103 ; one in Linux data structures
104
105 DBG_ASID_MISMATCH
106.endm
107
108.macro TLBMISS_RESTORE_REGS
109#ifdef CONFIG_SMP
110 GET_CPU_ID r0 ; get to per cpu scratch mem
111 lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
112 add r0, @ex_saved_reg1, r0
113 ld_s r3, [r0,12]
114 ld_s r2, [r0, 8]
115 ld_s r1, [r0, 4]
116 lr r0, [ARC_REG_SCRATCH_DATA0]
117#else
118 mov_s r0, @ex_saved_reg1
119 ld_s r3, [r0,12]
120 ld_s r2, [r0, 8]
121 ld_s r1, [r0, 4]
122 ld_s r0, [r0]
123#endif
124.endm
125
69;============================================================================ 126;============================================================================
70; Troubleshooting Stuff 127; Troubleshooting Stuff
71;============================================================================ 128;============================================================================
@@ -191,68 +248,6 @@ ex_saved_reg1:
191#endif 248#endif
192.endm 249.endm
193 250
194;-----------------------------------------------------------------
195; ARC700 Exception Handling doesn't auto-switch stack and it only provides
196; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
197;
198; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
199; "global" is used to free-up FIRST core reg to be able to code the rest of
200; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
201; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
202; need to be saved as well by extending the "global" to be 4 words. Hence
203; ".size ex_saved_reg1, 16"
204; [All of this dance is to avoid stack switching for each TLB Miss, since we
205; only need to save a handful of regs, as opposed to complete reg file]
206;
207; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
208; core reg as it will not be SMP safe.
209; Thus scratch AUX reg is used (and no longer used to cache task PGD).
210; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
211; Epilogue thus has to locate the "per-cpu" storage for regs.
212; To avoid cache line bouncing the per-cpu global is aligned/sized per
213; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
214; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
215
216; As simple as that....
217
218.macro TLBMISS_FREEUP_REGS
219#ifdef CONFIG_SMP
220 sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
221 GET_CPU_ID r0 ; get to per cpu scratch mem,
222 lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
223 add r0, @ex_saved_reg1, r0
224#else
225 st r0, [@ex_saved_reg1]
226 mov_s r0, @ex_saved_reg1
227#endif
228 st_s r1, [r0, 4]
229 st_s r2, [r0, 8]
230 st_s r3, [r0, 12]
231
232 ; VERIFY if the ASID in MMU-PID Reg is same as
233 ; one in Linux data structures
234
235 DBG_ASID_MISMATCH
236.endm
237
238;-----------------------------------------------------------------
239.macro TLBMISS_RESTORE_REGS
240#ifdef CONFIG_SMP
241 GET_CPU_ID r0 ; get to per cpu scratch mem
242 lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
243 add r0, @ex_saved_reg1, r0
244 ld_s r3, [r0,12]
245 ld_s r2, [r0, 8]
246 ld_s r1, [r0, 4]
247 lr r0, [ARC_REG_SCRATCH_DATA0]
248#else
249 mov_s r0, @ex_saved_reg1
250 ld_s r3, [r0,12]
251 ld_s r2, [r0, 8]
252 ld_s r1, [r0, 4]
253 ld_s r0, [r0]
254#endif
255.endm
256 251
257ARCFP_CODE ;Fast Path Code, candidate for ICCM 252ARCFP_CODE ;Fast Path Code, candidate for ICCM
258 253