author    Vineet Gupta <vgupta@synopsys.com>  2013-07-10 02:10:27 -0400
committer Vineet Gupta <vgupta@synopsys.com>  2013-08-29 08:21:15 -0400
commit    4b06ff35fb1dcafbcbdcbe9ce794ab0770f2a843 (patch)
tree      711b71aa5658cecbba27dd18562b5ac4d1a7ecc0 /arch/arc/mm/tlbex.S
parent    fce16bc35ae4a45634f3dc348d8d297a25c277cf (diff)
ARC: Code cosmetics (Nothing semantical)
* reduce editor lines taken by pt_regs
* ARCompact ISA specific part of TLB Miss handlers clubbed together
* cleanup some comments

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch/arc/mm/tlbex.S')
-rw-r--r--  arch/arc/mm/tlbex.S | 131
1 file changed, 63 insertions, 68 deletions
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index fc34ebc103bc..9cce00e94b43 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -44,17 +44,36 @@
 #include <asm/arcregs.h>
 #include <asm/cache.h>
 #include <asm/processor.h>
-#if (CONFIG_ARC_MMU_VER == 1)
 #include <asm/tlb-mmu1.h>
-#endif
 
-;--------------------------------------------------------------------------
-; scratch memory to save the registers (r0-r3) used to code TLB refill Handler
-; For details refer to comments before TLBMISS_FREEUP_REGS below
+;-----------------------------------------------------------------
+; ARC700 Exception Handling doesn't auto-switch stack and it only provides
+; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
+;
+; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
+; "global" is used to free-up FIRST core reg to be able to code the rest of
+; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
+; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
+; need to be saved as well by extending the "global" to be 4 words. Hence
+; ".size ex_saved_reg1, 16"
+; [All of this dance is to avoid stack switching for each TLB Miss, since we
+; only need to save only a handful of regs, as opposed to complete reg file]
+;
+; For ARC700 SMP, the "global" obviously can't be used for free up the FIRST
+; core reg as it will not be SMP safe.
+; Thus scratch AUX reg is used (and no longer used to cache task PGD).
+; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
+; Epilogue thus has to locate the "per-cpu" storage for regs.
+; To avoid cache line bouncing the per-cpu global is aligned/sized per
+; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
+; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
+
+; As simple as that....
 ;--------------------------------------------------------------------------
 
+; scratch memory to save [r0-r3] used to code TLB refill Handler
 ARCFP_DATA ex_saved_reg1
-	.align 1 << L1_CACHE_SHIFT	; IMP: Must be Cache Line aligned
+	.align 1 << L1_CACHE_SHIFT
 	.type ex_saved_reg1, @object
 #ifdef CONFIG_SMP
 	.size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
@@ -66,6 +85,44 @@ ex_saved_reg1:
 	.zero 16
 #endif
 
+.macro TLBMISS_FREEUP_REGS
+#ifdef CONFIG_SMP
+	sr  r0, [ARC_REG_SCRATCH_DATA0]	; freeup r0 to code with
+	GET_CPU_ID  r0			; get to per cpu scratch mem,
+	lsl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
+	add r0, @ex_saved_reg1, r0
+#else
+	st    r0, [@ex_saved_reg1]
+	mov_s r0, @ex_saved_reg1
+#endif
+	st_s  r1, [r0, 4]
+	st_s  r2, [r0, 8]
+	st_s  r3, [r0, 12]
+
+	; VERIFY if the ASID in MMU-PID Reg is same as
+	; one in Linux data structures
+
+	DBG_ASID_MISMATCH
+.endm
+
+.macro TLBMISS_RESTORE_REGS
+#ifdef CONFIG_SMP
+	GET_CPU_ID  r0			; get to per cpu scratch mem
+	lsl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
+	add r0, @ex_saved_reg1, r0
+	ld_s  r3, [r0,12]
+	ld_s  r2, [r0, 8]
+	ld_s  r1, [r0, 4]
+	lr    r0, [ARC_REG_SCRATCH_DATA0]
+#else
+	mov_s r0, @ex_saved_reg1
+	ld_s  r3, [r0,12]
+	ld_s  r2, [r0, 8]
+	ld_s  r1, [r0, 4]
+	ld_s  r0, [r0]
+#endif
+.endm
+
 ;============================================================================
 ; Troubleshooting Stuff
 ;============================================================================
@@ -191,68 +248,6 @@ ex_saved_reg1:
 #endif
 .endm
 
-;-----------------------------------------------------------------
-; ARC700 Exception Handling doesn't auto-switch stack and it only provides
-; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
-;
-; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
-; "global" is used to free-up FIRST core reg to be able to code the rest of
-; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
-; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
-; need to be saved as well by extending the "global" to be 4 words. Hence
-; ".size ex_saved_reg1, 16"
-; [All of this dance is to avoid stack switching for each TLB Miss, since we
-; only need to save only a handful of regs, as opposed to complete reg file]
-;
-; For ARC700 SMP, the "global" obviously can't be used for free up the FIRST
-; core reg as it will not be SMP safe.
-; Thus scratch AUX reg is used (and no longer used to cache task PGD).
-; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
-; Epilogue thus has to locate the "per-cpu" storage for regs.
-; To avoid cache line bouncing the per-cpu global is aligned/sized per
-; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
-; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
-
-; As simple as that....
-
-.macro TLBMISS_FREEUP_REGS
-#ifdef CONFIG_SMP
-	sr  r0, [ARC_REG_SCRATCH_DATA0]	; freeup r0 to code with
-	GET_CPU_ID  r0			; get to per cpu scratch mem,
-	lsl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
-	add r0, @ex_saved_reg1, r0
-#else
-	st    r0, [@ex_saved_reg1]
-	mov_s r0, @ex_saved_reg1
-#endif
-	st_s  r1, [r0, 4]
-	st_s  r2, [r0, 8]
-	st_s  r3, [r0, 12]
-
-	; VERIFY if the ASID in MMU-PID Reg is same as
-	; one in Linux data structures
-
-	DBG_ASID_MISMATCH
-.endm
-
-;-----------------------------------------------------------------
-.macro TLBMISS_RESTORE_REGS
-#ifdef CONFIG_SMP
-	GET_CPU_ID  r0			; get to per cpu scratch mem
-	lsl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
-	add r0, @ex_saved_reg1, r0
-	ld_s  r3, [r0,12]
-	ld_s  r2, [r0, 8]
-	ld_s  r1, [r0, 4]
-	lr    r0, [ARC_REG_SCRATCH_DATA0]
-#else
-	mov_s r0, @ex_saved_reg1
-	ld_s  r3, [r0,12]
-	ld_s  r2, [r0, 8]
-	ld_s  r1, [r0, 4]
-	ld_s  r0, [r0]
-#endif
-.endm
-
 
 ARCFP_CODE	;Fast Path Code, candidate for ICCM
 
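Note on the moved comment block: it explains why ex_saved_reg1 is 16 bytes on UP but one full cache line per CPU on SMP, so concurrent TLB misses on different cores never touch the same line. As a rough, hedged sketch (not kernel code) of the address arithmetic the SMP path of TLBMISS_FREEUP_REGS/TLBMISS_RESTORE_REGS performs (GET_CPU_ID, shift left by L1_CACHE_SHIFT, add the base of the global), the C below mirrors that layout; the names suffixed _sketch, the 64-byte line size, and the 4-CPU count are assumptions for illustration only, not values taken from this patch.

#include <stdint.h>

#define L1_CACHE_SHIFT_SKETCH	6	/* assumed 64-byte L1 line, not from the patch */
#define NR_CPUS_SKETCH		4	/* assumed stand-in for CONFIG_NR_CPUS */

/* one cache-line-sized slot per CPU, cache-line aligned, like ex_saved_reg1 */
static uint32_t ex_saved_reg1_sketch[NR_CPUS_SKETCH << (L1_CACHE_SHIFT_SKETCH - 2)]
	__attribute__((aligned(1 << L1_CACHE_SHIFT_SKETCH)));

/* mirrors: GET_CPU_ID r0; lsl r0, r0, L1_CACHE_SHIFT; add r0, @ex_saved_reg1, r0 */
static uint32_t *scratch_slot(unsigned int cpu_id)
{
	return (uint32_t *)((char *)ex_saved_reg1_sketch
			    + ((uintptr_t)cpu_id << L1_CACHE_SHIFT_SKETCH));
}

/* mirrors the st_s stores at offsets 4, 8, 12 within the per-cpu slot */
static void save_r1_r3(unsigned int cpu_id, uint32_t r1, uint32_t r2, uint32_t r3)
{
	uint32_t *slot = scratch_slot(cpu_id);

	slot[1] = r1;	/* st_s r1, [r0, 4]  */
	slot[2] = r2;	/* st_s r2, [r0, 8]  */
	slot[3] = r3;	/* st_s r3, [r0, 12] */
}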