author		Vineet Gupta <vgupta@synopsys.com>	2013-01-18 04:42:19 -0500
committer	Vineet Gupta <vgupta@synopsys.com>	2013-02-15 12:45:52 -0500
commit		cc562d2eae93bc2768a6575d31c089719e8939e8 (patch)
tree		a40edb29139b37593eab077ab2ad91b6c1c5a405
parent		f1f3347da9440eedd2350f4f5d13d8860f570b92 (diff)
ARC: MMU Exception Handling
* MMU I-TLB / D-TLB Miss Exceptions
- Fast Path TLB Refill Handler
- slowpath TLB creation via do_page_fault() -> update_mmu_cache()
* Duplicate PD Exception Handler
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
 arch/arc/include/asm/arcregs.h  |  91
 arch/arc/include/asm/tlb-mmu1.h | 104
 arch/arc/include/asm/tlb.h      |  41
 arch/arc/mm/tlb.c               | 267
 arch/arc/mm/tlbex.S             | 351
 5 files changed, 854 insertions(+), 0 deletions(-)
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index c12eb9b4f449..1c24485fd04b 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -13,6 +13,7 @@
 
 /* Build Configuration Registers */
 #define ARC_REG_VECBASE_BCR	0x68
+#define ARC_REG_MMU_BCR		0x6f
 
 /* status32 Bits Positions */
 #define STATUS_H_BIT		0	/* CPU Halted */
@@ -36,6 +37,35 @@
 #define STATUS_U_MASK		(1<<STATUS_U_BIT)
 #define STATUS_L_MASK		(1<<STATUS_L_BIT)
 
+/*
+ * ECR: Exception Cause Reg bits-n-pieces
+ * [23:16] = Exception Vector
+ * [15: 8] = Exception Cause Code
+ * [ 7: 0] = Exception Parameters (for certain types only)
+ */
+#define ECR_VEC_MASK			0xff0000
+#define ECR_CODE_MASK			0x00ff00
+#define ECR_PARAM_MASK			0x0000ff
+
+/* Exception Cause Vector Values */
+#define ECR_V_INSN_ERR			0x02
+#define ECR_V_MACH_CHK			0x20
+#define ECR_V_ITLB_MISS			0x21
+#define ECR_V_DTLB_MISS			0x22
+#define ECR_V_PROTV			0x23
+
+/* Protection Violation Exception Cause Code Values */
+#define ECR_C_PROTV_INST_FETCH		0x00
+#define ECR_C_PROTV_LOAD		0x01
+#define ECR_C_PROTV_STORE		0x02
+#define ECR_C_PROTV_XCHG		0x03
+#define ECR_C_PROTV_MISALIG_DATA	0x04
+
+/* DTLB Miss Exception Cause Code Values */
+#define ECR_C_BIT_DTLB_LD_MISS		8
+#define ECR_C_BIT_DTLB_ST_MISS		9
+
+
 /* Auxiliary registers */
 #define AUX_IDENTITY		4
 #define AUX_INTR_VEC_BASE	0x25
@@ -58,6 +88,44 @@
 #define TIMER_CTRL_IE		(1 << 0) /* Interrupt when Count reaches limit */
 #define TIMER_CTRL_NH		(1 << 1) /* Count only when CPU NOT halted */
 
+#if defined(CONFIG_ARC_MMU_V1)
+#define CONFIG_ARC_MMU_VER 1
+#elif defined(CONFIG_ARC_MMU_V2)
+#define CONFIG_ARC_MMU_VER 2
+#elif defined(CONFIG_ARC_MMU_V3)
+#define CONFIG_ARC_MMU_VER 3
+#else
+#error "Error: MMU ver"
+#endif
+
+/* MMU Management regs */
+#define ARC_REG_TLBPD0		0x405
+#define ARC_REG_TLBPD1		0x406
+#define ARC_REG_TLBINDEX	0x407
+#define ARC_REG_TLBCOMMAND	0x408
+#define ARC_REG_PID		0x409
+#define ARC_REG_SCRATCH_DATA0	0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE		(1 << 31)	/* Enable MMU for process */
+
+/* Error code if probe fails */
+#define TLB_LKUP_ERR		0x80000000
+
+/* TLB Commands */
+#define TLBWrite	0x1
+#define TLBRead		0x2
+#define TLBGetIndex	0x3
+#define TLBProbe	0x4
+
+#if (CONFIG_ARC_MMU_VER >= 2)
+#define TLBWriteNI	0x5	/* write JTLB without inv uTLBs */
+#define TLBIVUTLB	0x6	/* explicitly inv uTLBs */
+#else
+#undef TLBWriteNI		/* These cmds don't exist on older MMU */
+#undef TLBIVUTLB
+#endif
+
 /* Instruction cache related Auxiliary registers */
 #define ARC_REG_IC_BCR		0x77	/* Build Config reg */
 #define ARC_REG_IC_IVIC		0x10
@@ -205,6 +273,24 @@ struct arc_fpu {
  * Build Configuration Registers, with encoded hardware config
  */
 
+struct bcr_mmu_1_2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
+#else
+	unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
+#endif
+};
+
+struct bcr_mmu_3 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
+		     u_itlb:4, u_dtlb:4;
+#else
+	unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
+		     ways:4, ver:8;
+#endif
+};
+
 struct bcr_cache {
 #ifdef CONFIG_CPU_BIG_ENDIAN
 	unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
@@ -218,12 +304,17 @@ struct bcr_cache {
  * Generic structures to hold build configuration used at runtime
  */
 
+struct cpuinfo_arc_mmu {
+	unsigned int ver, pg_sz, sets, ways, u_dtlb, u_itlb, num_tlb;
+};
+
 struct cpuinfo_arc_cache {
 	unsigned int has_aliasing, sz, line_len, assoc, ver;
 };
 
 struct cpuinfo_arc {
 	struct cpuinfo_arc_cache icache, dcache;
+	struct cpuinfo_arc_mmu mmu;
 };
 
 extern struct cpuinfo_arc cpuinfo_arc700[];
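
As an aside on the ECR layout added above: the three masks carve the register into byte-wide fields. A minimal decode sketch in C (standalone; decode_ecr() and the sample value are illustrative, not part of the patch):

	#include <stdio.h>

	#define ECR_VEC_MASK	0xff0000	/* [23:16] exception vector */
	#define ECR_CODE_MASK	0x00ff00	/* [15: 8] cause code */
	#define ECR_PARAM_MASK	0x0000ff	/* [ 7: 0] parameters */

	/* Illustrative helper: split a raw ECR value into its three fields */
	static void decode_ecr(unsigned int ecr)
	{
		printf("vector=0x%02x code=0x%02x param=0x%02x\n",
		       (ecr & ECR_VEC_MASK) >> 16,
		       (ecr & ECR_CODE_MASK) >> 8,
		       ecr & ECR_PARAM_MASK);
	}

	int main(void)
	{
		decode_ecr(0x220100);	/* D-TLB miss vector 0x22, ld cause code 0x01 */
		return 0;
	}
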
diff --git a/arch/arc/include/asm/tlb-mmu1.h b/arch/arc/include/asm/tlb-mmu1.h
new file mode 100644
index 000000000000..a5ff961b1efc
--- /dev/null
+++ b/arch/arc/include/asm/tlb-mmu1.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_TLB_MMU_V1_H__
+#define __ASM_TLB_MMU_V1_H__
+
+#if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1)
+
+#include <asm/tlb.h>
+
+.macro TLB_WRITE_HEURISTICS
+
+#define JH_HACK1
+#undef JH_HACK2
+#undef JH_HACK3
+
+#ifdef JH_HACK3
+; Calculate set index for 2-way MMU
+; -avoiding use of GetIndex from MMU
+;  and its unpleasant LFSR pseudo-random sequence
+;
+; r1 = TLBPD0 from TLB_RELOAD above
+;
+; -- jh_ex_way_sel not cleared on startup
+;    didn't want to change setup.c
+;    hence extra instruction to clean
+;
+; -- should be in cache since in same line
+;    as r0/r1 saves above
+;
+ld  r0,[jh_ex_way_sel]			; victim pointer
+and r0,r0,1				; clean
+xor.f r0,r0,1				; flip
+st  r0,[jh_ex_way_sel]			; store back
+asr r0,r1,12				; get set # <<1, note bit 12=R=0
+or.nz r0,r0,1				; set way bit
+and r0,r0,0xff				; clean
+sr  r0,[ARC_REG_TLBINDEX]
+#endif
+
+#ifdef JH_HACK2
+; JH hack #2
+; Faster than hack #1 in non-thrash case, but hard-coded for 2-way MMU
+; Slower in thrash case (where it matters) because more code is executed
+; Inefficient due to two-register paradigm of this miss handler
+;
+/* r1 = data TLBPD0 at this point */
+lr  r0,[eret]			/* instruction address */
+xor r0,r0,r1			/* compare set # */
+and.f r0,r0,0x000fe000		/* 2-way MMU mask */
+bne 88f				/* not in same set - no need to probe */
+
+lr  r0,[eret]			/* instruction address */
+and r0,r0,PAGE_MASK		/* VPN of instruction address */
+; lr  r1,[ARC_REG_TLBPD0]	/* Data VPN+ASID - already in r1 from TLB_RELOAD */
+and r1,r1,0xff			/* Data ASID */
+or  r0,r0,r1			/* Instruction address + Data ASID */
+
+lr  r1,[ARC_REG_TLBPD0]		/* save TLBPD0 containing data TLB */
+sr  r0,[ARC_REG_TLBPD0]		/* write instruction address to TLBPD0 */
+sr  TLBProbe, [ARC_REG_TLBCOMMAND]	/* Look for instruction */
+lr  r0,[ARC_REG_TLBINDEX]	/* r0 = index where instruction is, if at all */
+sr  r1,[ARC_REG_TLBPD0]		/* restore TLBPD0 */
+
+xor r0,r0,1			/* flip bottom bit of data index */
+b.d 89f
+sr  r0,[ARC_REG_TLBINDEX]	/* and put it back */
+88:
+sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]
+89:
+#endif
+
+#ifdef JH_HACK1
+;
+; Always checks whether instruction will be kicked out by dtlb miss
+;
+mov_s r3, r1			; save PD0 prepared by TLB_RELOAD in r3
+lr  r0,[eret]			/* instruction address */
+and r0,r0,PAGE_MASK		/* VPN of instruction address */
+bmsk r1,r3,7			/* Data ASID, bits 7-0 */
+or_s r0,r0,r1			/* Instruction address + Data ASID */
+
+sr  r0,[ARC_REG_TLBPD0]		/* write instruction address to TLBPD0 */
+sr  TLBProbe, [ARC_REG_TLBCOMMAND]	/* Look for instruction */
+lr  r0,[ARC_REG_TLBINDEX]	/* r0 = index where instruction is, if at all */
+sr  r3,[ARC_REG_TLBPD0]		/* restore TLBPD0 */
+
+sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]
+lr  r1,[ARC_REG_TLBINDEX]	/* r1 = index where MMU wants to put data */
+cmp r0,r1			/* if no match on indices, go around */
+xor.eq r1,r1,1			/* flip bottom bit of data index */
+sr  r1,[ARC_REG_TLBINDEX]	/* and put it back */
+#endif
+
+.endm
+
+#endif
+
+#endif
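
In C terms, JH_HACK1 above probes for the I-TLB entry of the faulting instruction's page, asks the MMU for its proposed victim slot, and steers the data entry to the other way of the 2-way set when the two collide. A hedged sketch of that policy (tlb_probe() and tlb_get_index() are illustrative stand-ins for the TLBProbe/TLBGetIndex aux-register commands; 8 KB pages assumed):

	#define PAGE_MASK	(~((1UL << 13) - 1))	/* assuming 8 KB pages */

	/* Stand-ins for the TLBProbe / TLBGetIndex MMU commands */
	extern unsigned int tlb_probe(unsigned long pd0);
	extern unsigned int tlb_get_index(void);

	static unsigned int pick_dtlb_victim(unsigned long insn_vaddr,
					     unsigned long data_pd0)
	{
		unsigned long asid = data_pd0 & 0xff;	/* bmsk r1,r3,7 */
		unsigned int insn_idx, data_idx;

		/* where (if anywhere) the current instruction's page lives */
		insn_idx = tlb_probe((insn_vaddr & PAGE_MASK) | asid);

		/* where the MMU wants to put the data entry */
		data_idx = tlb_get_index();

		if (data_idx == insn_idx)	/* would evict our own code page */
			data_idx ^= 1;		/* use the other way of the set */

		return data_idx;
	}
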
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
new file mode 100644
index 000000000000..b571e121929b
--- /dev/null
+++ b/arch/arc/include/asm/tlb.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TLB_H
+#define _ASM_ARC_TLB_H
+
+#ifdef __KERNEL__
+
+#include <asm/pgtable.h>
+
+/* Masks for actual TLB "PD"s */
+#define PTE_BITS_IN_PD0	(_PAGE_GLOBAL | _PAGE_PRESENT)
+#define PTE_BITS_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE | \
+			 _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
+			 _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
+#else
+#define tlb_paranoid_check(a, b)
+#endif
+
+void arc_mmu_init(void);
+extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
+void __init read_decode_mmu_bcr(void);
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* __KERNEL__ */
+
+#endif	/* _ASM_ARC_TLB_H */
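
The two masks above split a single Linux PTE word between the MMU's two TLB-entry words: the valid/global flags ride with the virtual address in PD0, and everything else (PFN, cacheability, user and kernel permissions) goes to PD1. A sketch of that split as create_tlb() and CONV_PTE_TO_TLB perform it below (the masks are abstracted as parameters; the one-bit shift applies to MMU v1/v2 only, where PTE and PD0 flag positions differ):

	/* Illustrative only: pd0_mask/pd1_mask correspond to PTE_BITS_IN_PD0/PD1 */
	static void pte_to_tlb_entry(unsigned long pte, unsigned long vaddr,
				     unsigned int asid, int mmu_ver,
				     unsigned long pd0_mask, unsigned long pd1_mask,
				     unsigned int *pd0, unsigned int *pd1)
	{
		unsigned long flags = pte & pd0_mask;

		if (mmu_ver <= 2)
			flags >>= 1;	/* v1/v2 PD0 flag layout is shifted vs the PTE */

		*pd0 = vaddr | flags | asid;	/* vaddr already page-aligned */
		*pd1 = pte & pd1_mask;		/* PFN + cache + Kr/Kw/Kx bits */
	}
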
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index f1edae2410a7..404e5be4f704
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -21,3 +21,270 @@ int asid_cache = FIRST_ASID;
  * see get_new_mmu_context (asm-arc/mmu_context.h)
  */
 struct mm_struct *asid_mm_map[NUM_ASID + 1];
+
+
+/*
+ * Routine to create a TLB entry
+ */
+void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+	unsigned long flags;
+	unsigned int idx, asid_or_sasid;
+	unsigned long pd0_flags;
+
+	/*
+	 * create_tlb() assumes that current->mm == vma->vm_mm, since
+	 * -the ASID for the TLB entry is fetched from the MMU ASID reg
+	 *  (valid for curr)
+	 * -it completes the lazy write to the SASID reg (again valid for
+	 *  curr tsk)
+	 *
+	 * Removing the assumption involves
+	 * -Using vma->vm_mm->context{ASID,SASID}, as opposed to the MMU reg.
+	 * -Fixing the TLB paranoid debug code to not trigger false negatives.
+	 * -More importantly it makes this handler inconsistent with the
+	 *  fast-path TLB Refill handler, which always deals with "current"
+	 *
+	 * Let's see the use cases when current->mm != vma->vm_mm and we land
+	 * here:
+	 * 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
+	 *    Here VM wants to pre-install a TLB entry for the user stack
+	 *    while current->mm still points to the pre-execve mm (hence the
+	 *    condition). However the stack vaddr is soon relocated
+	 *    (randomization) and move_page_tables() tries to undo that TLB
+	 *    entry. Thus not creating the TLB entry is not any worse.
+	 *
+	 * 2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
+	 *    breakpoint in the debugged task. Not creating a TLB entry now
+	 *    is not performance critical.
+	 *
+	 * Neither case above is good enough to justify the code churn.
+	 */
+	if (current->active_mm != vma->vm_mm)
+		return;
+
+	local_irq_save(flags);
+
+	tlb_paranoid_check(vma->vm_mm->context.asid, address);
+
+	address &= PAGE_MASK;
+
+	/* update this PTE's credentials */
+	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
+
+	/* Create HW TLB entry Flags (in PD0) from PTE Flags */
+#if (CONFIG_ARC_MMU_VER <= 2)
+	pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
+#else
+	pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
+#endif
+
+	/* ASID for this task */
+	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
+
+	write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
+
+	/* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
+	write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));
+
+	/* First verify if an entry for this vaddr+ASID already exists */
+	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
+	idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+	/*
+	 * If not already present, get a free slot from the MMU.
+	 * Otherwise, the Probe has located the entry and set the INDEX Reg
+	 * to its existing location, causing the Write CMD to over-write
+	 * the existing entry with the new PD0 and PD1.
+	 */
+	if (likely(idx & TLB_LKUP_ERR))
+		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
+
+	/*
+	 * Commit the Entry to MMU
+	 * It doesn't sound safe to use the TLBWriteNI cmd here,
+	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
+	 */
+	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Arch hook called by core VM at the end of handle_mm_fault(), when a new
+ * PTE is entered in the Page Tables or an existing one is modified.
+ * We aggressively pre-install a TLB entry.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddress,
+		      pte_t *ptep)
+{
+	create_tlb(vma, vaddress, ptep);
+}
+
+/*
+ * Read the MMU Build Configuration Registers, decode them and save into
+ * the cpuinfo structure for later use.
+ * No validation is done here, simply read/convert the BCRs.
+ */
+void __init read_decode_mmu_bcr(void)
+{
+	unsigned int tmp;
+	struct bcr_mmu_1_2 *mmu2;	/* encoded MMU2 attr */
+	struct bcr_mmu_3 *mmu3;		/* encoded MMU3 attr */
+	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+	tmp = read_aux_reg(ARC_REG_MMU_BCR);
+	mmu->ver = (tmp >> 24);
+
+	if (mmu->ver <= 2) {
+		mmu2 = (struct bcr_mmu_1_2 *)&tmp;
+		mmu->pg_sz = PAGE_SIZE;
+		mmu->sets = 1 << mmu2->sets;
+		mmu->ways = 1 << mmu2->ways;
+		mmu->u_dtlb = mmu2->u_dtlb;
+		mmu->u_itlb = mmu2->u_itlb;
+	} else {
+		mmu3 = (struct bcr_mmu_3 *)&tmp;
+		mmu->pg_sz = 512 << mmu3->pg_sz;
+		mmu->sets = 1 << mmu3->sets;
+		mmu->ways = 1 << mmu3->ways;
+		mmu->u_dtlb = mmu3->u_dtlb;
+		mmu->u_itlb = mmu3->u_itlb;
+	}
+
+	mmu->num_tlb = mmu->sets * mmu->ways;
+}
+
+void __init arc_mmu_init(void)
+{
+	/*
+	 * ASID mgmt data structures are compile time init
+	 * asid_cache = FIRST_ASID and asid_mm_map[] all zeroes
+	 */
+
+	local_flush_tlb_all();
+
+	/* Enable the MMU */
+	write_aux_reg(ARC_REG_PID, MMU_ENABLE);
+}
+
+/*
+ * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
+ * The mapping is Column-first.
+ *		---------------------	-----------
+ *		|way0|way1|way2|way3|	|way0|way1|
+ *		---------------------	-----------
+ * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
+ * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
+ * ~		~			~
+ * [set127]	| 508| 509| 510| 511|	| 254| 255|
+ *		---------------------	-----------
+ * For normal operations we don't (must not) care how the above works, since
+ * the MMU cmd getIndex(vaddr) abstracts it away.
+ * However, for walking the WAYS of a SET, we need to know this.
+ */
+#define SET_WAY_TO_IDX(mmu, set, way)	((set) * mmu->ways + (way))
+
+/* Handling of Duplicate PD (TLB entry) in MMU.
+ * -Could be due to buggy customer tapeouts or obscure kernel bugs
+ * -MMU complains not at the time of duplicate PD installation, but at the
+ *  time of a lookup matching multiple ways.
+ * -Ideally these should never happen - but if they do - workaround by
+ *  deleting the duplicate one.
+ * -Knob to be verbose about it (TODO: hook it up to debugfs)
+ */
+volatile int dup_pd_verbose = 1;	/* Be silent about it or complain (default) */
+
+void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
+			  struct pt_regs *regs)
+{
+	int set, way, n;
+	unsigned int pd0[4], pd1[4];	/* assume max 4 ways */
+	unsigned long flags, is_valid;
+	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+	local_irq_save(flags);
+
+	/* re-enable the MMU */
+	write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
+
+	/* loop thru all sets of TLB */
+	for (set = 0; set < mmu->sets; set++) {
+
+		/* read out all the ways of current set */
+		for (way = 0, is_valid = 0; way < mmu->ways; way++) {
+			write_aux_reg(ARC_REG_TLBINDEX,
+				      SET_WAY_TO_IDX(mmu, set, way));
+			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
+			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
+			pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
+			is_valid |= pd0[way] & _PAGE_PRESENT;
+		}
+
+		/* If all the WAYS in SET are empty, skip to next SET */
+		if (!is_valid)
+			continue;
+
+		/* Scan the set for duplicate ways: needs a nested loop */
+		for (way = 0; way < mmu->ways; way++) {
+			if (!pd0[way])
+				continue;
+
+			for (n = way + 1; n < mmu->ways; n++) {
+				if ((pd0[way] & PAGE_MASK) ==
+				    (pd0[n] & PAGE_MASK)) {
+
+					if (dup_pd_verbose) {
+						pr_info("Duplicate PD's @"
+							"[%d:%d]/[%d:%d]\n",
+							set, way, set, n);
+						pr_info("TLBPD0[%u]: %08x\n",
+							way, pd0[way]);
+					}
+
+					/*
+					 * clear entry @way and not @n. This is
+					 * critical to our optimised loop
+					 */
+					pd0[way] = pd1[way] = 0;
+					write_aux_reg(ARC_REG_TLBINDEX,
+						SET_WAY_TO_IDX(mmu, set, way));
+					__tlb_entry_erase();
+				}
+			}
+		}
+	}
+
+	local_irq_restore(flags);
+}
+
+/***********************************************************************
+ * Diagnostic Routines
+ *  -Called from Low Level TLB Handlers if things don't look good
+ **********************************************************************/
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+
+/*
+ * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDs
+ * don't match
+ */
+void print_asid_mismatch(int is_fast_path)
+{
+	int pid_sw, pid_hw;
+
+	pid_sw = current->active_mm->context.asid;
+	pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
+
+	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
+		 is_fast_path ? "Fast" : "Slow", pid_sw, pid_hw);
+
+	__asm__ __volatile__("flag 1");
+}
+
+void tlb_paranoid_check(unsigned int pid_sw, unsigned long addr)
+{
+	unsigned int pid_hw;
+
+	pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
+
+	if (addr < 0x70000000 && ((pid_hw != pid_sw) || (pid_sw == NO_ASID)))
+		print_asid_mismatch(0);
+}
+#endif
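
The column-first mapping documented above is just set * ways + way; a quick standalone check against the two tables in the comment (illustrative, mirrors SET_WAY_TO_IDX):

	#include <assert.h>

	/* Column-first linear TLB index, as in tlb.c's SET_WAY_TO_IDX */
	static unsigned int set_way_to_idx(unsigned int ways, unsigned int set,
					   unsigned int way)
	{
		return set * ways + way;
	}

	int main(void)
	{
		assert(set_way_to_idx(4, 1, 2) == 6);	  /* 4-way table: [set1], way2 */
		assert(set_way_to_idx(2, 127, 1) == 255); /* 2-way table: last entry */
		return 0;
	}
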
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
new file mode 100644
index 000000000000..fc5b97129e55
--- /dev/null
+++ b/arch/arc/mm/tlbex.S
@@ -0,0 +1,351 @@
+/*
+ * TLB Exception Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: April 2011 :
+ *  -MMU v1: moved out legacy code into a separate file
+ *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
+ *   helps avoid a shift when preparing PD0 from PTE
+ *
+ * Vineetg: July 2009
+ *  -For MMU V2, we need not do heuristics at the time of committing a D-TLB
+ *   entry, so that it doesn't knock out its I-TLB entry
+ *  -Some more fine tuning:
+ *   bmsk instead of add, asl.cc instead of branch, delay slot utilised etc
+ *
+ * Vineetg: July 2009
+ *  -Practically rewrote the I/D TLB Miss handlers
+ *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
+ *   Hence leaner by 1.5 K
+ *   Used Conditional arithmetic to replace excessive branching
+ *   Also used short instructions wherever possible
+ *
+ * Vineetg: Aug 13th 2008
+ *  -Passing ECR (Exception Cause REG) to do_page_fault() for printing
+ *   more information in case of a Fatality
+ *
+ * Vineetg: March 25th Bug #92690
+ *  -Added Debug Code to check if sw-ASID == hw-ASID
+ *
+ * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
+ */
+
+	.cpu A7
+
+#include <linux/linkage.h>
+#include <asm/entry.h>
+#include <asm/tlb.h>
+#include <asm/pgtable.h>
+#include <asm/arcregs.h>
+#include <asm/cache.h>
+#include <asm/processor.h>
+#if (CONFIG_ARC_MMU_VER == 1)
+#include <asm/tlb-mmu1.h>
+#endif
+
+;--------------------------------------------------------------------------
+; scratch memory to save the registers (r0-r3) used to code TLB refill Handler
+; For details refer to comments before TLBMISS_FREEUP_REGS below
+;--------------------------------------------------------------------------
+
+	.section .data
+	.global ex_saved_reg1
+	.align 1 << L1_CACHE_SHIFT	; IMP: Must be Cache Line aligned
+	.type ex_saved_reg1, @object
+	.size ex_saved_reg1, 16
+ex_saved_reg1:
+	.zero 16
+
+;============================================================================
+; Troubleshooting Stuff
+;============================================================================
+
+; Linux keeps ASID (Address Space ID) in task->active_mm->context.asid
+; When Creating TLB Entries, instead of doing 3 dependent loads from memory,
+; we use the MMU PID Reg to get current ASID.
+; In bizarre scenarios SW and HW ASID can get out-of-sync, which is trouble.
+; So we try to detect this in the TLB Miss handler
+
+
+.macro DBG_ASID_MISMATCH
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+
+	; make sure h/w ASID is same as s/w ASID
+
+	GET_CURR_TASK_ON_CPU  r3
+	ld r0, [r3, TASK_ACT_MM]
+	ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
+
+	lr r1, [ARC_REG_PID]
+	and r1, r1, 0xFF
+	breq r1, r0, 5f
+
+	; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
+	lr r0, [erstatus]
+	bbit0 r0, STATUS_U_BIT, 5f
+
+	; We sure are in troubled waters, Flag the error, but to do so
+	; need to switch to kernel mode stack to call error routine
+	GET_TSK_STACK_BASE r3, sp
+
+	; Call printk to shout out aloud
+	mov r0, 1
+	j print_asid_mismatch
+
+5:	; ASIDs match so proceed normally
+	nop
+
+#endif
+
+.endm
+
+;============================================================================
+; TLB Miss handling Code
+;============================================================================
+
+;-----------------------------------------------------------------------------
+; This macro does the page-table lookup for the faulting address.
+; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
+.macro LOAD_FAULT_PTE
+
+	lr  r2, [efa]
+
+	lr  r1, [ARC_REG_SCRATCH_DATA0]	; current pgd
+
+	lsr     r0, r2, PGDIR_SHIFT	; Bits for indexing into PGD
+	ld.as   r1, [r1, r0]		; PGD entry corresp to faulting addr
+	and.f   r1, r1, PAGE_MASK	; Ignoring protection and other flags
+	;   contains Ptr to Page Table
+	bz.d    do_slow_path_pf		; if no Page Table, do page fault
+
+	; Get the PTE entry: The idea is
+	; (1) x = addr >> PAGE_SHIFT	-> masks page-off bits from @fault-addr
+	; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
+	; (3) z = pgtbl[y]
+	; To avoid the multiply by 4 (PTE size) at the end, we fold it into
+	; the shift (-2) and the mask (<<2) below
+
+	lsr     r0, r2, (PAGE_SHIFT - 2)
+	and     r0, r0, ( (PTRS_PER_PTE - 1) << 2)
+	ld.aw   r0, [r1, r0]		; get PTE and PTE ptr for fault addr
+
+.endm
+
+;-----------------------------------------------------------------
+; Convert Linux PTE entry into TLB entry
+; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
+; IN: r0 = PTE, r1 = ptr to PTE
+
+.macro CONV_PTE_TO_TLB
+	and r3, r0, PTE_BITS_IN_PD1	; Extract permission flags+PFN from PTE
+	sr  r3, [ARC_REG_TLBPD1]	; these go in PD1
+
+	and r2, r0, PTE_BITS_IN_PD0	; Extract other PTE flags: (V)alid, (G)lb
+#if (CONFIG_ARC_MMU_VER <= 2)		/* Need not be done for v3 onwards */
+	lsr r2, r2			; shift PTE flags to match layout in PD0
+#endif
+
+	lr  r3, [ARC_REG_TLBPD0]	; MMU prepares PD0 with vaddr and asid
+
+	or  r3, r3, r2			; S | vaddr | {sasid|asid}
+	sr  r3, [ARC_REG_TLBPD0]	; rewrite PD0
+.endm
+
+;-----------------------------------------------------------------
+; Commit the TLB entry into MMU
+
+.macro COMMIT_ENTRY_TO_MMU
+
+	/* Get free TLB slot: Set = computed from vaddr, way = random */
+	sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]
+
+	/* Commit the Write */
+#if (CONFIG_ARC_MMU_VER >= 2)	/* introduced in v2 */
+	sr TLBWriteNI, [ARC_REG_TLBCOMMAND]
+#else
+	sr TLBWrite, [ARC_REG_TLBCOMMAND]
+#endif
+.endm
+
+;-----------------------------------------------------------------
+; ARC700 Exception Handling doesn't auto-switch stack and it only provides
+; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
+;
+; For Non-SMP, the scratch AUX reg is repurposed to cache the task PGD, so a
+; "global" is used to free up the FIRST core reg to be able to code the rest
+; of the exception prologue (IRQs are auto-disabled on Exceptions, so it's
+; IRQ-safe). Since the Fast Path TLB Miss handler is coded with 4 regs, the
+; remaining 3 need to be saved as well, by extending the "global" to be 4
+; words. Hence ".size ex_saved_reg1, 16"
+; [All of this dance is to avoid stack switching for each TLB Miss, since we
+; only need to save a handful of regs, as opposed to the complete reg file]
+
+; As simple as that....
+
+.macro TLBMISS_FREEUP_REGS
+	st    r0, [@ex_saved_reg1]
+	mov_s r0, @ex_saved_reg1
+	st_s  r1, [r0, 4]
+	st_s  r2, [r0, 8]
+	st_s  r3, [r0, 12]
+
+	; VERIFY if the ASID in MMU-PID Reg is same as
+	; one in Linux data structures
+
+	DBG_ASID_MISMATCH
+.endm
+
+;-----------------------------------------------------------------
+.macro TLBMISS_RESTORE_REGS
+	mov_s r0, @ex_saved_reg1
+	ld_s  r3, [r0, 12]
+	ld_s  r2, [r0, 8]
+	ld_s  r1, [r0, 4]
+	ld_s  r0, [r0]
+.endm
+
+.section .text, "ax",@progbits	; Fast Path Code, candidate for ICCM
+
+;-----------------------------------------------------------------------------
+; I-TLB Miss Exception Handler
+;-----------------------------------------------------------------------------
+
+ARC_ENTRY EV_TLBMissI
+
+	TLBMISS_FREEUP_REGS
+
+	;----------------------------------------------------------------
+	; Get the PTE corresponding to V-addr accessed
+	LOAD_FAULT_PTE
+
+	;----------------------------------------------------------------
+	; VERIFY_PTE: Check if PTE permissions approp for executing code
+	cmp_s   r2, VMALLOC_START
+	mov.lo  r2, (_PAGE_PRESENT | _PAGE_READ | _PAGE_EXECUTE)
+	mov.hs  r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE)
+
+	and     r3, r0, r2	; Mask out NON Flag bits from PTE
+	xor.f   r3, r3, r2	; check ( ( pte & flags_test ) == flags_test )
+	bnz     do_slow_path_pf
+
+	; Let Linux VM know that the page was accessed
+	or      r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED)  ; set Accessed Bit
+	st_s    r0, [r1]                                  ; Write back PTE
+
+	CONV_PTE_TO_TLB
+	COMMIT_ENTRY_TO_MMU
+	TLBMISS_RESTORE_REGS
+	rtie
+
+ARC_EXIT EV_TLBMissI
+
+;-----------------------------------------------------------------------------
+; D-TLB Miss Exception Handler
+;-----------------------------------------------------------------------------
+
+ARC_ENTRY EV_TLBMissD
+
+	TLBMISS_FREEUP_REGS
+
+	;----------------------------------------------------------------
+	; Get the PTE corresponding to V-addr accessed
+	; If the PTE exists, it will set up r0 = PTE, r1 = ptr to PTE
+	LOAD_FAULT_PTE
+
+	;----------------------------------------------------------------
+	; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)
+
+	mov_s   r2, 0
+	lr      r3, [ecr]
+	btst_s  r3, ECR_C_BIT_DTLB_LD_MISS	; Read Access
+	or.nz   r2, r2, _PAGE_READ		; chk for Read flag in PTE
+	btst_s  r3, ECR_C_BIT_DTLB_ST_MISS	; Write Access
+	or.nz   r2, r2, _PAGE_WRITE		; chk for Write flag in PTE
+	; Above laddering takes care of XCHG access,
+	;   which is both Read and Write
+
+	; If kernel mode access, make _PAGE_xx flags into _PAGE_K_xx
+	; For copy_(to|from)_user, despite the exception being taken in kernel
+	; mode, this code is not hit, because EFA would still be the user mode
+	; address (EFA < 0x6000_0000).
+	; This code is for legit kernel mode faults, vmalloc specifically
+	; (EFA: 0x7000_0000 to 0x7FFF_FFFF)
+
+	lr      r3, [efa]
+	cmp     r3, VMALLOC_START - 1	; If kernel mode access
+	asl.hi  r2, r2, 3		; make _PAGE_xx flags into _PAGE_K_xx
+	or      r2, r2, _PAGE_PRESENT	; Common flag for K/U mode
+
+	; By now, r2 is set up with all the Flags we need to check in the PTE
+	and     r3, r0, r2		; Mask out NON Flag bits from PTE
+	brne.d  r3, r2, do_slow_path_pf	; is ((pte & flags_test) == flags_test)
+
+	;----------------------------------------------------------------
+	; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
+	lr      r3, [ecr]
+	or      r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; Accessed bit always
+	btst_s  r3, ECR_C_BIT_DTLB_ST_MISS	; See if it was a Write Access?
+	or.nz   r0, r0, _PAGE_MODIFIED		; if Write, set Dirty bit as well
+	st_s    r0, [r1]			; Write back PTE
+
+	CONV_PTE_TO_TLB
+
+#if (CONFIG_ARC_MMU_VER == 1)
+	; MMU with 2-way set assoc J-TLB needs some help in the pathetic case
+	; of a memcpy where 3 parties contend for 2 ways, resulting in a
+	; livelock. But only for the old MMU, or one with the Metal Fix
+	TLB_WRITE_HEURISTICS
+#endif
+
+	COMMIT_ENTRY_TO_MMU
+	TLBMISS_RESTORE_REGS
+	rtie
+
+;-------- Common routine to call Linux Page Fault Handler -----------
+do_slow_path_pf:
+
+	; Restore the 4 scratch regs saved by the fast path miss handler
+	TLBMISS_RESTORE_REGS
+
+	; Slow path TLB Miss handled as a regular ARC Exception
+	; (stack switching / save the complete reg-file).
+	; That requires freeing up r9
+	EXCPN_PROLOG_FREEUP_REG r9
+
+	lr  r9, [erstatus]
+
+	SWITCH_TO_KERNEL_STK
+	SAVE_ALL_SYS
+
+	; ------- setup args for Linux Page fault Handler ---------
+	mov_s r0, sp
+	lr  r2, [efa]
+	lr  r3, [ecr]
+
+	; Both st and ex imply WRITE access of some sort, hence do_page_fault()
+	; is invoked with write=1 for DTLB-st/ex Miss and write=0 for ITLB miss
+	; or DTLB-ld Miss
+	; DTLB Miss Cause code is ld = 0x01, st = 0x02, ex = 0x03
+	; The following code uses the fact that st/ex have one bit in common
+
+	btst_s r3, ECR_C_BIT_DTLB_ST_MISS
+	mov.z  r1, 0
+	mov.nz r1, 1
+
+	; We don't want exceptions to be disabled while the fault is handled.
+	; Now that we have saved the context, we return from exception, hence
+	; exceptions get re-enabled
+
+	FAKE_RET_FROM_EXCPN r9
+
+	bl  do_page_fault
+	b   ret_from_exception
+
+ARC_EXIT EV_TLBMissD
+
+ARC_ENTRY EV_TLBMissB	; Bogus entry to measure sz of DTLBMiss hdlr
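
For reference, the btst/mov.z/mov.nz triad above reduces to a one-bit test on the raw ECR value; a C rendering under the bit numbering from arcregs.h (fault_is_write() is an illustrative name, not a kernel function):

	#define ECR_C_BIT_DTLB_ST_MISS	9	/* from arcregs.h above */

	/* write=1 for DTLB st/ex miss (cause codes 0x02/0x03 share code bit 1,
	 * i.e. ECR bit 9); write=0 for ITLB miss or DTLB ld miss (code 0x01).
	 */
	static int fault_is_write(unsigned long ecr)
	{
		return (ecr >> ECR_C_BIT_DTLB_ST_MISS) & 1;
	}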