diff options
Diffstat (limited to 'arch/openrisc/mm/tlb.c')
-rw-r--r-- | arch/openrisc/mm/tlb.c | 193 |
1 file changed, 193 insertions, 0 deletions
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c new file mode 100644 index 00000000000..56b0b89624a --- /dev/null +++ b/arch/openrisc/mm/tlb.c | |||
@@ -0,0 +1,193 @@ | |||
1 | /* | ||
2 | * OpenRISC tlb.c | ||
3 | * | ||
4 | * Linux architectural port borrowing liberally from similar works of | ||
5 | * others. All original copyrights apply as per the original source | ||
6 | * declaration. | ||
7 | * | ||
8 | * Modifications for the OpenRISC architecture: | ||
9 | * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> | ||
10 | * Copyright (C) 2010-2011 Julius Baxter <julius.baxter@orsoc.se> | ||
11 | * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or | ||
14 | * modify it under the terms of the GNU General Public License | ||
15 | * as published by the Free Software Foundation; either version | ||
16 | * 2 of the License, or (at your option) any later version. | ||
17 | */ | ||
18 | |||
19 | #include <linux/sched.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/types.h> | ||
24 | #include <linux/ptrace.h> | ||
25 | #include <linux/mman.h> | ||
26 | #include <linux/mm.h> | ||
27 | #include <linux/init.h> | ||
28 | |||
29 | #include <asm/system.h> | ||
30 | #include <asm/segment.h> | ||
31 | #include <asm/tlbflush.h> | ||
32 | #include <asm/pgtable.h> | ||
33 | #include <asm/mmu_context.h> | ||
34 | #include <asm/spr_defs.h> | ||
35 | |||
/* Marker for an mm that has no hardware MMU context allocated yet */
#define NO_CONTEXT -1

/*
 * Number of sets in each TLB, decoded from the MMU configuration
 * registers.  NOTE: the DTLB geometry must come from SPR_DMMUCFGR;
 * the original code read SPR_IMMUCFGR (and masked with the IMMU NTS
 * field) for both TLBs -- a copy-paste bug that returns the wrong set
 * count on cores where the two TLBs are sized differently.
 */
#define NUM_DTLB_SETS (1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> \
			    SPR_DMMUCFGR_NTS_OFF))
#define NUM_ITLB_SETS (1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> \
			    SPR_IMMUCFGR_NTS_OFF))
/* TLB set that a virtual address indexes (direct-indexed by page number) */
#define DTLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_DTLB_SETS-1))
#define ITLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_ITLB_SETS-1))
44 | /* | ||
45 | * Invalidate all TLB entries. | ||
46 | * | ||
47 | * This comes down to setting the 'valid' bit for all xTLBMR registers to 0. | ||
48 | * Easiest way to accomplish this is to just zero out the xTLBMR register | ||
49 | * completely. | ||
50 | * | ||
51 | */ | ||
52 | |||
53 | void flush_tlb_all(void) | ||
54 | { | ||
55 | int i; | ||
56 | unsigned long num_tlb_sets; | ||
57 | |||
58 | /* Determine number of sets for IMMU. */ | ||
59 | /* FIXME: Assumption is I & D nsets equal. */ | ||
60 | num_tlb_sets = NUM_ITLB_SETS; | ||
61 | |||
62 | for (i = 0; i < num_tlb_sets; i++) { | ||
63 | mtspr_off(SPR_DTLBMR_BASE(0), i, 0); | ||
64 | mtspr_off(SPR_ITLBMR_BASE(0), i, 0); | ||
65 | } | ||
66 | } | ||
67 | |||
/* Does the MMU implement the xTLBEIR (entry-invalidate) register? */
#define have_dtlbeir (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_TEIRI)
#define have_itlbeir (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_TEIRI)

/*
 * Invalidate a single page. This is what the xTLBEIR register is for.
 *
 * There's no point in checking the vma for PAGE_EXEC to determine whether it's
 * the data or instruction TLB that should be flushed... that would take more
 * than the few instructions that the following compiles down to!
 *
 * The case where we don't have the xTLBEIR register really only works for
 * MMU's with a single way and is hard-coded that way.
 *
 * Note: no trailing semicolons inside the macro bodies -- the caller
 * supplies one, and an embedded ';' would break use in an unbraced
 * 'if (...) macro(); else ...' statement.
 */

#define flush_dtlb_page_eir(addr) mtspr(SPR_DTLBEIR, addr)
#define flush_dtlb_page_no_eir(addr) \
	mtspr_off(SPR_DTLBMR_BASE(0), DTLB_OFFSET(addr), 0)

#define flush_itlb_page_eir(addr) mtspr(SPR_ITLBEIR, addr)
#define flush_itlb_page_no_eir(addr) \
	mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0)
89 | |||
90 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) | ||
91 | { | ||
92 | if (have_dtlbeir) | ||
93 | flush_dtlb_page_eir(addr); | ||
94 | else | ||
95 | flush_dtlb_page_no_eir(addr); | ||
96 | |||
97 | if (have_itlbeir) | ||
98 | flush_itlb_page_eir(addr); | ||
99 | else | ||
100 | flush_itlb_page_no_eir(addr); | ||
101 | } | ||
102 | |||
103 | void flush_tlb_range(struct vm_area_struct *vma, | ||
104 | unsigned long start, unsigned long end) | ||
105 | { | ||
106 | int addr; | ||
107 | bool dtlbeir; | ||
108 | bool itlbeir; | ||
109 | |||
110 | dtlbeir = have_dtlbeir; | ||
111 | itlbeir = have_itlbeir; | ||
112 | |||
113 | for (addr = start; addr < end; addr += PAGE_SIZE) { | ||
114 | if (dtlbeir) | ||
115 | flush_dtlb_page_eir(addr); | ||
116 | else | ||
117 | flush_dtlb_page_no_eir(addr); | ||
118 | |||
119 | if (itlbeir) | ||
120 | flush_itlb_page_eir(addr); | ||
121 | else | ||
122 | flush_itlb_page_no_eir(addr); | ||
123 | } | ||
124 | } | ||
125 | |||
/*
 * Invalidate the TLB entries belonging to a single mm context.
 *
 * Per-mm flushing was misbehaving (the mm struct handed in looked
 * bogus), so -- like several other architectures -- we simply
 * invalidate the entire TLB instead.
 * FIXME: restore per-mm flushing via flush_tlb_range once debugged.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	flush_tlb_all();
}
141 | |||
142 | /* called in schedule() just before actually doing the switch_to */ | ||
143 | |||
144 | void switch_mm(struct mm_struct *prev, struct mm_struct *next, | ||
145 | struct task_struct *next_tsk) | ||
146 | { | ||
147 | /* remember the pgd for the fault handlers | ||
148 | * this is similar to the pgd register in some other CPU's. | ||
149 | * we need our own copy of it because current and active_mm | ||
150 | * might be invalid at points where we still need to derefer | ||
151 | * the pgd. | ||
152 | */ | ||
153 | current_pgd = next->pgd; | ||
154 | |||
155 | /* We don't have context support implemented, so flush all | ||
156 | * entries belonging to previous map | ||
157 | */ | ||
158 | |||
159 | if (prev != next) | ||
160 | flush_tlb_mm(prev); | ||
161 | |||
162 | } | ||
163 | |||
164 | /* | ||
165 | * Initialize the context related info for a new mm_struct | ||
166 | * instance. | ||
167 | */ | ||
168 | |||
169 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | ||
170 | { | ||
171 | mm->context = NO_CONTEXT; | ||
172 | return 0; | ||
173 | } | ||
174 | |||
/*
 * Tear down whatever MMU context @mm holds.  Called by __exit_mm when
 * the last user of the mm drops it; with no context support this
 * reduces to flushing the mm's TLB entries.
 */

void destroy_context(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}
185 | |||
186 | /* called once during VM initialization, from init.c */ | ||
187 | |||
188 | void __init tlb_init(void) | ||
189 | { | ||
190 | /* Do nothing... */ | ||
191 | /* invalidate the entire TLB */ | ||
192 | /* flush_tlb_all(); */ | ||
193 | } | ||