author     Vineet Gupta <vgupta@synopsys.com>    2013-01-18 04:42:19 -0500
committer  Vineet Gupta <vgupta@synopsys.com>    2013-02-15 12:45:51 -0500
commit     f1f3347da9440eedd2350f4f5d13d8860f570b92 (patch)
tree       47d652aeaf9c4455d085578c976c73789620ba8c /arch/arc
parent     5dda4dc570ac41e3bd73ef871c500aeb7005c6b0 (diff)
ARC: MMU Context Management
ARC700 MMU provides for tagging TLB entries with an 8-bit ASID to avoid
having to flush the TLB on every task switch.
It also allows for a quick way to invalidate all the TLB entries of a
task, useful for:
* COW semantics during fork()
* task exit()ing
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch/arc')
-rw-r--r--   arch/arc/include/asm/arcregs.h        7
-rw-r--r--   arch/arc/include/asm/mmu.h            23
-rw-r--r--   arch/arc/include/asm/mmu_context.h    209
-rw-r--r--   arch/arc/mm/tlb.c                     23
4 files changed, 262 insertions, 0 deletions
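
Before the per-file changes, a self-contained sketch of the round-robin scheme the commit message describes may help. This is illustrative userspace C, not kernel code: FIRST_ASID/MAX_ASID and the tracker name asid_cache come from the patch, while alloc_asid() and the puts() stand-in for flush_tlb_all() are hypothetical.

/*
 * Standalone sketch (not kernel code) of round-robin ASID allocation:
 * a software tracker hands out 0..255; crossing MAX_ASID flushes the
 * whole TLB and starts a fresh allocation cycle.
 */
#include <stdio.h>

#define FIRST_ASID 0
#define MAX_ASID   255 /* 8-bit ASID tag in TLB entries */

static int asid_cache = FIRST_ASID; /* models the kernel's tracker */

static int alloc_asid(void)
{
        if (++asid_cache > MAX_ASID) { /* roll-over: cycle exhausted */
                asid_cache = FIRST_ASID;
                puts("flush_tlb_all()"); /* stand-in for the real flush */
        }
        return asid_cache;
}

int main(void)
{
        for (int i = 0; i < 300; i++) /* forces one roll-over */
                alloc_asid();
        printf("tracker after 300 allocations: %d\n", asid_cache);
        return 0;
}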
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index c6e28053fb70..c12eb9b4f449 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -85,6 +85,13 @@
 #define DC_CTRL_INV_MODE_FLUSH        0x40
 #define DC_CTRL_FLUSH_STATUS          0x100
 
+/* MMU Management regs */
+#define ARC_REG_PID            0x409
+#define ARC_REG_SCRATCH_DATA0  0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE             (1 << 31)       /* Enable MMU for process */
+
 /*
  * Floating Pt Registers
  * Status regs are read-only (build-time) so need not be saved/restored
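
For reference, the PID aux register added above packs the 8-bit ASID in its low bits together with the MMU enable bit. A standalone sketch of the value the later code composes with asid | MMU_ENABLE; make_pid_val() is a hypothetical helper for illustration only.

/*
 * Illustrative sketch (not part of the patch): low 8 bits carry the
 * ASID, bit 31 enables the MMU for the process.
 */
#include <stdio.h>

#define MMU_ENABLE (1u << 31) /* Enable MMU for process */

static unsigned int make_pid_val(unsigned int asid)
{
        return (asid & 0xff) | MMU_ENABLE; /* mirrors asid | MMU_ENABLE */
}

int main(void)
{
        /* e.g. ASID 42 -> 0x8000002a */
        printf("PID reg value: 0x%08x\n", make_pid_val(42));
        return 0;
}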
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
new file mode 100644
index 000000000000..56b02320f1a9
--- /dev/null
+++ b/arch/arc/include/asm/mmu.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMU_H
+#define _ASM_ARC_MMU_H
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+        unsigned long asid;     /* Pvt Addr-Space ID for mm */
+#ifdef CONFIG_ARC_TLB_DBG
+        struct task_struct *tsk;
+#endif
+} mm_context_t;
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
new file mode 100644
index 000000000000..d12f3dec8b70
--- /dev/null
+++ b/arch/arc/include/asm/mmu_context.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ *  -Refactored get_new_mmu_context( ) to only handle live-mm.
+ *   retiring-mm handled in other hooks
+ *
+ * Vineetg: March 25th, 2008: Bug #92690
+ *  -Major rewrite of Core ASID allocation routine get_new_mmu_context
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_MMU_CONTEXT_H
+#define _ASM_ARC_MMU_CONTEXT_H
+
+#include <asm/arcregs.h>
+#include <asm/tlb.h>
+
+#include <asm-generic/mm_hooks.h>
+
+/* ARC700 ASID Management
+ *
+ * ARC MMU provides an 8-bit ASID (0..255) to tag TLB entries, allowing entries
+ * with the same vaddr (from different tasks) to co-exist. This provides for
+ * "Fast Context Switch", i.e. no TLB flush on a context switch.
+ *
+ * Linux assigns each task a unique ASID. A simple round-robin allocation
+ * of H/w ASIDs is done using the software tracker @asid_cache.
+ * When it reaches max 255, the allocation cycle starts afresh by flushing
+ * the entire TLB and wrapping the ASID back to zero.
+ *
+ * For book-keeping, Linux uses a couple of data structures:
+ *  -mm_struct has an @asid field to keep a note of the task's ASID (needed,
+ *   say, at the time of switch_mm( ))
+ *  -An array of mm structs @asid_mm_map[] for the reverse asid->mm mapping:
+ *   given an ASID, find the associated mm struct.
+ *
+ * The round-robin allocation algorithm allows for ASID stealing.
+ * If the asid tracker is at "x-1", a new req will allocate "x", even if "x"
+ * was already assigned to another (switched-out) task. Obviously the prev
+ * owner is marked with an invalid ASID to make it request a new ASID when
+ * it gets scheduled next time. However its TLB entries (with ASID "x") could
+ * still exist, which must be cleared before the same ASID is used by the new
+ * owner. Flushing them would be a plausible but costly solution. Instead we
+ * force an allocation policy quirk, which ensures that a stolen ASID won't
+ * have any TLB entries associated, alleviating the need to flush.
+ * The quirk essentially is to not allow an ASID allocated in a prev cycle
+ * to be used past a roll-over into the next cycle.
+ * When this happens (i.e. task ASID > asid tracker), the task needs to
+ * refresh its ASID, aligning it to the current value of the tracker. If the
+ * task doesn't get scheduled past a roll-over, hence its ASID is not yet
+ * realigned with the tracker, such an ASID is anyway safely reusable because
+ * it is guaranteed that TLB entries with that ASID won't exist.
+ */
+
+#define FIRST_ASID  0
+#define MAX_ASID    255                 /* 8 bit PID field in PID Aux reg */
+#define NO_ASID     (MAX_ASID + 1)      /* ASID Not alloc to mmu ctxt */
+#define NUM_ASID    ((MAX_ASID - FIRST_ASID) + 1)
+
+/* ASID to mm struct mapping */
+extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+
+extern int asid_cache;
+
+/*
+ * Assign a new ASID to the task. If the task already has an ASID, it is
+ * relinquished.
+ */
+static inline void get_new_mmu_context(struct mm_struct *mm)
+{
+        struct mm_struct *prev_owner;
+        unsigned long flags;
+
+        local_irq_save(flags);
+
+        /*
+         * Relinquish the currently owned ASID (if any).
+         * Doing this unconditionally saves a cmp-n-branch; for an already
+         * unused ASID slot, the value was/remains NULL.
+         */
+        asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+
+        /* move to new ASID */
+        if (++asid_cache > MAX_ASID) {          /* ASID roll-over */
+                asid_cache = FIRST_ASID;
+                flush_tlb_all();
+        }
+
+        /*
+         * Is the next ASID already owned by someone else (we are stealing
+         * it)? If so, let the orig owner know so that when it runs, it asks
+         * for a brand new ASID. This would only happen for a long-lived task
+         * with an ASID from a prev allocation cycle (before ASID roll-over).
+         *
+         * This might look wrong - if we re-use some other task's ASID,
+         * won't we use its stale TLB entries too? Actually switch_mm( )
+         * takes care of such a case: it ensures that a task with an ASID
+         * from the prev alloc cycle, when scheduled, will refresh its ASID:
+         * see switch_mm( ) below. The stealing scenario described here will
+         * only happen if that task didn't get a chance to refresh its ASID -
+         * implying stale entries won't exist.
+         */
+        prev_owner = asid_mm_map[asid_cache];
+        if (prev_owner)
+                prev_owner->context.asid = NO_ASID;
+
+        /* Assign new ASID to tsk */
+        asid_mm_map[asid_cache] = mm;
+        mm->context.asid = asid_cache;
+
+#ifdef CONFIG_ARC_TLB_DBG
+        pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
+                " pid:%u, assigned asid:%lu\n",
+                (unsigned int)mm, (unsigned int)prev_owner,
+                (unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
+                (mm->context.tsk)->pid, mm->context.asid);
+#endif
+
+        write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+
+        local_irq_restore(flags);
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+        mm->context.asid = NO_ASID;
+#ifdef CONFIG_ARC_TLB_DBG
+        mm->context.tsk = tsk;
+#endif
+        return 0;
+}
+
+/* Prepare the MMU for the task: set up the PID reg with the allocated ASID.
+ * If the task doesn't have an ASID (never allocated, or stolen), get a new one.
+ */
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                             struct task_struct *tsk)
+{
+        /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
+        write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+
+        /*
+         * Get a new ASID if the task doesn't have a valid one. Possible when
+         * -task never had an ASID (fresh after fork)
+         * -its ASID was stolen - past an ASID roll-over.
+         * -There's a third obscure scenario (if this task is running for the
+         *  first time after an ASID rollover), where despite having a valid
+         *  ASID, we force getting a new ASID - see comments at top.
+         *
+         * Both the non-alloc scenario and first-use-after-rollover can be
+         * detected using the single condition below: NO_ASID = 256
+         * while asid_cache is always a valid ASID value (0-255).
+         */
+        if (next->context.asid > asid_cache) {
+                get_new_mmu_context(next);
+        } else {
+                /*
+                 * XXX: This will never happen given the checks above
+                 * BUG_ON(next->context.asid > MAX_ASID);
+                 */
+                write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
+        }
+
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+
+        asid_mm_map[mm->context.asid] = NULL;
+        mm->context.asid = NO_ASID;
+
+        local_irq_restore(flags);
+}
+
+/* It seemed that deactivate_mm( ) is a reasonable place to do the book-keeping
+ * for a retiring mm. However destroy_context( ) still needs to do that because
+ * between mm_release( ) => deactivate_mm( ) and
+ * mmput => .. => __mmdrop( ) => destroy_context( )
+ * there is a good chance that the task gets sched-out/in, making its ASID
+ * valid again (this teased me for a whole day).
+ */
+#define deactivate_mm(tsk, mm)   do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+        write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+
+        /* Unconditionally get a new ASID */
+        get_new_mmu_context(next);
+
+}
+
+#define enter_lazy_tlb(mm, tsk)
+
+#endif /* _ASM_ARC_MMU_CONTEXT_H */
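
A worked example of the single compare switch_mm() relies on (next->context.asid > asid_cache) may make the three refresh scenarios concrete. Illustrative C, not part of the patch; the task values below are made up, the constants come from the header above.

/*
 * Hypothetical trace of the refresh check: NO_ASID (256) always exceeds
 * the tracker, and an ASID from a previous cycle exceeds a freshly
 * wrapped tracker, so one compare catches both.
 */
#include <stdio.h>

#define MAX_ASID 255
#define NO_ASID  (MAX_ASID + 1)

int main(void)
{
        int asid_cache = 5;     /* tracker shortly after a roll-over */

        int a = NO_ASID;        /* task A: never allocated -> 256 > 5, refresh */
        int b = 200;            /* task B: ASID from prev cycle -> 200 > 5, refresh */
        int c = 3;              /* task C: ASID from current cycle -> 3 <= 5, reuse */

        printf("A needs refresh: %d\n", a > asid_cache);
        printf("B needs refresh: %d\n", b > asid_cache);
        printf("C needs refresh: %d\n", c > asid_cache);
        return 0;
}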
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
new file mode 100644
index 000000000000..f1edae2410a7
--- /dev/null
+++ b/arch/arc/mm/tlb.c
@@ -0,0 +1,23 @@
+/*
+ * TLB Management (flush/create/diagnostics) for ARC700
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <asm/arcregs.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+
+/* A copy of the ASID from the PID reg is kept in asid_cache */
+int asid_cache = FIRST_ASID;
+
+/* ASID to mm struct mapping. We have one extra entry corresponding to
+ * NO_ASID to save us a compare when clearing the mm entry for the old ASID;
+ * see get_new_mmu_context (asm-arc/mmu_context.h)
+ */
+struct mm_struct *asid_mm_map[NUM_ASID + 1];
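
The "one extra entry" comment above is the trick that lets get_new_mmu_context() clear the old mapping unconditionally. A standalone sketch of the idea, not kernel code: relinquish() is a hypothetical name, and NUM_ASID is written out assuming FIRST_ASID is 0 as in the patch.

/*
 * Slot NO_ASID (256) is a sacrificial entry: a store through it is
 * always in bounds, so no "if (asid != NO_ASID)" branch is needed
 * before clearing an mm's slot.
 */
#include <stddef.h>

#define MAX_ASID 255
#define NO_ASID  (MAX_ASID + 1)
#define NUM_ASID (MAX_ASID + 1)

struct mm_struct; /* opaque for this sketch */

static struct mm_struct *asid_mm_map[NUM_ASID + 1]; /* +1 slot for NO_ASID */

static void relinquish(unsigned long asid)
{
        /* In-bounds for live ASIDs (0..255) and for NO_ASID (256) alike */
        asid_mm_map[asid] = NULL;
}

int main(void)
{
        relinquish(NO_ASID); /* harmless: hits the sacrificial slot */
        relinquish(42);      /* clears a real mapping */
        return 0;
}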