author     Vineet Gupta <vgupta@synopsys.com>   2013-07-25 18:45:50 -0400
committer  Vineet Gupta <vgupta@synopsys.com>   2013-08-30 12:12:19 -0400
commit     947bf103fcd2defa3bc4b7ebc6b05d0427bcde2d
tree       549bdf5c9cdd5a9d4aa320bf4fbdf88b499f1f4b /arch/arc
parent     c60115537c96d78a884d2a4bd78839a57266d48b

ARC: [ASID] Track ASID allocation cycles/generations
This helps remove the asid-to-mm reverse map.

While mm->context.asid contains the ASID assigned to a process, our ASID allocator also used an asid_mm_map[] reverse map. In a new allocation cycle (mm->ASID >= @asid_cache), the round-robin ASID allocator used this to check if the new @asid_cache belonged to some mm2 (from the previous cycle). If so, it could locate that mm via the reverse map and mark its ASID as unallocated, forcing it to refresh at the time of switch_mm().

However, for SMP the reverse map has to be maintained per CPU, so it becomes two dimensional; hence we got rid of it.

With the reverse map gone, it is NOT possible to reach the current assignee. So we track the ASID allocation generation/cycle, and on every switch_mm() check whether the generation of the CPU's ASID counter matches that of the mm's ASID; if not, the ASID is refreshed.

(Based loosely on the arch/sh implementation)

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
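To make the new rule concrete: below is a rough standalone sketch (plain user-space C, no locking, and a hypothetical needs_refresh() helper; only the two MM_CTXT_* masks come from the patch itself) of the cycle/generation comparison that get_new_mmu_context() performs in the mmu_context.h hunk further down.

    #include <stdio.h>

    #define MM_CTXT_ASID_MASK   0x000000ffu            /* low 8 bits: MMU PID */
    #define MM_CTXT_CYCLE_MASK  (~MM_CTXT_ASID_MASK)   /* upper 24 bits: allocation cycle */

    /* mm_asid stands in for mm->context.asid; asid_cache is the allocator counter */
    static int needs_refresh(unsigned int mm_asid, unsigned int asid_cache)
    {
        /* a refresh is needed exactly when the generation (cycle) bits differ */
        return ((mm_asid ^ asid_cache) & MM_CTXT_CYCLE_MASK) != 0;
    }

    int main(void)
    {
        /* task got PID 0x05 in cycle 1 (0x100); allocator has moved on to cycle 2 */
        printf("%d\n", needs_refresh(0x00000105, 0x00000203));  /* 1: stale, refresh */
        printf("%d\n", needs_refresh(0x00000203, 0x00000203));  /* 0: same cycle, reuse */
        return 0;
    }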
Diffstat (limited to 'arch/arc')
-rw-r--r--  arch/arc/include/asm/mmu.h          |  2
-rw-r--r--  arch/arc/include/asm/mmu_context.h  | 97
-rw-r--r--  arch/arc/mm/tlb.c                   | 22
-rw-r--r--  arch/arc/mm/tlbex.S                 |  5
4 files changed, 40 insertions, 86 deletions
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index 1639f25e47b1..c82db8bd7270 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -48,7 +48,7 @@
 #ifndef __ASSEMBLY__
 
 typedef struct {
-	unsigned long asid;	/* Pvt Addr-Space ID for mm	*/
+	unsigned long asid;	/* 8 bit MMU PID + Generation cycle */
 } mm_context_t;
 
 #ifdef CONFIG_ARC_DBG_TLB_PARANOIA
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 9b09d18f01b3..43a1b51bb8cc 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -34,39 +34,22 @@
  * When it reaches max 255, the allocation cycle starts afresh by flushing
  * the entire TLB and wrapping ASID back to zero.
  *
- * For book-keeping, Linux uses a couple of data-structures:
- *  -mm_struct has an @asid field to keep a note of task's ASID (needed at the
- *   time of say switch_mm( )
- *  -An array of mm structs @asid_mm_map[] for asid->mm the reverse mapping,
- *   given an ASID, finding the mm struct associated.
- *
- * The round-robin allocation algorithm allows for ASID stealing.
- * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
- * already assigned to another (switched-out) task. Obviously the prev owner
- * is marked with an invalid ASID to make it request for a new ASID when it
- * gets scheduled next time. However its TLB entries (with ASID "x") could
- * exist, which must be cleared before the same ASID is used by the new owner.
- * Flushing them would be plausible but costly solution. Instead we force a
- * allocation policy quirk, which ensures that a stolen ASID won't have any
- * TLB entries associates, alleviating the need to flush.
- * The quirk essentially is not allowing ASID allocated in prev cycle
- * to be used past a roll-over in the next cycle.
- * When this happens (i.e. task ASID > asid tracker), task needs to refresh
- * its ASID, aligning it to current value of tracker. If the task doesn't get
- * scheduled past a roll-over, hence its ASID is not yet realigned with
- * tracker, such ASID is anyways safely reusable because it is
- * gauranteed that TLB entries with that ASID wont exist.
+ * A new allocation cycle, post rollover, could potentially reassign an ASID
+ * to a different task. Thus the rule is to refresh the ASID in a new cycle.
+ * The 32 bit @asid_cache (and mm->asid) have 8 bits MMU PID and rest 24 bits
+ * serve as cycle/generation indicator and natural 32 bit unsigned math
+ * automagically increments the generation when lower 8 bits rollover.
  */
 
-#define FIRST_ASID	0
-#define MAX_ASID	255	/* 8 bit PID field in PID Aux reg */
-#define NO_ASID		(MAX_ASID + 1)	/* ASID Not alloc to mmu ctxt */
-#define NUM_ASID	((MAX_ASID - FIRST_ASID) + 1)
+#define MM_CTXT_ASID_MASK	0x000000ff /* MMU PID reg :8 bit PID */
+#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)
+
+#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
+#define MM_CTXT_NO_ASID		0UL
 
-/* ASID to mm struct mapping */
-extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+#define hw_pid(mm)		(mm->context.asid & MM_CTXT_ASID_MASK)
 
-extern int asid_cache;
+extern unsigned int asid_cache;
 
 /*
  * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
@@ -74,59 +57,42 @@ extern int asid_cache;
  */
 static inline void get_new_mmu_context(struct mm_struct *mm)
 {
-	struct mm_struct *prev_owner;
 	unsigned long flags;
 
 	local_irq_save(flags);
 
 	/*
 	 * Move to new ASID if it was not from current alloc-cycle/generation.
+	 * This is done by ensuring that the generation bits in both mm->ASID
+	 * and cpu's ASID counter are exactly same.
 	 *
 	 * Note: Callers needing new ASID unconditionally, independent of
 	 *	 generation, e.g. local_flush_tlb_mm() for forking parent,
 	 *	 first need to destroy the context, setting it to invalid
 	 *	 value.
 	 */
-	if (mm->context.asid <= asid_cache)
+	if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
 		goto set_hw;
 
-	/*
-	 * Relinquish the currently owned ASID (if any).
-	 * Doing unconditionally saves a cmp-n-branch; for already unused
-	 * ASID slot, the value was/remains NULL
-	 */
-	asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+	/* move to new ASID and handle rollover */
+	if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) {
 
-	/* move to new ASID */
-	if (++asid_cache > MAX_ASID) {	/* ASID roll-over */
-		asid_cache = FIRST_ASID;
 		flush_tlb_all();
-	}
 
-	/*
-	 * Is next ASID already owned by some-one else (we are stealing it).
-	 * If so, let the orig owner be aware of this, so when it runs, it
-	 * asks for a brand new ASID. This would only happen for a long-lived
-	 * task with ASID from prev allocation cycle (before ASID roll-over).
-	 *
-	 * This might look wrong - if we are re-using some other task's ASID,
-	 * won't we use it's stale TLB entries too. Actually the algorithm takes
-	 * care of such a case: it ensures that task with ASID from prev alloc
-	 * cycle, when scheduled will refresh it's ASID
-	 * The stealing scenario described here will only happen if that task
-	 * didn't get a chance to refresh it's ASID - implying stale entries
-	 * won't exist.
-	 */
-	prev_owner = asid_mm_map[asid_cache];
-	if (prev_owner)
-		prev_owner->context.asid = NO_ASID;
+		/*
+		 * Above check for rollover of 8 bit ASID in 32 bit container.
+		 * If the container itself wrapped around, set it to a non zero
+		 * "generation" to distinguish from no context
+		 */
+		if (!asid_cache)
+			asid_cache = MM_CTXT_FIRST_CYCLE;
+	}
 
 	/* Assign new ASID to tsk */
-	asid_mm_map[asid_cache] = mm;
 	mm->context.asid = asid_cache;
 
 set_hw:
-	write_aux_reg(ARC_REG_PID, mm->context.asid | MMU_ENABLE);
+	write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE);
 
 	local_irq_restore(flags);
 }
@@ -138,7 +104,7 @@ set_hw:
 static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-	mm->context.asid = NO_ASID;
+	mm->context.asid = MM_CTXT_NO_ASID;
 	return 0;
 }
 
@@ -167,14 +133,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 static inline void destroy_context(struct mm_struct *mm)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	asid_mm_map[mm->context.asid] = NULL;
-	mm->context.asid = NO_ASID;
-
-	local_irq_restore(flags);
+	mm->context.asid = MM_CTXT_NO_ASID;
 }
 
 /* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
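As a quick model of the allocation step shown above (again a user-space sketch under stated assumptions: single-CPU view, no flush_tlb_all(), and a hypothetical alloc_asid() wrapper; the constants mirror the patch), note how plain 32-bit unsigned increment rolls the 8-bit PID and carries into the generation bits, and how a full 32-bit wrap is pushed back to MM_CTXT_FIRST_CYCLE so a live context can never look like MM_CTXT_NO_ASID (0):

    #include <stdio.h>

    #define MM_CTXT_ASID_MASK    0x000000ffu
    #define MM_CTXT_FIRST_CYCLE  (MM_CTXT_ASID_MASK + 1)

    static unsigned int asid_cache = MM_CTXT_FIRST_CYCLE;

    /* ++asid_cache rolls the low 8 bits; the carry bumps the generation for free */
    static unsigned int alloc_asid(void)
    {
        if (!(++asid_cache & MM_CTXT_ASID_MASK)) {
            /* in the kernel, flush_tlb_all() happens here */
            if (!asid_cache)                      /* the 32-bit container wrapped */
                asid_cache = MM_CTXT_FIRST_CYCLE; /* keep generation non-zero */
        }
        return asid_cache;
    }

    int main(void)
    {
        asid_cache = 0x000001fe;          /* cycle 1, PID 0xfe */
        printf("%#x\n", alloc_asid());    /* 0x1ff: last PID of cycle 1 */
        printf("%#x\n", alloc_asid());    /* 0x200: rollover, cycle becomes 2 */
        return 0;
    }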
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index b5c5e0aa0aaa..71cb26df4255 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -100,13 +100,7 @@
 
 
 /* A copy of the ASID from the PID reg is kept in asid_cache */
-int asid_cache = FIRST_ASID;
-
-/* ASID to mm struct mapping. We have one extra entry corresponding to
- * NO_ASID to save us a compare when clearing the mm entry for old asid
- * see get_new_mmu_context (asm-arc/mmu_context.h)
- */
-struct mm_struct *asid_mm_map[NUM_ASID + 1];
+unsigned int asid_cache = MM_CTXT_FIRST_CYCLE;
 
 /*
  * Utility Routine to erase a J-TLB entry
@@ -281,7 +275,6 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			   unsigned long end)
 {
 	unsigned long flags;
-	unsigned int asid;
 
 	/* If range @start to @end is more than 32 TLB entries deep,
 	 * its better to move to a new ASID rather than searching for
@@ -303,11 +296,10 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	start &= PAGE_MASK;
 
 	local_irq_save(flags);
-	asid = vma->vm_mm->context.asid;
 
-	if (asid != NO_ASID) {
+	if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) {
 		while (start < end) {
-			tlb_entry_erase(start | (asid & 0xff));
+			tlb_entry_erase(start | hw_pid(vma->vm_mm));
 			start += PAGE_SIZE;
 		}
 	}
@@ -361,9 +353,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	 */
 	local_irq_save(flags);
 
-	if (vma->vm_mm->context.asid != NO_ASID) {
-		tlb_entry_erase((page & PAGE_MASK) |
-				(vma->vm_mm->context.asid & 0xff));
+	if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) {
+		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm));
 		utlb_invalidate();
 	}
 
@@ -709,7 +700,8 @@ void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
 	 * - SW needs to have a valid ASID
 	 */
 	if (addr < 0x70000000 &&
-	    ((mmu_asid != mm_asid) || (mm_asid == NO_ASID)))
+	    ((mm_asid == MM_CTXT_NO_ASID) ||
+	     (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
 		print_asid_mismatch(mm_asid, mmu_asid, 0);
 }
 #endif
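For reference, the key handed to tlb_entry_erase() in the hunks above is simply the page-aligned virtual address OR'd with the low 8 ASID bits that hw_pid() extracts. A small sketch of that composition (hypothetical tlb_erase_key() helper; 8 KB pages assumed, which is only the common ARC default):

    #include <stdio.h>

    #define PAGE_MASK           0xffffe000u   /* assumes 8 KB pages */
    #define MM_CTXT_ASID_MASK   0x000000ffu

    /* equivalent of "start | hw_pid(vma->vm_mm)" with start already page aligned */
    static unsigned int tlb_erase_key(unsigned int vaddr, unsigned int mm_asid)
    {
        return (vaddr & PAGE_MASK) | (mm_asid & MM_CTXT_ASID_MASK);
    }

    int main(void)
    {
        /* context word 0x242 = cycle 2 + PID 0x42; address in page 0x2000a000 */
        printf("%#x\n", tlb_erase_key(0x2000a123, 0x00000242));  /* 0x2000a042 */
        return 0;
    }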
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 88897a112d55..cf7d7d9ad695 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -140,12 +140,15 @@ ex_saved_reg1:
 	GET_CURR_TASK_ON_CPU  r3
 	ld r0, [r3, TASK_ACT_MM]
 	ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
+	breq r0, 0, 55f	; Error if no ASID allocated
 
 	lr r1, [ARC_REG_PID]
 	and r1, r1, 0xFF
 
-	breq r1, r0, 5f
+	and r2, r0, 0xFF	; MMU PID bits only for comparison
+	breq r1, r2, 5f
 
+55:
 	; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
 	lr r2, [erstatus]
 	bbit0 r2, STATUS_U_BIT, 5f
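The added assembly mirrors the tlb_paranoid_check() change in tlb.c: error out when no ASID is allocated, or when the hardware PID register disagrees with the low 8 bits of the software ASID, but stay quiet for kernel-mode faults. A rough C rendering of that control flow (hypothetical paranoid_check() helper; printf() stands in for print_asid_mismatch()):

    #include <stdio.h>

    #define MM_CTXT_ASID_MASK  0x000000ffu

    static void paranoid_check(unsigned int mm_asid, unsigned int pid_reg, int user_mode)
    {
        unsigned int mmu_asid = pid_reg & 0xff;               /* "and r1, r1, 0xFF" */
        unsigned int sw_asid  = mm_asid & MM_CTXT_ASID_MASK;  /* "and r2, r0, 0xFF" */

        if (mm_asid == 0)            /* "breq r0, 0, 55f": no ASID allocated */
            goto mismatch;
        if (mmu_asid == sw_asid)     /* "breq r1, r2, 5f": H/w and S/w agree */
            return;
    mismatch:
        if (user_mode)               /* kernel-mode mismatches are tolerated */
            printf("ASID mismatch: sw %#x hw %#x\n", mm_asid, mmu_asid);
    }

    int main(void)
    {
        paranoid_check(0x00000242, 0x42, 1);  /* ok: low 8 bits match */
        paranoid_check(0x00000305, 0x42, 1);  /* reports a mismatch */
        return 0;
    }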