Diffstat (limited to 'arch/arc/include')
 -rw-r--r--  arch/arc/include/asm/mmu.h          |  2
 -rw-r--r--  arch/arc/include/asm/mmu_context.h  | 97
 2 files changed, 29 insertions, 70 deletions
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index 1639f25e47b1..c82db8bd7270 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -48,7 +48,7 @@
 #ifndef __ASSEMBLY__
 
 typedef struct {
-	unsigned long asid;	/* Pvt Addr-Space ID for mm */
+	unsigned long asid;	/* 8 bit MMU PID + Generation cycle */
 } mm_context_t;
 
 #ifdef CONFIG_ARC_DBG_TLB_PARANOIA
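
A minimal user-space sketch (not part of the patch, values invented) of how the new asid encoding splits into a hardware PID and a generation count, using the MM_CTXT_* masks that the mmu_context.h hunk below introduces:

#include <stdio.h>

/* same values as the masks introduced below in mmu_context.h */
#define MM_CTXT_ASID_MASK	0x000000ffUL
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)

int main(void)
{
	unsigned long asid = 0x00000203UL;	/* invented example: cycle 2, hw PID 3 */

	printf("hw PID     = %lu\n", asid & MM_CTXT_ASID_MASK);		/* 3 */
	printf("generation = 0x%lx\n", asid & MM_CTXT_CYCLE_MASK);	/* 0x200 */
	return 0;
}
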
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 9b09d18f01b3..43a1b51bb8cc 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -34,39 +34,22 @@
  * When it reaches max 255, the allocation cycle starts afresh by flushing
  * the entire TLB and wrapping ASID back to zero.
  *
- * For book-keeping, Linux uses a couple of data-structures:
- *  -mm_struct has an @asid field to keep a note of task's ASID (needed at the
- *   time of say switch_mm( )
- *  -An array of mm structs @asid_mm_map[] for asid->mm the reverse mapping,
- *   given an ASID, finding the mm struct associated.
- *
- * The round-robin allocation algorithm allows for ASID stealing.
- * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
- * already assigned to another (switched-out) task. Obviously the prev owner
- * is marked with an invalid ASID to make it request for a new ASID when it
- * gets scheduled next time. However its TLB entries (with ASID "x") could
- * exist, which must be cleared before the same ASID is used by the new owner.
- * Flushing them would be plausible but costly solution. Instead we force a
- * allocation policy quirk, which ensures that a stolen ASID won't have any
- * TLB entries associates, alleviating the need to flush.
- * The quirk essentially is not allowing ASID allocated in prev cycle
- * to be used past a roll-over in the next cycle.
- * When this happens (i.e. task ASID > asid tracker), task needs to refresh
- * its ASID, aligning it to current value of tracker. If the task doesn't get
- * scheduled past a roll-over, hence its ASID is not yet realigned with
- * tracker, such ASID is anyways safely reusable because it is
- * gauranteed that TLB entries with that ASID wont exist.
+ * A new allocation cycle, post rollover, could potentially reassign an ASID
+ * to a different task. Thus the rule is to refresh the ASID in a new cycle.
+ * The 32 bit @asid_cache (and mm->asid) have 8 bits MMU PID and rest 24 bits
+ * serve as cycle/generation indicator and natural 32 bit unsigned math
+ * automagically increments the generation when lower 8 bits rollover.
  */
 
-#define FIRST_ASID		0
-#define MAX_ASID		255	/* 8 bit PID field in PID Aux reg */
-#define NO_ASID			(MAX_ASID + 1)	/* ASID Not alloc to mmu ctxt */
-#define NUM_ASID		((MAX_ASID - FIRST_ASID) + 1)
+#define MM_CTXT_ASID_MASK	0x000000ff /* MMU PID reg :8 bit PID */
+#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)
+
+#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
+#define MM_CTXT_NO_ASID		0UL
 
-/* ASID to mm struct mapping */
-extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+#define hw_pid(mm)		(mm->context.asid & MM_CTXT_ASID_MASK)
 
-extern int asid_cache;
+extern unsigned int asid_cache;
 
 /*
  * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
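
The generation test used in get_new_mmu_context() in the next hunk boils down to comparing the upper 24 bits of mm->context.asid and asid_cache. A standalone sketch (plain user-space C, invented values) of why an ASID from an older cycle, or MM_CTXT_NO_ASID, always forces a refresh:

#include <assert.h>

#define MM_CTXT_ASID_MASK	0x000000ffUL
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)
#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID		0UL

/* non-zero when @asid was allocated in the same cycle as @cache */
static int same_cycle(unsigned long asid, unsigned long cache)
{
	return !((asid ^ cache) & MM_CTXT_CYCLE_MASK);
}

int main(void)
{
	unsigned long cache = MM_CTXT_FIRST_CYCLE + 5;	/* cycle 1, hw PID 5 */

	assert(same_cycle(MM_CTXT_FIRST_CYCLE + 3, cache));	  /* same cycle: keep ASID */
	assert(!same_cycle(2 * MM_CTXT_FIRST_CYCLE + 3, cache)); /* other cycle: refresh */
	assert(!same_cycle(MM_CTXT_NO_ASID, cache));		  /* never allocated: refresh */
	return 0;
}
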
@@ -74,59 +57,42 @@ extern int asid_cache;
  */
 static inline void get_new_mmu_context(struct mm_struct *mm)
 {
-	struct mm_struct *prev_owner;
 	unsigned long flags;
 
 	local_irq_save(flags);
 
 	/*
 	 * Move to new ASID if it was not from current alloc-cycle/generation.
+	 * This is done by ensuring that the generation bits in both mm->ASID
+	 * and cpu's ASID counter are exactly same.
 	 *
 	 * Note: Callers needing new ASID unconditionally, independent of
 	 *	 generation, e.g. local_flush_tlb_mm() for forking parent,
 	 *	 first need to destroy the context, setting it to invalid
 	 *	 value.
 	 */
-	if (mm->context.asid <= asid_cache)
+	if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
 		goto set_hw;
 
-	/*
-	 * Relinquish the currently owned ASID (if any).
-	 * Doing unconditionally saves a cmp-n-branch; for already unused
-	 * ASID slot, the value was/remains NULL
-	 */
-	asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+	/* move to new ASID and handle rollover */
+	if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) {
 
-	/* move to new ASID */
-	if (++asid_cache > MAX_ASID) {	/* ASID roll-over */
-		asid_cache = FIRST_ASID;
 		flush_tlb_all();
-	}
 
-	/*
-	 * Is next ASID already owned by some-one else (we are stealing it).
-	 * If so, let the orig owner be aware of this, so when it runs, it
-	 * asks for a brand new ASID. This would only happen for a long-lived
-	 * task with ASID from prev allocation cycle (before ASID roll-over).
-	 *
-	 * This might look wrong - if we are re-using some other task's ASID,
-	 * won't we use it's stale TLB entries too. Actually the algorithm takes
-	 * care of such a case: it ensures that task with ASID from prev alloc
-	 * cycle, when scheduled will refresh it's ASID
-	 * The stealing scenario described here will only happen if that task
-	 * didn't get a chance to refresh it's ASID - implying stale entries
-	 * won't exist.
-	 */
-	prev_owner = asid_mm_map[asid_cache];
-	if (prev_owner)
-		prev_owner->context.asid = NO_ASID;
+		/*
+		 * Above checke for rollover of 8 bit ASID in 32 bit container.
+		 * If the container itself wrapped around, set it to a non zero
+		 * "generation" to distinguish from no context
+		 */
+		if (!asid_cache)
+			asid_cache = MM_CTXT_FIRST_CYCLE;
+	}
 
 	/* Assign new ASID to tsk */
-	asid_mm_map[asid_cache] = mm;
 	mm->context.asid = asid_cache;
 
 set_hw:
-	write_aux_reg(ARC_REG_PID, mm->context.asid | MMU_ENABLE);
+	write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE);
 
 	local_irq_restore(flags);
 }
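
The increment-and-rollover path above can be exercised outside the kernel. In this sketch (user-space C, not kernel code) flush_tlb_all() is a printf stub standing in for the real flush, and alloc_asid() mirrors the counter handling at both wrap points:

#include <stdio.h>

#define MM_CTXT_ASID_MASK	0x000000ffU
#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)

static unsigned int asid_cache;

static void flush_tlb_all(void)	/* stand-in for the real kernel flush */
{
	printf("TLB flushed at asid_cache = 0x%x\n", asid_cache);
}

static unsigned int alloc_asid(void)
{
	/* lower 8 bits wrapped to zero: new generation, flush stale entries */
	if (!(++asid_cache & MM_CTXT_ASID_MASK)) {
		flush_tlb_all();
		/* full 32 bit wrap: skip 0 so it stays reserved for "no context" */
		if (!asid_cache)
			asid_cache = MM_CTXT_FIRST_CYCLE;
	}
	return asid_cache;
}

int main(void)
{
	asid_cache = MM_CTXT_ASID_MASK;		/* next alloc crosses the 8 bit limit */
	printf("0x%x\n", alloc_asid());		/* 0x100: generation bumped */

	asid_cache = 0xffffffffU;		/* next alloc wraps the whole container */
	printf("0x%x\n", alloc_asid());		/* 0x100 again, never 0 */
	return 0;
}
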
@@ -138,7 +104,7 @@ set_hw:
 static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-	mm->context.asid = NO_ASID;
+	mm->context.asid = MM_CTXT_NO_ASID;
 	return 0;
 }
 
@@ -167,14 +133,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 static inline void destroy_context(struct mm_struct *mm)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	asid_mm_map[mm->context.asid] = NULL;
-	mm->context.asid = NO_ASID;
-
-	local_irq_restore(flags);
+	mm->context.asid = MM_CTXT_NO_ASID;
 }
 
 /* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
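
With the asid_mm_map[] reverse map gone, context setup and teardown in the two hunks above reduce to stamping MM_CTXT_NO_ASID, with no IRQ disabling needed. A hedged user-space sketch (toy_mm is an invented stand-in for mm->context) of why a destroyed context always fails the generation test and is forced through a fresh allocation on its next switch-in:

#include <assert.h>

#define MM_CTXT_ASID_MASK	0x000000ffUL
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)
#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID		0UL

struct toy_mm { unsigned long asid; };		/* stand-in for mm->context */

int main(void)
{
	unsigned long asid_cache = MM_CTXT_FIRST_CYCLE + 7;	/* some live counter value */
	struct toy_mm mm = { .asid = asid_cache };		/* currently valid context */

	/* valid context: generation bits match, no reallocation needed */
	assert(!((mm.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK));

	mm.asid = MM_CTXT_NO_ASID;	/* destroy_context(): one plain store */

	/* destroyed context always mismatches, since a live asid_cache has
	 * non zero cycle bits (it is bumped to MM_CTXT_FIRST_CYCLE on wrap) */
	assert((mm.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK);
	return 0;
}
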