Diffstat (limited to 'arch/xtensa/include/asm/mmu_context.h')
-rw-r--r--  arch/xtensa/include/asm/mmu_context.h | 100 ++++++++++++++++++++-----------------
1 file changed, 57 insertions(+), 43 deletions(-)
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index 86292c28674f..d33c71a8c9ec 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -1,13 +1,11 @@
1/* 1/*
2 * include/asm-xtensa/mmu_context.h
3 *
4 * Switch an MMU context. 2 * Switch an MMU context.
5 * 3 *
6 * This file is subject to the terms and conditions of the GNU General Public 4 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive 5 * License. See the file "COPYING" in the main directory of this archive
8 * for more details. 6 * for more details.
9 * 7 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc. 8 * Copyright (C) 2001 - 2013 Tensilica Inc.
11 */ 9 */
12 10
13#ifndef _XTENSA_MMU_CONTEXT_H 11#ifndef _XTENSA_MMU_CONTEXT_H
@@ -20,22 +18,25 @@
 #include <linux/stringify.h>
 #include <linux/sched.h>
 
-#include <variant/core.h>
+#include <asm/vectors.h>
 
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
+#include <asm-generic/percpu.h>
 
 #if (XCHAL_HAVE_TLBS != 1)
 # error "Linux must have an MMU!"
 #endif
 
-extern unsigned long asid_cache;
+DECLARE_PER_CPU(unsigned long, asid_cache);
+#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
 
 /*
  * NO_CONTEXT is the invalid ASID value that we don't ever assign to
- * any user or kernel context.
+ * any user or kernel context. We use the reserved values in the
+ * ASID_INSERT macro below.
  *
  *   0 invalid
  *   1 kernel
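
The key change in this hunk is that asid_cache becomes a per-CPU variable: each core now hands out ASIDs independently, which is why an mm later carries one ASID slot per CPU (mm->context.asid[cpu]). The reserved values 0..3 that the updated comment mentions are consumed by the ASID_INSERT() macro defined further down in this header, outside the hunk. A minimal user-space sketch of how that macro packs the RASID register, assuming the common configuration of 8 ASID bits (XCHAL_MMU_ASID_BITS == 8) and the usual in-tree shape of the macro:

/*
 * Sketch of RASID packing, assuming XCHAL_MMU_ASID_BITS == 8.
 * Ring 0 runs with the kernel ASID (1), ring 1 with the current user
 * ASID, rings 2 and 3 with the reserved values 2 and 3 -- hence user
 * ASIDs start at ASID_USER_FIRST == 4.
 */
#include <stdio.h>

#define ASID_MASK       ((1UL << 8) - 1)
#define ASID_INSERT(x)  (0x03020001UL | (((x) & ASID_MASK) << 8))

int main(void)
{
        unsigned long user_asid = 4;    /* ASID_USER_FIRST */

        printf("RASID = 0x%08lx\n", ASID_INSERT(user_asid));   /* 0x03020401 */
        return 0;
}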
@@ -68,21 +69,41 @@ static inline unsigned long get_rasid_register (void)
 	return tmp;
 }
 
-static inline void
-__get_new_mmu_context(struct mm_struct *mm)
+static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
+{
+	unsigned long asid = cpu_asid_cache(cpu);
+	if ((++asid & ASID_MASK) == 0) {
+		/*
+		 * Start new asid cycle; continue counting with next
+		 * incarnation bits; skipping over 0, 1, 2, 3.
+		 */
+		local_flush_tlb_all();
+		asid += ASID_USER_FIRST;
+	}
+	cpu_asid_cache(cpu) = asid;
+	mm->context.asid[cpu] = asid;
+	mm->context.cpu = cpu;
+}
+
+static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
 {
-	extern void flush_tlb_all(void);
-	if (! (++asid_cache & ASID_MASK) ) {
-		flush_tlb_all(); /* start new asid cycle */
-		asid_cache += ASID_USER_FIRST;
+	/*
+	 * Check if our ASID is of an older version and thus invalid.
+	 */
+
+	if (mm) {
+		unsigned long asid = mm->context.asid[cpu];
+
+		if (asid == NO_CONTEXT ||
+				((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
+			get_new_mmu_context(mm, cpu);
 	}
-	mm->context = asid_cache;
 }
 
-static inline void
-__load_mmu_context(struct mm_struct *mm)
+static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
 {
-	set_rasid_register(ASID_INSERT(mm->context));
+	get_mmu_context(mm, cpu);
+	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
 	invalidate_page_directory();
 }
 
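get_new_mmu_context() is the classic ASID-generation allocator: the low ASID_MASK bits of asid_cache are the ASID proper, and the high bits count the generation ("incarnation"). When the low bits wrap to zero, every ASID previously handed out on this CPU is stale, so one local_flush_tlb_all() retires them all and counting resumes at ASID_USER_FIRST. get_mmu_context() then detects staleness by XORing the mm's cached ASID with the current cache: any difference outside ASID_MASK means an older generation. A stand-alone model of that arithmetic, assuming 8 ASID bits:

/*
 * Stand-alone model of the ASID generation scheme above, assuming
 * XCHAL_MMU_ASID_BITS == 8.  One unsigned long holds both fields:
 * the low 8 bits are the ASID, the high bits count the generation.
 */
#include <stdio.h>

#define ASID_MASK       ((1UL << 8) - 1)
#define ASID_USER_FIRST 4UL
#define NO_CONTEXT      0UL

static unsigned long asid_cache = ASID_USER_FIRST;

static unsigned long new_asid(void)
{
        if ((++asid_cache & ASID_MASK) == 0) {
                /* Rollover: the real code calls local_flush_tlb_all() here. */
                puts("rollover -> flush TLB, skip reserved ASIDs 0..3");
                asid_cache += ASID_USER_FIRST;
        }
        return asid_cache;
}

static int asid_is_stale(unsigned long asid)
{
        /* Any difference in the high bits means an older generation. */
        return asid == NO_CONTEXT ||
                ((asid ^ asid_cache) & ~ASID_MASK) != 0;
}

int main(void)
{
        unsigned long mine = new_asid();
        int i;

        for (i = 0; i < 256; i++)       /* burn one full generation */
                new_asid();

        printf("stale = %d\n", asid_is_stale(mine));    /* prints 1 */
        return 0;
}
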
@@ -89,12 +110,17 @@
 /*
  * Initialize the context related info for a new mm_struct
- * instance.
+ * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing
+ * to -1 says the process has never run on any core.
  */
 
-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+static inline int init_new_context(struct task_struct *tsk,
+		struct mm_struct *mm)
 {
-	mm->context = NO_CONTEXT;
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		mm->context.asid[cpu] = NO_CONTEXT;
+	}
+	mm->context.cpu = -1;
 	return 0;
 }
 
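init_new_context() now initializes one ASID slot per possible CPU plus a "last ran on" core id, so the mm_context_t for this port must have grown matching fields. A sketch inferred from the accessors used above, with NR_CPUS defined only so it stands on its own:

/*
 * Inferred shape of xtensa's mm_context_t after this change; the real
 * definition lives in asm/mmu.h, not in this header.  NR_CPUS is
 * defined here only to make the sketch self-contained.
 */
#define NR_CPUS 4

typedef struct {
        unsigned long asid[NR_CPUS];    /* per-CPU ASID; NO_CONTEXT if the
                                           mm never ran on that core */
        unsigned int cpu;               /* core this mm last ran on; -1
                                           (wrapped) if it has not run yet */
} mm_context_t;
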
@@ -101,31 +127,19 @@
-/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
- */
-static inline void
-activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
-	/* Unconditionally get a new ASID. */
-
-	__get_new_mmu_context(next);
-	__load_mmu_context(next);
-}
-
-
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
-	unsigned long asid = asid_cache;
-
-	/* Check if our ASID is of an older version and thus invalid */
-
-	if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK))
-		__get_new_mmu_context(next);
-
-	__load_mmu_context(next);
+	unsigned int cpu = smp_processor_id();
+	int migrated = next->context.cpu != cpu;
+	/* Flush the icache if we migrated to a new core. */
+	if (migrated) {
+		__invalidate_icache_all();
+		next->context.cpu = cpu;
+	}
+	if (migrated || prev != next)
+		activate_context(next, cpu);
 }
 
-#define deactivate_mm(tsk, mm)	do { } while(0)
+#define activate_mm(prev, next)	switch_mm((prev), (next), NULL)
+#define deactivate_mm(tsk, mm)	do { } while (0)
 
 /*
  * Destroy context related info for an mm_struct that is about
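
The rewritten switch_mm() handles a case the UP code never saw: a thread migrating between cores. Since the xtensa instruction cache is per-core and not kept coherent by hardware, a migrated mm gets __invalidate_icache_all() before it executes on the new core, and the context reload is skipped only when neither the mm nor the core changed. activate_mm() also stops unconditionally allocating a fresh ASID and simply becomes switch_mm() with no task, since get_mmu_context() already allocates lazily whenever the cached ASID is missing or stale. A compact stand-alone model of the decision logic, with hypothetical stubs in place of the real primitives:

/*
 * Stand-alone model of the switch_mm() decision logic above.  The two
 * helpers are hypothetical stubs standing in for
 * __invalidate_icache_all() and activate_context().
 */
#include <stdio.h>

struct mm { int cpu; };         /* models mm->context.cpu */

static void icache_flush(void)
{
        puts("icache flush");
}

static void activate(struct mm *mm, int cpu)
{
        printf("activate mm %p on cpu %d\n", (void *)mm, cpu);
}

static void model_switch_mm(struct mm *prev, struct mm *next, int cpu)
{
        int migrated = next->cpu != cpu;

        if (migrated) {                 /* first run here since moving */
                icache_flush();
                next->cpu = cpu;
        }
        if (migrated || prev != next)   /* no-op only if nothing changed */
                activate(next, cpu);
}

int main(void)
{
        struct mm a = { .cpu = -1 }, b = { .cpu = -1 };

        model_switch_mm(&a, &b, 0);     /* new mm, new core: flush + activate */
        model_switch_mm(&b, &b, 0);     /* same mm, same core: nothing to do */
        model_switch_mm(&b, &b, 1);     /* same mm, migrated: flush + activate */
        return 0;
}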