Diffstat (limited to 'include/asm-sh/mmu_context.h')
-rw-r--r--  include/asm-sh/mmu_context.h  87
1 files changed, 24 insertions, 63 deletions
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index 199662bb35c6..fe58d00b250c 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -1,13 +1,13 @@
 /*
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 - 2006 Paul Mundt
+ * Copyright (C) 2003 - 2007 Paul Mundt
  *
  * ASID handling idea taken from MIPS implementation.
  */
 #ifndef __ASM_SH_MMU_CONTEXT_H
 #define __ASM_SH_MMU_CONTEXT_H
-#ifdef __KERNEL__
 
+#ifdef __KERNEL__
 #include <asm/cpu/mmu_context.h>
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -19,7 +19,6 @@
  * (a) TLB cache version (or round, cycle whatever expression you like)
  * (b) ASID (Address Space IDentifier)
  */
-
 #define MMU_CONTEXT_ASID_MASK		0x000000ff
 #define MMU_CONTEXT_VERSION_MASK	0xffffff00
 #define MMU_CONTEXT_FIRST_VERSION	0x00000100
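A minimal sketch (illustrative only, not part of the patch) of how these masks partition a 32-bit context value: the low byte is the hardware ASID, and the upper 24 bits are the version/generation counter, which starts at MMU_CONTEXT_FIRST_VERSION so that 0 can be reserved for "no context". The helper name below is hypothetical.

static inline void decompose_context(unsigned long ctx,
				     unsigned long *version,
				     unsigned long *asid)
{
	*asid    = ctx & MMU_CONTEXT_ASID_MASK;		/* e.g. 0x00000305 -> 0x005 */
	*version = ctx & MMU_CONTEXT_VERSION_MASK;	/* e.g. 0x00000305 -> 0x300 */
}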
@@ -28,10 +27,11 @@
 /* ASID is 8-bit value, so it can't be 0x100 */
 #define MMU_NO_ASID			0x100
 
-#define cpu_context(cpu, mm)	((mm)->context.id[cpu])
-#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & \
-				 MMU_CONTEXT_ASID_MASK)
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
+#define cpu_context(cpu, mm)	((mm)->context.id[cpu])
+
+#define cpu_asid(cpu, mm)	\
+	(cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
 
 /*
  * Virtual Page Number mask
@@ -39,6 +39,12 @@
 #define MMU_VPN_MASK	0xfffff000
 
 #ifdef CONFIG_MMU
+#if defined(CONFIG_SUPERH32)
+#include "mmu_context_32.h"
+#else
+#include "mmu_context_64.h"
+#endif
+
 /*
  * Get MMU context if needed.
  */
@@ -59,6 +65,14 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
 		 */
 		flush_tlb_all();
 
+#ifdef CONFIG_SUPERH64
+		/*
+		 * The SH-5 cache uses the ASIDs, requiring both the I and D
+		 * cache to be flushed when the ASID is exhausted. Weak.
+		 */
+		flush_cache_all();
+#endif
+
 		/*
 		 * Fix version; Note that we avoid version #0
 		 * to distingush NO_CONTEXT.
@@ -86,39 +100,6 @@ static inline int init_new_context(struct task_struct *tsk,
 }
 
 /*
- * Destroy context related info for an mm_struct that is about
- * to be put to rest.
- */
-static inline void destroy_context(struct mm_struct *mm)
-{
-	/* Do nothing */
-}
-
-static inline void set_asid(unsigned long asid)
-{
-	unsigned long __dummy;
-
-	__asm__ __volatile__ ("mov.l	%2, %0\n\t"
-			      "and	%3, %0\n\t"
-			      "or	%1, %0\n\t"
-			      "mov.l	%0, %2"
-			      : "=&r" (__dummy)
-			      : "r" (asid), "m" (__m(MMU_PTEH)),
-			        "r" (0xffffff00));
-}
-
-static inline unsigned long get_asid(void)
-{
-	unsigned long asid;
-
-	__asm__ __volatile__ ("mov.l	%1, %0"
-			      : "=r" (asid)
-			      : "m" (__m(MMU_PTEH)));
-	asid &= MMU_CONTEXT_ASID_MASK;
-	return asid;
-}
-
-/*
  * After we have set current->mm to a new value, this activates
  * the context for the new mm so we see the new mappings.
  */
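The set_asid()/get_asid() inline assembly dropped above (presumably relocated to the newly included per-ABI headers) is a read-modify-write of the ASID field in MMU_PTEH. A roughly equivalent plain-C sketch, assuming the ctrl_inl()/ctrl_outl() accessors this header already uses for MMU_TTB; the _sketch names are illustrative only:

static inline void set_asid_sketch(unsigned long asid)
{
	unsigned long pteh = ctrl_inl(MMU_PTEH);

	pteh &= MMU_CONTEXT_VERSION_MASK;	/* the 0xffffff00 mask from the asm */
	pteh |= asid & MMU_CONTEXT_ASID_MASK;	/* install the new 8-bit ASID       */
	ctrl_outl(pteh, MMU_PTEH);
}

static inline unsigned long get_asid_sketch(void)
{
	return ctrl_inl(MMU_PTEH) & MMU_CONTEXT_ASID_MASK;
}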
@@ -128,17 +109,6 @@ static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
 	set_asid(cpu_asid(cpu, mm));
 }
 
-/* MMU_TTB is used for optimizing the fault handling. */
-static inline void set_TTB(pgd_t *pgd)
-{
-	ctrl_outl((unsigned long)pgd, MMU_TTB);
-}
-
-static inline pgd_t *get_TTB(void)
-{
-	return (pgd_t *)ctrl_inl(MMU_TTB);
-}
-
 static inline void switch_mm(struct mm_struct *prev,
 			     struct mm_struct *next,
 			     struct task_struct *tsk)
@@ -153,17 +123,7 @@ static inline void switch_mm(struct mm_struct *prev,
 	if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
 		activate_context(next, cpu);
 }
-
-#define deactivate_mm(tsk,mm)	do { } while (0)
-
-#define activate_mm(prev, next) \
-	switch_mm((prev),(next),NULL)
-
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-#else /* !CONFIG_MMU */
+#else
 #define get_mmu_context(mm)		do { } while (0)
 #define init_new_context(tsk,mm)	(0)
 #define destroy_context(mm)		do { } while (0)
@@ -173,10 +133,11 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #define get_TTB()			(0)
 #define activate_context(mm,cpu)	do { } while (0)
 #define switch_mm(prev,next,tsk)	do { } while (0)
+#endif /* CONFIG_MMU */
+
+#define activate_mm(prev, next)		switch_mm((prev),(next),NULL)
 #define deactivate_mm(tsk,mm)		do { } while (0)
-#define activate_mm(prev,next)		do { } while (0)
 #define enter_lazy_tlb(mm,tsk)		do { } while (0)
-#endif /* CONFIG_MMU */
 
 #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
 /*
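With the #endif /* CONFIG_MMU */ moved up, activate_mm(), deactivate_mm() and enter_lazy_tlb() are defined once for both configurations: activate_mm() now forwards to switch_mm(), which the nommu branch stubs out as a do-nothing macro. A rough illustration only; the caller below is hypothetical:

/* Illustration: what a caller of activate_mm() sees after this change. */
void example_activate(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * CONFIG_MMU:  expands to switch_mm(prev, next, NULL), which brings
	 *              the new mm's context up on this CPU via activate_context().
	 * !CONFIG_MMU: switch_mm() is do { } while (0), so this compiles away.
	 */
	activate_mm(prev, next);
}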