author      Akira Takeuchi <takeuchi.akr@jp.panasonic.com>    2010-10-27 12:28:49 -0400
committer   David Howells <dhowells@redhat.com>               2010-10-27 12:28:49 -0400
commit      a9bc60ebfd5766ce5f6095d0fed3d9978990122f (patch)
tree        d6044bbb56bbb06fb6f13fab9f079a20938d0960
parent      492e675116003b99dfcf0fa70084027e86bc0161 (diff)
MN10300: Make the use of PIDR to mark TLB entries controllable
Make the use of the PIDR register to mark TLB entries as belonging to
particular processes controllable through a new Kconfig option
(CONFIG_MN10300_TLB_USE_PIDR, default y).

Signed-off-by: Akira Takeuchi <takeuchi.akr@jp.panasonic.com>
Signed-off-by: Kiyoshi Owada <owada.kiyoshi@jp.panasonic.com>
Signed-off-by: David Howells <dhowells@redhat.com>
-rw-r--r--   arch/mn10300/Kconfig                       3
-rw-r--r--   arch/mn10300/include/asm/mmu_context.h    59
-rw-r--r--   arch/mn10300/include/asm/tlbflush.h       43
-rw-r--r--   arch/mn10300/mm/mmu-context.c             41
4 files changed, 84 insertions(+), 62 deletions(-)
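In outline: with the new option enabled, TLB entries are tagged with the owning process's 8-bit TLB PID, so switching address spaces only requires loading PIDR with the incoming mm's PID; with it disabled, activate_context() falls back to a full local_flush_tlb(). Either way, switch_mm() reduces to the sequence sketched here (condensed from the mmu_context.h change below; a sketch, not a drop-in):

    /* Condensed sketch of the context-switch path after this patch. */
    static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                                 struct task_struct *tsk)
    {
            if (prev != next) {
                    PTBR = (unsigned long) next->pgd;  /* point at the new page tables */
                    activate_context(next);            /* load PIDR, or flush the TLB */
            }
    }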
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index dd7b5700358b..7bd920b1c06f 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -142,6 +142,9 @@ config FPU
 
 source "arch/mn10300/mm/Kconfig.cache"
 
+config MN10300_TLB_USE_PIDR
+        def_bool y
+
 menu "Memory layout options"
 
 config KERNEL_RAM_BASE_ADDRESS
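def_bool y declares a promptless symbol that defaults to y, so PIDR-based tagging remains the default behaviour; presumably the point of the option is that a later configuration can turn it off. The rest of the patch then selects between two compile-time shapes, condensed here from the header change below (a sketch, not a drop-in):

    #ifdef CONFIG_MN10300_TLB_USE_PIDR
    extern unsigned long mmu_context_cache[NR_CPUS];   /* per-CPU context counters */
    #define mm_context(mm)  (mm->context.tlbpid[smp_processor_id()])
    #else
    #define init_new_context(tsk, mm)  (0)             /* nothing to track per mm */
    #define activate_context(mm)       local_flush_tlb()
    #endif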
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index 24d63f0f7377..5fb3648968ae 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -27,28 +27,22 @@
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
 
+#define MMU_CONTEXT_TLBPID_NR      256
 #define MMU_CONTEXT_TLBPID_MASK    0x000000ffUL
 #define MMU_CONTEXT_VERSION_MASK   0xffffff00UL
 #define MMU_CONTEXT_FIRST_VERSION  0x00000100UL
 #define MMU_NO_CONTEXT             0x00000000UL
-
-extern unsigned long mmu_context_cache[NR_CPUS];
-#define mm_context(mm)  (mm->context.tlbpid[smp_processor_id()])
+#define MMU_CONTEXT_TLBPID_LOCK_NR 0
 
 #define enter_lazy_tlb(mm, tsk)    do {} while (0)
 
-#ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, mm) \
-        cpumask_set_cpu((cpu), mm_cpumask(mm))
-#define cpu_maybe_ran_vm(cpu, mm) \
-        cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
-#else
-#define cpu_ran_vm(cpu, mm)        do {} while (0)
-#define cpu_maybe_ran_vm(cpu, mm)  true
-#endif /* CONFIG_SMP */
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+extern unsigned long mmu_context_cache[NR_CPUS];
+#define mm_context(mm)  (mm->context.tlbpid[smp_processor_id()])
 
-/*
- * allocate an MMU context
+/**
+ * allocate_mmu_context - Allocate storage for the arch-specific MMU data
+ * @mm: The userspace VM context being set up
  */
 static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
 {
@@ -101,34 +95,41 @@ static inline int init_new_context(struct task_struct *tsk,
 }
 
 /*
- * destroy context related info for an mm_struct that is about to be put to
- * rest
- */
-#define destroy_context(mm)  do { } while (0)
-
-/*
  * after we have set current->mm to a new value, this activates the context for
  * the new mm so we see the new mappings.
  */
-static inline void activate_context(struct mm_struct *mm, int cpu)
+static inline void activate_context(struct mm_struct *mm)
 {
         PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
 }
+#else /* CONFIG_MN10300_TLB_USE_PIDR */
 
-/*
- * change between virtual memory sets
+#define init_new_context(tsk, mm)  (0)
+#define activate_context(mm)       local_flush_tlb()
+
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
+
+/**
+ * destroy_context - Destroy mm context information
+ * @mm: The MM being destroyed.
+ *
+ * Destroy context related info for an mm_struct that is about to be put to
+ * rest
+ */
+#define destroy_context(mm) do {} while (0)
+
+/**
+ * switch_mm - Change between userspace virtual memory contexts
+ * @prev: The outgoing MM context.
+ * @next: The incoming MM context.
+ * @tsk: The incoming task.
  */
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                              struct task_struct *tsk)
 {
-        int cpu = smp_processor_id();
-
         if (prev != next) {
-                cpu_ran_vm(cpu, next);
-                activate_context(next, cpu);
                 PTBR = (unsigned long) next->pgd;
-        } else if (!cpu_maybe_ran_vm(cpu, next)) {
-                activate_context(next, cpu);
+                activate_context(next);
         }
 }
 
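The constants above split the per-mm, per-CPU context number into a 24-bit version and an 8-bit TLB PID; only the PID part is ever loaded into PIDR. A stand-alone illustration of that split, using the mask values from the header (userspace model, not kernel code):

    #include <stdio.h>

    #define MMU_CONTEXT_TLBPID_MASK   0x000000ffUL  /* low 8 bits: TLB PID */
    #define MMU_CONTEXT_VERSION_MASK  0xffffff00UL  /* high 24 bits: version */
    #define MMU_CONTEXT_FIRST_VERSION 0x00000100UL

    int main(void)
    {
            unsigned long ctx = MMU_CONTEXT_FIRST_VERSION + 42;  /* version 1, TLB PID 42 */

            printf("version=%#lx pid=%#lx\n",
                   ctx & MMU_CONTEXT_VERSION_MASK,  /* 0x100 */
                   ctx & MMU_CONTEXT_TLBPID_MASK);  /* 0x2a: the only part written to PIDR */
            return 0;
    }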
diff --git a/arch/mn10300/include/asm/tlbflush.h b/arch/mn10300/include/asm/tlbflush.h
index 5d54bf57e6c3..c3c194d4031e 100644
--- a/arch/mn10300/include/asm/tlbflush.h
+++ b/arch/mn10300/include/asm/tlbflush.h
@@ -13,6 +13,12 @@
 
 #include <asm/processor.h>
 
+struct tlb_state {
+        struct mm_struct        *active_mm;
+        int                     state;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
+
 /**
  * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
  */
@@ -31,20 +37,51 @@ static inline void local_flush_tlb(void)
 /**
  * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
  */
-#define local_flush_tlb_all()     local_flush_tlb()
+static inline void local_flush_tlb_all(void)
+{
+        local_flush_tlb();
+}
 
 /**
  * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
  */
-#define local_flush_tlb_one(addr) local_flush_tlb()
+static inline void local_flush_tlb_one(unsigned long addr)
+{
+        local_flush_tlb();
+}
 
 /**
  * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
  * @mm: The MM to flush for
  * @addr: The address of the target page in RAM (not its page struct)
  */
-extern void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr);
+static inline
+void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
+{
+        unsigned long pteu, flags, cnx;
+
+        addr &= PAGE_MASK;
 
+        local_irq_save(flags);
+
+        cnx = 1;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+        cnx = mm->context.tlbpid[smp_processor_id()];
+#endif
+        if (cnx) {
+                pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+                pteu |= cnx & xPTEU_PID;
+#endif
+                IPTEU = pteu;
+                DPTEU = pteu;
+                if (IPTEL & xPTEL_V)
+                        IPTEL = 0;
+                if (DPTEL & xPTEL_V)
+                        DPTEL = 0;
+        }
+        local_irq_restore(flags);
+}
 
 /*
  * TLB flushing:
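When PIDR tagging is compiled out, local_flush_tlb_page() forces cnx to 1 so the per-page flush always proceeds, and the probe value written to IPTEU/DPTEU is just the page-aligned address; with tagging enabled, the mm's TLB PID is OR'd in so only that process's entry is matched. A stand-alone illustration of how the probe value is formed (userspace model; the PAGE_MASK and xPTEU_PID values here are assumptions for illustration only):

    #include <stdio.h>

    #define PAGE_MASK   (~0xfffUL)      /* assumed 4KiB pages - illustration only */
    #define xPTEU_PID   0x000000ffUL    /* assumed width of the hardware PID field */

    static unsigned long make_probe(unsigned long addr, unsigned long cnx, int use_pidr)
    {
            unsigned long pteu = addr & PAGE_MASK;   /* mirrors "addr &= PAGE_MASK" */

            if (use_pidr)
                    pteu |= cnx & xPTEU_PID;         /* match only this mm's tagged entry */
            return pteu;
    }

    int main(void)
    {
            printf("%#lx\n", make_probe(0x12345678UL, 42, 1));  /* 0x1234502a */
            printf("%#lx\n", make_probe(0x12345678UL, 42, 0));  /* 0x12345000 */
            return 0;
    }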
diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c
index 3d83966e30e1..a4f7d3dcc6e6 100644
--- a/arch/mn10300/mm/mmu-context.c
+++ b/arch/mn10300/mm/mmu-context.c
@@ -13,40 +13,15 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
 /*
  * list of the MMU contexts last allocated on each CPU
  */
 unsigned long mmu_context_cache[NR_CPUS] = {
-        [0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1,
+        [0 ... NR_CPUS - 1] =
+        MMU_CONTEXT_FIRST_VERSION * 2 - (1 - MMU_CONTEXT_TLBPID_LOCK_NR),
 };
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
 
-/*
- * flush the specified TLB entry
- */
-void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
-{
-        unsigned long pteu, cnx, flags;
-
-        addr &= PAGE_MASK;
-
-        /* make sure the context doesn't migrate and defend against
-         * interference from vmalloc'd regions */
-        local_irq_save(flags);
-
-        cnx = mm_context(mm);
-
-        if (cnx != MMU_NO_CONTEXT) {
-                pteu = addr | (cnx & 0x000000ffUL);
-                IPTEU = pteu;
-                DPTEU = pteu;
-                if (IPTEL & xPTEL_V)
-                        IPTEL = 0;
-                if (DPTEL & xPTEL_V)
-                        DPTEL = 0;
-        }
-
-        local_irq_restore(flags);
-}
-
 /*
  * preemptively set a TLB entry
@@ -63,10 +38,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *pte
          * interference from vmalloc'd regions */
         local_irq_save(flags);
 
+        cnx = ~MMU_NO_CONTEXT;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
         cnx = mm_context(vma->vm_mm);
+#endif
 
         if (cnx != MMU_NO_CONTEXT) {
-                pteu = addr | (cnx & 0x000000ffUL);
+                pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+                pteu |= cnx & MMU_CONTEXT_TLBPID_MASK;
+#endif
                 if (!(pte_val(pte) & _PAGE_NX)) {
                         IPTEU = pteu;
                         if (IPTEL & xPTEL_V)
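The same pattern closes out update_mmu_cache(): with PIDR tagging compiled out, cnx is preset to ~MMU_NO_CONTEXT so the preload is unconditional and the entry carries no PID; with it enabled, only an mm that currently owns a context gets its entry preloaded and tagged. A quick stand-alone check of the ~MMU_NO_CONTEXT trick (userspace, illustrative only):

    #include <assert.h>

    #define MMU_NO_CONTEXT  0x00000000UL

    int main(void)
    {
            unsigned long cnx = ~MMU_NO_CONTEXT;  /* value used when PIDR tagging is off */

            assert(cnx != MMU_NO_CONTEXT);        /* so the preload branch is always taken */
            return 0;
    }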