path: root/arch/x86/include/asm/tlbflush.h
author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit		8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree		a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/x86/include/asm/tlbflush.h
parent		406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'arch/x86/include/asm/tlbflush.h')
-rw-r--r--	arch/x86/include/asm/tlbflush.h	64
1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 0fee48e279c..169be8938b9 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -5,7 +5,7 @@
 #include <linux/sched.h>
 
 #include <asm/processor.h>
-#include <asm/special_insns.h>
+#include <asm/system.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
@@ -56,10 +56,17 @@ static inline void __flush_tlb_all(void)
 
 static inline void __flush_tlb_one(unsigned long addr)
 {
+	if (cpu_has_invlpg)
 		__flush_tlb_single(addr);
+	else
+		__flush_tlb();
 }
 
-#define TLB_FLUSH_ALL	-1UL
+#ifdef CONFIG_X86_32
+# define TLB_FLUSH_ALL	0xffffffff
+#else
+# define TLB_FLUSH_ALL	-1ULL
+#endif
 
 /*
  * TLB flushing:
@@ -70,10 +77,14 @@ static inline void __flush_tlb_one(unsigned long addr)
  *  - flush_tlb_page(vma, vmaddr) flushes one page
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
+ *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
  * and page-granular flushes are available only on i486 and up.
+ *
+ * x86-64 can only flush individual pages or full VMs. For a range flush
+ * we always do the full VM. Might be worth trying if for a small
+ * range a few INVLPGs in a row are a win.
  */
 
 #ifndef CONFIG_SMP
@@ -102,17 +113,9 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	__flush_tlb();
 }
 
-static inline void flush_tlb_mm_range(struct mm_struct *mm,
-	   unsigned long start, unsigned long end, unsigned long vmflag)
-{
-	if (mm == current->active_mm)
-		__flush_tlb();
-}
-
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
 					   struct mm_struct *mm,
-					   unsigned long start,
-					   unsigned long end)
+					   unsigned long va)
 {
 }
 
@@ -120,35 +123,27 @@ static inline void reset_lazy_tlbstate(void)
 {
 }
 
-static inline void flush_tlb_kernel_range(unsigned long start,
-					  unsigned long end)
-{
-	flush_tlb_all();
-}
-
 #else	/* SMP */
 
 #include <asm/smp.h>
 
 #define local_flush_tlb() __flush_tlb()
 
-#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
-
-#define flush_tlb_range(vma, start, end)	\
-		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
-
 extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-				unsigned long end, unsigned long vmflag);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #define flush_tlb()	flush_tlb_current_task()
 
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+	   unsigned long start, unsigned long end)
+{
+	flush_tlb_mm(vma->vm_mm);
+}
+
 void native_flush_tlb_others(const struct cpumask *cpumask,
-			     struct mm_struct *mm,
-			     unsigned long start, unsigned long end);
+			     struct mm_struct *mm, unsigned long va);
 
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
@@ -161,15 +156,20 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
 static inline void reset_lazy_tlbstate(void)
 {
-	this_cpu_write(cpu_tlbstate.state, 0);
-	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
+	percpu_write(cpu_tlbstate.state, 0);
+	percpu_write(cpu_tlbstate.active_mm, &init_mm);
 }
 
 #endif	/* SMP */
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, start, end)	\
-	native_flush_tlb_others(mask, mm, start, end)
+#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(mask, mm, va)
 #endif
 
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	flush_tlb_all();
+}
+
 #endif /* _ASM_X86_TLBFLUSH_H */
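
The cpu_has_invlpg check restored in __flush_tlb_one() matters because INVLPG is only available on i486 and later, as the comment block in the diff notes; older CPUs can only drop TLB entries by reloading CR3. For reference, the non-paravirt helpers that __flush_tlb_single() and __flush_tlb() map onto live elsewhere in this same header and look roughly like the sketch below (reproduced from memory as an illustration, not part of this commit):

/* Rough sketch of the native helpers from asm/tlbflush.h (not part of this
 * diff): __flush_tlb_single() invalidates one page translation with INVLPG,
 * while __flush_tlb() reloads CR3, which drops all non-global TLB entries. */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __native_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}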
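
The interface change running through the rest of the diff is that flush_tlb_others() and native_flush_tlb_others() go back from a (start, end) range to a single virtual address argument, and the range-based flush_tlb_mm_range() gives way to flush_tlb_mm()/flush_tlb_range(). Below is a minimal caller sketch of the restored single-address interface, loosely modelled on flush_tlb_page() in arch/x86/mm/tlb.c; the function name example_flush_one_page() is made up for illustration:

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>

/* Illustration only: a one-page remote shootdown with the cpumask/mm/va
 * interface this commit restores. */
static void example_flush_one_page(struct mm_struct *mm, unsigned long va)
{
	preempt_disable();

	/* Flush the local TLB entry if this mm is active on this CPU. */
	if (current->active_mm == mm)
		__flush_tlb_one(va);

	/* Then ask any other CPU that may still cache this mm's translations. */
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, va);

	preempt_enable();
}

A full address-space flush passes TLB_FLUSH_ALL in place of a real address, which is why the constant is defined per word size in the diff (0xffffffff on 32-bit, -1ULL on 64-bit).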