Diffstat (limited to 'include/asm-s390/tlbflush.h')
-rw-r--r--  include/asm-s390/tlbflush.h | 140
 1 file changed, 0 insertions(+), 140 deletions(-)
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
deleted file mode 100644
index d60394b9745e..000000000000
--- a/include/asm-s390/tlbflush.h
+++ /dev/null
@@ -1,140 +0,0 @@
-#ifndef _S390_TLBFLUSH_H
-#define _S390_TLBFLUSH_H
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <asm/processor.h>
-#include <asm/pgalloc.h>
-
-/*
- * Flush all tlb entries on the local cpu.
- */
-static inline void __tlb_flush_local(void)
-{
-	asm volatile("ptlb" : : : "memory");
-}
-
-#ifdef CONFIG_SMP
-/*
- * Flush all tlb entries on all cpus.
- */
-void smp_ptlb_all(void);
-
-static inline void __tlb_flush_global(void)
-{
-	register unsigned long reg2 asm("2");
-	register unsigned long reg3 asm("3");
-	register unsigned long reg4 asm("4");
-	long dummy;
-
-#ifndef __s390x__
-	if (!MACHINE_HAS_CSP) {
-		smp_ptlb_all();
-		return;
-	}
-#endif /* __s390x__ */
-
-	dummy = 0;
-	reg2 = reg3 = 0;
-	reg4 = ((unsigned long) &dummy) + 1;
-	asm volatile(
-		"	csp %0,%2"
-		: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
-}
-
-static inline void __tlb_flush_full(struct mm_struct *mm)
-{
-	cpumask_t local_cpumask;
-
-	preempt_disable();
-	/*
-	 * If the process only ran on the local cpu, do a local flush.
-	 */
-	local_cpumask = cpumask_of_cpu(smp_processor_id());
-	if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
-		__tlb_flush_local();
-	else
-		__tlb_flush_global();
-	preempt_enable();
-}
-#else
-#define __tlb_flush_full(mm)	__tlb_flush_local()
-#endif
-
-/*
- * Flush all tlb entries of a page table on all cpus.
- */
-static inline void __tlb_flush_idte(unsigned long asce)
-{
-	asm volatile(
-		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
-		: : "a" (2048), "a" (asce) : "cc" );
-}
-
-static inline void __tlb_flush_mm(struct mm_struct * mm)
-{
-	if (unlikely(cpus_empty(mm->cpu_vm_mask)))
-		return;
-	/*
-	 * If the machine has IDTE we prefer to do a per mm flush
-	 * on all cpus instead of doing a local flush if the mm
-	 * only ran on the local cpu.
-	 */
-	if (MACHINE_HAS_IDTE) {
-		if (mm->context.noexec)
-			__tlb_flush_idte((unsigned long)
-					 get_shadow_table(mm->pgd) |
-					 mm->context.asce_bits);
-		__tlb_flush_idte((unsigned long) mm->pgd |
-				 mm->context.asce_bits);
-		return;
-	}
-	__tlb_flush_full(mm);
-}
-
-static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
-{
-	if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm)
-		__tlb_flush_mm(mm);
-}
-
-/*
- * TLB flushing:
- *  flush_tlb() - flushes the current mm struct TLBs
- *  flush_tlb_all() - flushes all processes TLBs
- *  flush_tlb_mm(mm) - flushes the specified mm context TLB's
- *  flush_tlb_page(vma, vmaddr) - flushes one page
- *  flush_tlb_range(vma, start, end) - flushes a range of pages
- *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
- */
-
-/*
- * flush_tlb_mm goes together with ptep_set_wrprotect for the
- * copy_page_range operation and flush_tlb_range is related to
- * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
- * ptep_get_and_clear do not flush the TLBs directly if the mm has
- * only one user. At the end of the update the flush_tlb_mm and
- * flush_tlb_range functions need to do the flush.
- */
-#define flush_tlb()				do { } while (0)
-#define flush_tlb_all()				do { } while (0)
-#define flush_tlb_page(vma, addr)		do { } while (0)
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	__tlb_flush_mm_cond(mm);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	__tlb_flush_mm_cond(vma->vm_mm);
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start,
-					  unsigned long end)
-{
-	__tlb_flush_mm(&init_mm);
-}
-
-#endif /* _S390_TLBFLUSH_H */
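
Note: the ASCE value handed to __tlb_flush_idte() in the removed header is formed by OR-ing a page-table origin ((unsigned long) mm->pgd, or its shadow table) with mm->context.asce_bits. The stand-alone C sketch below illustrates only that composition; the constant, helper name, and values used here are made-up placeholders for illustration, not the architecture-defined bits or any kernel API.

/*
 * Illustrative sketch only (not kernel code): build an ASCE-style value
 * by OR-ing a page-aligned table origin with control bits, the way the
 * header above combines mm->pgd with mm->context.asce_bits.
 * EXAMPLE_ASCE_BITS and the addresses are invented placeholders.
 */
#include <stdio.h>

#define EXAMPLE_ASCE_BITS	0x07UL	/* stand-in for mm->context.asce_bits */

static unsigned long make_asce(unsigned long table_origin, unsigned long asce_bits)
{
	/* The table origin is page aligned, so its low bits are free for controls. */
	return table_origin | asce_bits;
}

int main(void)
{
	unsigned long pgd_origin = 0x12345000UL;	/* stand-in for (unsigned long) mm->pgd */

	printf("asce = %#lx\n", make_asce(pgd_origin, EXAMPLE_ASCE_BITS));
	return 0;
}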