Diffstat (limited to 'include/asm-parisc/tlbflush.h')
-rw-r--r--  include/asm-parisc/tlbflush.h | 80
1 file changed, 0 insertions(+), 80 deletions(-)
diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
deleted file mode 100644
index b72ec66db699..000000000000
--- a/include/asm-parisc/tlbflush.h
+++ /dev/null
@@ -1,80 +0,0 @@
-#ifndef _PARISC_TLBFLUSH_H
-#define _PARISC_TLBFLUSH_H
-
-/* TLB flushing routines.... */
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <asm/mmu_context.h>
-
-
-/* This is for the serialisation of PxTLB broadcasts. At least on the
- * N class systems, only one PxTLB inter processor broadcast can be
- * active at any one time on the Merced bus. This tlb purge
- * synchronisation is fairly lightweight and harmless so we activate
- * it on all SMP systems not just the N class. We also need to have
- * preemption disabled on uniprocessor machines, and spin_lock does that
- * nicely.
- */
-extern spinlock_t pa_tlb_lock;
-
-#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
-#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_all_local(void *);
-
-/*
- * flush_tlb_mm()
- *
- * XXX This code is NOT valid for HP-UX compatibility processes,
- * (although it will probably work 99% of the time). HP-UX
- * processes are free to play with the space id's and save them
- * over long periods of time, etc. so we have to preserve the
- * space and just flush the entire tlb. We need to check the
- * personality in order to do that, but the personality is not
- * currently being set correctly.
- *
- * Of course, Linux processes could do the same thing, but
- * we don't support that (and the compilers, dynamic linker,
- * etc. do not do that).
- */
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	BUG_ON(mm == &init_mm); /* Should never happen */
-
-#ifdef CONFIG_SMP
-	flush_tlb_all();
-#else
-	if (mm) {
-		if (mm->context != 0)
-			free_sid(mm->context);
-		mm->context = alloc_sid();
-		if (mm == current->active_mm)
-			load_context(mm->context);
-	}
-#endif
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-	unsigned long addr)
-{
-	/* For one page, it's not worth testing the split_tlb variable */
-
-	mb();
-	mtsp(vma->vm_mm->context,1);
-	purge_tlb_start();
-	pdtlb(addr);
-	pitlb(addr);
-	purge_tlb_end();
-}
-
-void __flush_tlb_range(unsigned long sid,
-	unsigned long start, unsigned long end);
-
-#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
-
-#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
-
-#endif
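
The comment in the removed header explains why PxTLB purge broadcasts are serialised behind a single lock: only one broadcast can be active on the Merced bus at a time, so every purge is bracketed by purge_tlb_start()/purge_tlb_end(), which take pa_tlb_lock. As a rough, standalone illustration of that pattern only (not kernel code), here is a minimal userspace sketch: a pthread mutex stands in for the pa_tlb_lock spinlock, and do_purge() is a hypothetical placeholder for the pdtlb/pitlb instructions issued in flush_tlb_page().

/*
 * Userspace sketch of the "one purge broadcast at a time" serialisation
 * described in the deleted header.  pa_tlb_lock here is a pthread mutex,
 * not the kernel spinlock; do_purge() is a made-up stand-in for the
 * pdtlb/pitlb purge instructions.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pa_tlb_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for purge_tlb_start()/purge_tlb_end() from the header. */
#define purge_tlb_start()  pthread_mutex_lock(&pa_tlb_lock)
#define purge_tlb_end()    pthread_mutex_unlock(&pa_tlb_lock)

/* Hypothetical per-page purge; the real code issues pdtlb/pitlb here. */
static void do_purge(unsigned long addr)
{
	printf("purging TLB entry for %#lx\n", addr);
}

static void *flusher(void *arg)
{
	unsigned long addr = (unsigned long)arg;

	/* Only one purge "broadcast" is in flight at any time. */
	purge_tlb_start();
	do_purge(addr);
	purge_tlb_end();
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, flusher, (void *)0x1000UL);
	pthread_create(&t2, NULL, flusher, (void *)0x2000UL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Built with -pthread, whichever thread takes the lock first completes its purge before the other starts, mirroring the one-broadcast-at-a-time constraint described in the header comment.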