Diffstat (limited to 'include/asm-ia64/tlbflush.h')
-rw-r--r--	include/asm-ia64/tlbflush.h	99
1 file changed, 99 insertions, 0 deletions
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
new file mode 100644
index 000000000000..b65c62702724
--- /dev/null
+++ b/include/asm-ia64/tlbflush.h
@@ -0,0 +1,99 @@
#ifndef _ASM_IA64_TLBFLUSH_H
#define _ASM_IA64_TLBFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>

#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void local_flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
  extern void smp_flush_tlb_mm (struct mm_struct *mm);
# define flush_tlb_all()	smp_flush_tlb_all()
#else
# define flush_tlb_all()	local_flush_tlb_all()
#endif

static inline void
local_finish_flush_tlb_mm (struct mm_struct *mm)
{
	if (mm == current->active_mm)
		activate_context(mm);
}

/*
 * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
 * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
 * the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

	mm->context = 0;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	smp_flush_tlb_mm(mm);
#else
	local_finish_flush_tlb_mm(mm);
#endif
}
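
/*
 * Editor's sketch (not part of the original file): how the fork() path
 * described above might reach flush_tlb_mm().  After the parent's PTEs
 * are downgraded to read-only for copy-on-write, any cached writable
 * translations must be purged.  write_protect_user_ptes() is a
 * hypothetical helper standing in for the real page-table walk.
 */
#if 0	/* illustration only, not compiled */
static void example_cow_prepare (struct mm_struct *parent_mm)
{
	write_protect_user_ptes(parent_mm);	/* hypothetical: mark PTEs read-only */
	flush_tlb_mm(parent_mm);		/* drop stale writable TLB entries */
}
#endif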

extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);

/*
 * Page-granular TLB flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
	if (vma->vm_mm == current->active_mm)
		ia64_ptcl(addr, (PAGE_SHIFT << 2));
	else
		vma->vm_mm->context = 0;
#endif
}
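
/*
 * Editor's sketch (not part of the original file): a typical caller
 * changes one PTE and then purges the matching translation.  Note the
 * (PAGE_SHIFT << 2) above: the ia64 ptc.l instruction expects the page
 * size (ps) in bits 7:2 of its second operand.
 */
#if 0	/* illustration only, not compiled */
static void example_update_one_pte (struct vm_area_struct *vma, unsigned long uaddr)
{
	/* ... modify the PTE that maps uaddr here ... */
	flush_tlb_page(vma, uaddr);	/* purge the now-stale translation */
}
#endif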

/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START, END).
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/*
	 * Deprecated.  The virtual page table is now flushed via the normal gather/flush
	 * interface (see tlb.h).
	 */
}

#define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */
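
/*
 * Editor's sketch (not part of the original file): a vfree()-style path
 * would use flush_tlb_kernel_range() after tearing down kernel mappings;
 * on this port it currently degrades to a full TLB flush, as the XXX
 * comment above notes.
 */
#if 0	/* illustration only, not compiled */
static void example_unmap_kernel_area (unsigned long kstart, unsigned long kend)
{
	/* ... clear the kernel page-table entries for [kstart, kend) ... */
	flush_tlb_kernel_range(kstart, kend);	/* today: flushes everything */
}
#endif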

#endif /* _ASM_IA64_TLBFLUSH_H */