author     Guan Xuetao <gxt@mprc.pku.edu.cn>   2011-03-04 07:00:11 -0500
committer  Guan Xuetao <gxt@mprc.pku.edu.cn>   2011-03-16 21:19:21 -0400
commit     289d6b0e287e0acd85f3e6b7ea6c2cb5c234909a
tree       e60caf25649f572924bf7cd272f20ea71b68d4b8 /arch/unicore32
parent     1cf46c42d7688a2e09de87fc9201b0e9a0961866
unicore32: rewrite arch-specific tlb.h to use asm-generic version
Signed-off-by: Guan Xuetao <gxt@mprc.pku.edu.cn>
Diffstat (limited to 'arch/unicore32')
-rw-r--r--  arch/unicore32/include/asm/tlb.h   94
1 file changed, 12 insertions(+), 82 deletions(-)
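For context: <asm-generic/tlb.h> provides struct mmu_gather and the tlb_gather_mmu()/tlb_finish_mmu() machinery that the removed lines in the diff below used to open-code, so the architecture only has to define a few hook macros (tlb_flush(), tlb_start_vma()/tlb_end_vma(), __tlb_remove_tlb_entry(), __pte_free_tlb()) before including the generic header. The sketch below is illustrative only and not part of the patch; the function name and flow are made up to show roughly how core mm code of this era drives the interface and where the unicore32 hooks plug in.

/*
 * Illustrative sketch (not from this patch): roughly how core mm code
 * drives the mmu_gather interface that <asm-generic/tlb.h> now provides
 * for unicore32.  The function name and flow are simplified examples.
 */
static void example_unmap_region(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	/* tlb_gather_mmu() now comes from asm-generic, not the arch */
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

	tlb_start_vma(tlb, vma);	/* no-op on unicore32 after this patch */

	/*
	 * The page-table walk happens here: each torn-down pte goes
	 * through __tlb_remove_tlb_entry() (a no-op on unicore32) and
	 * each page through tlb_remove_page(); freed pte pages go
	 * through __pte_free_tlb(), which calls pgtable_page_dtor()
	 * and tlb_remove_page() as defined in the new header.
	 */

	tlb_end_vma(tlb, vma);		/* no-op on unicore32 after this patch */

	/*
	 * tlb_finish_mmu() eventually flushes via the arch tlb_flush()
	 * hook -- flush_tlb_mm((tlb)->mm) on unicore32 -- and releases
	 * the gathered pages.
	 */
	tlb_finish_mmu(tlb, start, end);
}

The net effect is that unicore32 keeps only the genuinely architecture-specific pieces (how to flush its TLB and how to free page-table pages) and inherits the rest from the generic implementation.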
diff --git a/arch/unicore32/include/asm/tlb.h b/arch/unicore32/include/asm/tlb.h
index 02ee40e47a0d..9cca15cdae94 100644
--- a/arch/unicore32/include/asm/tlb.h
+++ b/arch/unicore32/include/asm/tlb.h
@@ -12,87 +12,17 @@
 #ifndef __UNICORE_TLB_H__
 #define __UNICORE_TLB_H__
 
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
-
-/*
- * TLB handling. This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
-	struct mm_struct *mm;
-	unsigned int fullmm;
-	unsigned long range_start;
-	unsigned long range_end;
-};
-
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
-{
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
-	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
-
-	return tlb;
-}
-
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-	if (tlb->fullmm)
-		flush_tlb_mm(tlb->mm);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
-}
-
-/*
- * Memorize the range for the TLB flush.
- */
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
-{
-	if (!tlb->fullmm) {
-		if (addr < tlb->range_start)
-			tlb->range_start = addr;
-		if (addr + PAGE_SIZE > tlb->range_end)
-			tlb->range_end = addr + PAGE_SIZE;
-	}
-}
-
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush. When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
-	if (!tlb->fullmm) {
-		flush_cache_range(vma, vma->vm_start, vma->vm_end);
-		tlb->range_start = TASK_SIZE;
-		tlb->range_end = 0;
-	}
-}
-
-static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
-	if (!tlb->fullmm && tlb->range_end > 0)
-		flush_tlb_range(vma, tlb->range_start, tlb->range_end);
-}
-
-#define tlb_remove_page(tlb, page)	free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
-#define pud_free_tlb(tlb, x, addr)	do { } while (0)
-
-#define tlb_migrate_finish(mm)		do { } while (0)
+#define tlb_start_vma(tlb, vma)				do { } while (0)
+#define tlb_end_vma(tlb, vma)				do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address)	do { } while (0)
+#define tlb_flush(tlb)					flush_tlb_mm((tlb)->mm)
+
+#define __pte_free_tlb(tlb, pte, addr)		\
+	do {					\
+		pgtable_page_dtor(pte);		\
+		tlb_remove_page((tlb), (pte));	\
+	} while (0)
+
+#include <asm-generic/tlb.h>
 
 #endif