author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-05-24 20:11:54 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-05-25 11:39:14 -0400
commit    1e56a56410bb64bce62d44563e35a143fc2d515f (patch)
tree      0642980da0666f8badd27a839b8a0e263a6f6373 /arch/sh/include
parent    9e14f6741062b6e6a71de75b4375e14c3e92c213 (diff)
sh: mmu_gather rework
Fix up the sh mmu_gather code to conform to the new API.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/sh/include')
 arch/sh/include/asm/tlb.h | 28 ++++++++++++++++------------
 1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 75abb38dffd5..6c308d8b9a50 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -23,8 +23,6 @@ struct mmu_gather {
 	unsigned long		start, end;
 };
 
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 static inline void init_tlb_gather(struct mmu_gather *tlb)
 {
 	tlb->start = TASK_SIZE;
@@ -36,17 +34,13 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 	}
 }
 
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
 
 	init_tlb_gather(tlb);
-
-	return tlb;
 }
 
 static inline void
@@ -57,8 +51,6 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
 }
 
 static inline void
@@ -91,7 +83,21 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 	}
 }
 
-#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+}
+
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	free_page_and_swap_cache(page);
+	return 1; /* avoid calling tlb_flush_mmu */
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	__tlb_remove_page(tlb, page);
+}
+
 #define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
 #define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
 #define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
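For context, a minimal sketch of how a caller uses the reworked interface: the mmu_gather now lives on the caller's stack and is passed in, instead of being fetched from the per-CPU mmu_gathers variable with get_cpu_var(). The function name and loop below (example_unmap_range) are hypothetical and only illustrate the calling convention; the tlb_* signatures match the header changes above.

/*
 * Hypothetical caller, not part of this patch: shows the new convention
 * where the gather structure is stack-allocated and handed to
 * tlb_gather_mmu()/tlb_finish_mmu().
 */
static void example_unmap_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;			/* on the stack, no get_cpu_var() */

	tlb_gather_mmu(&tlb, mm, 0);		/* 0 == not a full-mm flush */

	/* ... walk the page tables, then for each freed page: */
	/* tlb_remove_page(&tlb, page); */

	tlb_finish_mmu(&tlb, start, end);	/* flush the TLB range and finish */
}

Note that __tlb_remove_page() returning 1 tells generic callers the page was freed immediately, so no intervening tlb_flush_mmu() call is needed; sh keeps its TLB range flushing in tlb_start_vma()/tlb_end_vma() rather than batching pages.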