diff options
| author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2011-05-24 20:11:57 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-05-25 11:39:15 -0400 |
| commit | ff075d605511784c79cbf0ae73d90e07238267b3 (patch) | |
| tree | 700690d0ae5e4d712ce3ba3b182af8e5deae869b /arch/um/include/asm | |
| parent | 7a95a2c80748bb91e0bf4b8d58396542e1319d21 (diff) | |
um: mmu_gather rework
Fix up the um mmu_gather code to conform to the new API.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/um/include/asm')
| -rw-r--r-- | arch/um/include/asm/tlb.h | 29 |
1 file changed, 11 insertions, 18 deletions
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index 660caedac9e..4febacd1a8a 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h | |||
| @@ -22,9 +22,6 @@ struct mmu_gather { | |||
| 22 | unsigned int fullmm; /* non-zero means full mm flush */ | 22 | unsigned int fullmm; /* non-zero means full mm flush */ |
| 23 | }; | 23 | }; |
| 24 | 24 | ||
| 25 | /* Users of the generic TLB shootdown code must declare this storage space. */ | ||
| 26 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
| 27 | |||
| 28 | static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, | 25 | static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, |
| 29 | unsigned long address) | 26 | unsigned long address) |
| 30 | { | 27 | { |
| @@ -47,27 +44,20 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) | |||
| 47 | } | 44 | } |
| 48 | } | 45 | } |
| 49 | 46 | ||
| 50 | /* tlb_gather_mmu | 47 | static inline void |
| 51 | * Return a pointer to an initialized struct mmu_gather. | 48 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) |
| 52 | */ | ||
| 53 | static inline struct mmu_gather * | ||
| 54 | tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) | ||
| 55 | { | 49 | { |
| 56 | struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); | ||
| 57 | |||
| 58 | tlb->mm = mm; | 50 | tlb->mm = mm; |
| 59 | tlb->fullmm = full_mm_flush; | 51 | tlb->fullmm = full_mm_flush; |
| 60 | 52 | ||
| 61 | init_tlb_gather(tlb); | 53 | init_tlb_gather(tlb); |
| 62 | |||
| 63 | return tlb; | ||
| 64 | } | 54 | } |
| 65 | 55 | ||
| 66 | extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, | 56 | extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, |
| 67 | unsigned long end); | 57 | unsigned long end); |
| 68 | 58 | ||
| 69 | static inline void | 59 | static inline void |
| 70 | tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) | 60 | tlb_flush_mmu(struct mmu_gather *tlb) |
| 71 | { | 61 | { |
| 72 | if (!tlb->need_flush) | 62 | if (!tlb->need_flush) |
| 73 | return; | 63 | return; |
| @@ -83,12 +73,10 @@ tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) | |||
| 83 | static inline void | 73 | static inline void |
| 84 | tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) | 74 | tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) |
| 85 | { | 75 | { |
| 86 | tlb_flush_mmu(tlb, start, end); | 76 | tlb_flush_mmu(tlb); |
| 87 | 77 | ||
| 88 | /* keep the page table cache within bounds */ | 78 | /* keep the page table cache within bounds */ |
| 89 | check_pgt_cache(); | 79 | check_pgt_cache(); |
| 90 | |||
| 91 | put_cpu_var(mmu_gathers); | ||
| 92 | } | 80 | } |
| 93 | 81 | ||
| 94 | /* tlb_remove_page | 82 | /* tlb_remove_page |
| @@ -96,11 +84,16 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) | |||
| 96 | * while handling the additional races in SMP caused by other CPUs | 84 | * while handling the additional races in SMP caused by other CPUs |
| 97 | * caching valid mappings in their TLBs. | 85 | * caching valid mappings in their TLBs. |
| 98 | */ | 86 | */ |
| 99 | static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | 87 | static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) |
| 100 | { | 88 | { |
| 101 | tlb->need_flush = 1; | 89 | tlb->need_flush = 1; |
| 102 | free_page_and_swap_cache(page); | 90 | free_page_and_swap_cache(page); |
| 103 | return; | 91 | return 1; /* avoid calling tlb_flush_mmu */ |
| 92 | } | ||
| 93 | |||
| 94 | static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | ||
| 95 | { | ||
| 96 | __tlb_remove_page(tlb, page); | ||
| 104 | } | 97 | } |
| 105 | 98 | ||
| 106 | /** | 99 | /** |
