author     Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /include/asm-generic/tlb.h
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'include/asm-generic/tlb.h')
-rw-r--r--  include/asm-generic/tlb.h  156
1 file changed, 86 insertions(+), 70 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e43f9766259f..e58fa777fa09 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -5,6 +5,8 @@
  * Copyright 2001 Red Hat, Inc.
  * Based on code from mm/memory.c  Copyright Linus Torvalds and others.
  *
+ * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
@@ -17,97 +19,111 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 /*
- * For UP we don't need to worry about TLB flush
- * and page free order so much..
+ * Semi RCU freeing of the page directories.
+ *
+ * This is needed by some architectures to implement software pagetable walkers.
+ *
+ * gup_fast() and other software pagetable walkers do a lockless page-table
+ * walk and therefore needs some synchronization with the freeing of the page
+ * directories. The chosen means to accomplish that is by disabling IRQs over
+ * the walk.
+ *
+ * Architectures that use IPIs to flush TLBs will then automagically DTRT,
+ * since we unlink the page, flush TLBs, free the page. Since the disabling of
+ * IRQs delays the completion of the TLB flush we can never observe an already
+ * freed page.
+ *
+ * Architectures that do not have this (PPC) need to delay the freeing by some
+ * other means, this is that means.
+ *
+ * What we do is batch the freed directory pages (tables) and RCU free them.
+ * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
+ * holds off grace periods.
+ *
+ * However, in order to batch these pages we need to allocate storage, this
+ * allocation is deep inside the MM code and can thus easily fail on memory
+ * pressure. To guarantee progress we fall back to single table freeing, see
+ * the implementation of tlb_remove_table_one().
+ *
  */
-#ifdef CONFIG_SMP
-  #ifdef ARCH_FREE_PTR_NR
-  #define FREE_PTR_NR   ARCH_FREE_PTR_NR
-  #else
-  #define FREE_PTE_NR   506
-  #endif
-  #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
-#else
-  #define FREE_PTE_NR   1
-  #define tlb_fast_mode(tlb) 1
+struct mmu_table_batch {
+	struct rcu_head		rcu;
+	unsigned int		nr;
+	void			*tables[0];
+};
+
+#define MAX_TABLE_BATCH		\
+	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+
+extern void tlb_table_flush(struct mmu_gather *tlb);
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
 #endif
 
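The comment block above describes the scheme, but the code that implements it is not in this header; it was added to mm/memory.c by the same series. The sketch below is only illustrative: it uses the declarations above (struct mmu_table_batch, MAX_TABLE_BATCH, tlb_table_flush()), tlb_remove_table_one() is the fallback the comment names, and the GFP flags are an assumption.

/* Illustrative sketch only -- the real implementation lives in mm/memory.c. */
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->need_flush = 1;

	if (*batch == NULL) {
		/* Allocate batch storage; this can fail under memory pressure. */
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			/* Fallback named in the comment above: free this single
			 * table synchronously once concurrent walkers are done. */
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);	/* hand the full batch to sched-RCU */
}

tlb_table_flush() then queues the batch for freeing after a sched-RCU grace period, so a table is only really freed once every lockless walker that might still see it has re-enabled IRQs.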
-/* struct mmu_gather is an opaque type used by the mm code for passing around
- * any data needed by arch specific code for tlb_remove_page.
+/*
+ * If we can't allocate a page to make a big batch of page pointers
+ * to work on, then just handle a few from the on-stack structure.
  */
-struct mmu_gather {
-	struct mm_struct	*mm;
-	unsigned int		nr;	/* set to ~0U means fast mode */
-	unsigned int		need_flush;/* Really unmapped some ptes? */
-	unsigned int		fullmm; /* non-zero means full mm flush */
-	struct page *		pages[FREE_PTE_NR];
+#define MMU_GATHER_BUNDLE	8
+
+struct mmu_gather_batch {
+	struct mmu_gather_batch	*next;
+	unsigned int		nr;
+	unsigned int		max;
+	struct page		*pages[0];
 };
 
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+#define MAX_GATHER_BATCH	\
+	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
 
-/* tlb_gather_mmu
- *	Return a pointer to an initialized struct mmu_gather.
+/* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page.
  */
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
-{
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
-	tlb->mm = mm;
+struct mmu_gather {
+	struct mm_struct	*mm;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	struct mmu_table_batch	*batch;
+#endif
+	unsigned int		need_flush : 1,	/* Did free PTEs */
+				fast_mode  : 1; /* No batching   */
 
-	/* Use fast mode if only one CPU is online */
-	tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;
+	unsigned int		fullmm;
 
-	tlb->fullmm = full_mm_flush;
+	struct mmu_gather_batch *active;
+	struct mmu_gather_batch	local;
+	struct page		*__pages[MMU_GATHER_BUNDLE];
+};
 
-	return tlb;
-}
+#define HAVE_GENERIC_MMU_GATHER
 
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+static inline int tlb_fast_mode(struct mmu_gather *tlb)
 {
-	if (!tlb->need_flush)
-		return;
-	tlb->need_flush = 0;
-	tlb_flush(tlb);
-	if (!tlb_fast_mode(tlb)) {
-		free_pages_and_swap_cache(tlb->pages, tlb->nr);
-		tlb->nr = 0;
-	}
+#ifdef CONFIG_SMP
+	return tlb->fast_mode;
+#else
+	/*
+	 * For UP we don't need to worry about TLB flush
+	 * and page free order so much..
+	 */
+	return 1;
+#endif
 }
 
-/* tlb_finish_mmu
- *	Called at the end of the shootdown operation to free up any resources
- *	that were required.
- */
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-	tlb_flush_mmu(tlb, start, end);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
-}
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_flush_mmu(struct mmu_gather *tlb);
+void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
+int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
 
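The prototypes above move the gather logic out of line into mm/memory.c. Based on the structures declared earlier (the active batch chain, the on-stack local batch and its MMU_GATHER_BUNDLE page slots), a plausible sketch of __tlb_remove_page() is shown below; tlb_next_batch() is an assumed helper name, not something declared in this header.

/* Sketch only, assuming the batching layout declared above. */
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	struct mmu_gather_batch *batch;

	tlb->need_flush = 1;

	if (tlb_fast_mode(tlb)) {
		/* No batching on UP: free immediately and report "room left". */
		free_page_and_swap_cache(page);
		return 1;
	}

	batch = tlb->active;
	batch->pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		/*
		 * Try to chain another page-sized batch (up to MAX_GATHER_BATCH
		 * pointers); if that allocation fails, return 0 so the caller
		 * flushes now and keeps going with the on-stack bundle.
		 */
		if (!tlb_next_batch(tlb))	/* helper name illustrative */
			return 0;
		batch = tlb->active;
	}

	return batch->nr;
}

A zero return tells the caller that no more pages can be queued, which is exactly the condition tlb_remove_page() below reacts to by calling tlb_flush_mmu().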
 /* tlb_remove_page
- *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- *	handling the additional races in SMP caused by other CPUs caching valid
- *	mappings in their TLBs.
+ *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
+ *	required.
  */
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	tlb->need_flush = 1;
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return;
-	}
-	tlb->pages[tlb->nr++] = page;
-	if (tlb->nr >= FREE_PTE_NR)
-		tlb_flush_mmu(tlb, 0, 0);
+	if (!__tlb_remove_page(tlb, page))
+		tlb_flush_mmu(tlb);
 }
 
 /**
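For context, a hypothetical caller of the reworked interface might look like the sketch below: the gather state now lives on the caller's stack and is passed explicitly, instead of being fetched from the old per-CPU mmu_gathers variable that this diff removes. The function name and the body comments are illustrative; real callers are in the core mm unmap and exit paths.

/* Hypothetical caller, assuming the arch's <asm/tlb.h> wraps this generic header. */
static void example_unmap_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, false);	/* false: not a full-mm teardown */

	/*
	 * ... walk the page tables for [start, end), clear PTEs, and hand each
	 * still-mapped page to tlb_remove_page(&tlb, page); the batcher flushes
	 * by itself whenever a batch fills up ...
	 */

	tlb_finish_mmu(&tlb, start, end);	/* final TLB flush + free batched pages */
}

Because no per-CPU gather is held across the operation any more (note the removed get_cpu_var()/put_cpu_var() pair in the diff), the unmap path no longer has to stay on one CPU while it batches pages.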