author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-06-17 19:57:20 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-06-17 19:57:20 -0400
commit    bb07b00be77fb33274cb44a03bdbf2471e556189 (patch)
tree      620390077de843dd7626998f0d8441d14a6c66d9 /arch/ia64
parent    b7165ebbf0898bad9aaeddfa22b918e94ed90e07 (diff)
parent    7d132055814ef17a6c7b69f342244c410a5e000f (diff)
Merge 3.10-rc6 into driver-core-next
We want these fixes here too.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/include/asm/tlb.h | 41
1 file changed, 8 insertions(+), 33 deletions(-)
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index c3ffe3e54edc..ef3a9de01954 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -46,12 +46,6 @@
 #include <asm/tlbflush.h>
 #include <asm/machvec.h>
 
-#ifdef CONFIG_SMP
-# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
-#else
-# define tlb_fast_mode(tlb)	(1)
-#endif
-
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -60,7 +54,7 @@
 
 struct mmu_gather {
 	struct mm_struct	*mm;
-	unsigned int		nr;	/* == ~0U => fast mode */
+	unsigned int		nr;
 	unsigned int		max;
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
@@ -103,6 +97,7 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 static inline void
 ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
+	unsigned long i;
 	unsigned int nr;
 
 	if (!tlb->need_flush)
@@ -141,13 +136,11 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 
 	/* lastly, release the freed pages */
 	nr = tlb->nr;
-	if (!tlb_fast_mode(tlb)) {
-		unsigned long i;
-		tlb->nr = 0;
-		tlb->start_addr = ~0UL;
-		for (i = 0; i < nr; ++i)
-			free_page_and_swap_cache(tlb->pages[i]);
-	}
+
+	tlb->nr = 0;
+	tlb->start_addr = ~0UL;
+	for (i = 0; i < nr; ++i)
+		free_page_and_swap_cache(tlb->pages[i]);
 }
 
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
@@ -167,20 +160,7 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
-	/*
-	 * Use fast mode if only 1 CPU is online.
-	 *
-	 * It would be tempting to turn on fast-mode for full_mm_flush as well. But this
-	 * doesn't work because of speculative accesses and software prefetching: the page
-	 * table of "mm" may (and usually is) the currently active page table and even
-	 * though the kernel won't do any user-space accesses during the TLB shoot down, a
-	 * compiler might use speculation or lfetch.fault on what happens to be a valid
-	 * user-space address. This in turn could trigger a TLB miss fault (or a VHPT
-	 * walk) and re-insert a TLB entry we just removed. Slow mode avoids such
-	 * problems. (We could make fast-mode work by switching the current task to a
-	 * different "mm" during the shootdown.) --davidm 08/02/2002
-	 */
-	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
+	tlb->nr = 0;
 	tlb->fullmm = full_mm_flush;
 	tlb->start_addr = ~0UL;
 }
@@ -214,11 +194,6 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu */
-	}
-
 	if (!tlb->nr && tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 
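
The pattern this patch leaves behind is the ordinary mmu_gather batch: __tlb_remove_page() queues each unmapped page, and ia64_tlb_flush_mmu() releases the whole batch after the TLB entries are purged, whereas the removed fast mode freed each page immediately. A minimal user-space sketch of that gather-and-flush flow follows; struct gather, release_page() and GATHER_LOCAL_MAX are illustrative stand-ins (not kernel names), and release_page() merely stubs free_page_and_swap_cache().

#include <stdio.h>
#include <stdlib.h>

#define GATHER_LOCAL_MAX 8	/* small fixed batch, like tlb->local */

/* Stand-in for struct page; illustrative only. */
struct page { unsigned long pfn; };

/* Simplified mmu_gather: a batch of pages awaiting release. */
struct gather {
	unsigned int nr;	/* pages queued so far */
	unsigned int max;	/* capacity of .pages */
	struct page *pages[GATHER_LOCAL_MAX];
};

static void gather_init(struct gather *g)
{
	g->nr = 0;		/* always start empty; no ~0U fast mode */
	g->max = GATHER_LOCAL_MAX;
}

/* Stub for free_page_and_swap_cache(): just log and free. */
static void release_page(struct page *p)
{
	printf("releasing pfn %lu\n", p->pfn);
	free(p);
}

/* Queue one page; returns 0 once the batch is full and needs a flush. */
static int gather_remove_page(struct gather *g, struct page *p)
{
	g->pages[g->nr++] = p;
	return g->nr < g->max;
}

/* Drain the batch, like the tail of the patched ia64_tlb_flush_mmu(). */
static void gather_flush(struct gather *g)
{
	unsigned long i;
	unsigned int nr = g->nr;

	g->nr = 0;		/* reset before releasing, as the patch does */
	for (i = 0; i < nr; ++i)
		release_page(g->pages[i]);
}

int main(void)
{
	struct gather g;
	unsigned long pfn;

	gather_init(&g);
	for (pfn = 0; pfn < 20; ++pfn) {
		struct page *p = malloc(sizeof(*p));
		if (!p)
			return 1;
		p->pfn = pfn;
		if (!gather_remove_page(&g, p))
			gather_flush(&g);	/* batch full: drain it */
	}
	gather_flush(&g);	/* final drain */
	return 0;
}

Note that the flush resets the batch counter before the release loop, mirroring the order in the patched ia64_tlb_flush_mmu(), which zeroes tlb->nr and tlb->start_addr before freeing the queued pages.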