Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/Makefile               |   6
-rw-r--r--  arch/x86/mm/fault.c                |  51
-rw-r--r--  arch/x86/mm/highmem_32.c           |   3
-rw-r--r--  arch/x86/mm/init_64.c              |   2
-rw-r--r--  arch/x86/mm/iomap_32.c             |  27
-rw-r--r--  arch/x86/mm/ioremap.c              |  90
-rw-r--r--  arch/x86/mm/kmemcheck/kmemcheck.c  |  14
-rw-r--r--  arch/x86/mm/pageattr.c             |   8
-rw-r--r--  arch/x86/mm/pat.c                  | 358
-rw-r--r--  arch/x86/mm/physaddr.c             |  70
-rw-r--r--  arch/x86/mm/physaddr.h             |  10
-rw-r--r--  arch/x86/mm/srat_32.c              |   4
-rw-r--r--  arch/x86/mm/tlb.c                  |  21
13 files changed, 439 insertions(+), 225 deletions(-)
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index eefdeee8a871..9b5a9f59a478 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,5 +1,9 @@
 obj-y	:=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-	    pat.o pgtable.o gup.o
+	    pat.o pgtable.o physaddr.o gup.o
+
+# Make sure __phys_addr has no stackprotector
+nostackp := $(call cc-option, -fno-stack-protector)
+CFLAGS_physaddr.o := $(nostackp)
 
 obj-$(CONFIG_SMP)	+= tlb.o
 
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index bfae139182ff..775a020990a5 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -285,26 +285,25 @@ check_v8086_mode(struct pt_regs *regs, unsigned long address,
 	tsk->thread.screen_bitmap |= 1 << bit;
 }
 
-static void dump_pagetable(unsigned long address)
+static bool low_pfn(unsigned long pfn)
 {
-	__typeof__(pte_val(__pte(0))) page;
+	return pfn < max_low_pfn;
+}
 
-	page = read_cr3();
-	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
+static void dump_pagetable(unsigned long address)
+{
+	pgd_t *base = __va(read_cr3());
+	pgd_t *pgd = &base[pgd_index(address)];
+	pmd_t *pmd;
+	pte_t *pte;
 
 #ifdef CONFIG_X86_PAE
-	printk("*pdpt = %016Lx ", page);
-	if ((page >> PAGE_SHIFT) < max_low_pfn
-	    && page & _PAGE_PRESENT) {
-		page &= PAGE_MASK;
-		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
-							 & (PTRS_PER_PMD - 1)];
-		printk(KERN_CONT "*pde = %016Lx ", page);
-		page &= ~_PAGE_NX;
-	}
-#else
-	printk("*pde = %08lx ", page);
+	printk("*pdpt = %016Lx ", pgd_val(*pgd));
+	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
+		goto out;
 #endif
+	pmd = pmd_offset(pud_offset(pgd, address), address);
+	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
 
 	/*
 	 * We must not directly access the pte in the highpte
@@ -312,16 +311,12 @@ static void dump_pagetable(unsigned long address)
 	 * And let's rather not kmap-atomic the pte, just in case
 	 * it's allocated already:
 	 */
-	if ((page >> PAGE_SHIFT) < max_low_pfn
-	    && (page & _PAGE_PRESENT)
-	    && !(page & _PAGE_PSE)) {
-
-		page &= PAGE_MASK;
-		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
-							 & (PTRS_PER_PTE - 1)];
-		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
-	}
+	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
+		goto out;
 
+	pte = pte_offset_kernel(pmd, address);
+	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
+out:
 	printk("\n");
 }
 
@@ -450,16 +445,12 @@ static int bad_address(void *p)
 
 static void dump_pagetable(unsigned long address)
 {
-	pgd_t *pgd;
+	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
+	pgd_t *pgd = base + pgd_index(address);
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 
-	pgd = (pgd_t *)read_cr3();
-
-	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
-
-	pgd += pgd_index(address);
 	if (bad_address(pgd))
 		goto bad;
 
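Both dump_pagetable() rewrites above share one shape: walk top-down, print each level, and bail out the moment a level is absent, sits above lowmem, or is a large page. A condensed sketch of that pattern, assuming the usual <asm/pgtable.h> accessors (illustration only, not code from this patch):

/* Sketch: descend one address, stopping at the first level that cannot
 * be safely dereferenced -- the structure both variants now follow. */
static void walk_one(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

	if (!pgd_present(*pgd))
		return;				/* hole at the top level */

	pmd = pmd_offset(pud_offset(pgd, address), address);
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		return;				/* hole, or no pte level below */

	pte = pte_offset_kernel(pmd, address);
	printk(KERN_DEBUG "pte = %0*Lx\n", (int)(sizeof(*pte) * 2), (u64)pte_val(*pte));
}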
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 2112ed55e7ea..63a6ba66cbe0 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -24,7 +24,7 @@ void kunmap(struct page *page)
  * no global lock is needed and because the kmap code must perform a global TLB
  * invalidation when the kmap pool wraps.
  *
- * However when holding an atomic kmap is is not legal to sleep, so atomic
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
 void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
@@ -104,6 +104,7 @@ EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kmap_atomic);
 EXPORT_SYMBOL(kunmap_atomic);
 EXPORT_SYMBOL(kmap_atomic_prot);
+EXPORT_SYMBOL(kmap_atomic_to_page);
 
 void __init set_highmem_pages_init(void)
 {
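The corrected comment is also the API contract: between kmap_atomic() and kunmap_atomic() the caller must not sleep. A hedged usage sketch of the exported interface, era-appropriate km_type variant (zero_first_byte() is a made-up caller, not part of the patch):

/* Map a possibly-highmem page, touch it without sleeping, unmap. */
static void zero_first_byte(struct page *page)
{
	char *vaddr = kmap_atomic(page, KM_USER0);	/* no sleeping from here on */

	vaddr[0] = 0;
	kunmap_atomic(vaddr, KM_USER0);			/* must pair with the map */
}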
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 6176fe8f29e0..ea56b8cbb6a6 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -796,7 +796,7 @@ int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
 	return ret;
 
 #else
-	reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
+	reserve_bootmem(phys, len, flags);
 #endif
 
 	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index fe6f84ca121e..84e236ce76ba 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -21,7 +21,7 @@
 #include <linux/module.h>
 #include <linux/highmem.h>
 
-int is_io_mapping_possible(resource_size_t base, unsigned long size)
+static int is_io_mapping_possible(resource_size_t base, unsigned long size)
 {
 #if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
 	/* There is no way to map greater than 1 << 32 address without PAE */
@@ -30,7 +30,30 @@ int is_io_mapping_possible(resource_size_t base, unsigned long size)
 #endif
 	return 1;
 }
-EXPORT_SYMBOL_GPL(is_io_mapping_possible);
+
+int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
+{
+	unsigned long flag = _PAGE_CACHE_WC;
+	int ret;
+
+	if (!is_io_mapping_possible(base, size))
+		return -EINVAL;
+
+	ret = io_reserve_memtype(base, base + size, &flag);
+	if (ret)
+		return ret;
+
+	*prot = __pgprot(__PAGE_KERNEL | flag);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_create_wc);
+
+void
+iomap_free(resource_size_t base, unsigned long size)
+{
+	io_free_memtype(base, base + size);
+}
+EXPORT_SYMBOL_GPL(iomap_free);
 
 void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
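iomap_create_wc() and iomap_free() are designed as a reserve/release pair around io_reserve_memtype()/io_free_memtype(). A hedged sketch of the intended calling pattern (setup_wc_window() and its parameters are placeholders, not from this patch):

static pgprot_t wc_prot;

static int setup_wc_window(resource_size_t base, unsigned long len)
{
	int ret = iomap_create_wc(base, len, &wc_prot);

	if (ret)
		return ret;	/* range unmappable, or memtype conflict */

	/* map individual pages with kmap_atomic_prot_pfn(pfn, type, wc_prot) */
	return 0;
}

static void teardown_wc_window(resource_size_t base, unsigned long len)
{
	iomap_free(base, len);	/* releases the io_reserve_memtype() claim */
}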
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 8a450930834f..334e63ca7b2b 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -22,77 +22,7 @@
 #include <asm/pgalloc.h>
 #include <asm/pat.h>
 
-static inline int phys_addr_valid(resource_size_t addr)
-{
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-	return !(addr >> boot_cpu_data.x86_phys_bits);
-#else
-	return 1;
-#endif
-}
-
-#ifdef CONFIG_X86_64
-
-unsigned long __phys_addr(unsigned long x)
-{
-	if (x >= __START_KERNEL_map) {
-		x -= __START_KERNEL_map;
-		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
-		x += phys_base;
-	} else {
-		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
-		x -= PAGE_OFFSET;
-		VIRTUAL_BUG_ON(!phys_addr_valid(x));
-	}
-	return x;
-}
-EXPORT_SYMBOL(__phys_addr);
-
-bool __virt_addr_valid(unsigned long x)
-{
-	if (x >= __START_KERNEL_map) {
-		x -= __START_KERNEL_map;
-		if (x >= KERNEL_IMAGE_SIZE)
-			return false;
-		x += phys_base;
-	} else {
-		if (x < PAGE_OFFSET)
-			return false;
-		x -= PAGE_OFFSET;
-		if (!phys_addr_valid(x))
-			return false;
-	}
-
-	return pfn_valid(x >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(__virt_addr_valid);
-
-#else
-
-#ifdef CONFIG_DEBUG_VIRTUAL
-unsigned long __phys_addr(unsigned long x)
-{
-	/* VMALLOC_* aren't constants */
-	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
-	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
-	return x - PAGE_OFFSET;
-}
-EXPORT_SYMBOL(__phys_addr);
-#endif
-
-bool __virt_addr_valid(unsigned long x)
-{
-	if (x < PAGE_OFFSET)
-		return false;
-	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
-		return false;
-	if (x >= FIXADDR_START)
-		return false;
-	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(__virt_addr_valid);
-
-#endif
+#include "physaddr.h"
 
 int page_is_ram(unsigned long pagenr)
 {
@@ -228,24 +158,14 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
 						prot_val, &new_prot_val);
 	if (retval) {
-		pr_debug("Warning: reserve_memtype returned %d\n", retval);
+		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
 		return NULL;
 	}
 
 	if (prot_val != new_prot_val) {
-		/*
-		 * Do not fallback to certain memory types with certain
-		 * requested type:
-		 * - request is uc-, return cannot be write-back
-		 * - request is uc-, return cannot be write-combine
-		 * - request is write-combine, return cannot be write-back
-		 */
-		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
-		     (new_prot_val == _PAGE_CACHE_WB ||
-		      new_prot_val == _PAGE_CACHE_WC)) ||
-		    (prot_val == _PAGE_CACHE_WC &&
-		     new_prot_val == _PAGE_CACHE_WB)) {
-			pr_debug(
+		if (!is_new_memtype_allowed(phys_addr, size,
+					    prot_val, new_prot_val)) {
+			printk(KERN_ERR
 		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
 				(unsigned long long)phys_addr,
 				(unsigned long long)(phys_addr + size),
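The open-coded compatibility checks deleted above now live behind is_new_memtype_allowed(). The rules themselves, as the removed comment stated them, boil down to the following (sketch for illustration; the kernel's actual check is is_new_memtype_allowed()):

/* A request must not silently weaken: uc- may not become wb or wc,
 * and wc may not become wb. Everything else is an acceptable fallback. */
static int fallback_allowed(unsigned long req, unsigned long got)
{
	if (req == _PAGE_CACHE_UC_MINUS &&
	    (got == _PAGE_CACHE_WB || got == _PAGE_CACHE_WC))
		return 0;
	if (req == _PAGE_CACHE_WC && got == _PAGE_CACHE_WB)
		return 0;
	return 1;
}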
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index 2c55ed098654..528bf954eb74 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -331,6 +331,20 @@ static void kmemcheck_read_strict(struct pt_regs *regs,
 	kmemcheck_shadow_set(shadow, size);
 }
 
+bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
+{
+	enum kmemcheck_shadow status;
+	void *shadow;
+
+	shadow = kmemcheck_shadow_lookup(addr);
+	if (!shadow)
+		return true;
+
+	status = kmemcheck_shadow_test(shadow, size);
+
+	return status == KMEMCHECK_SHADOW_INITIALIZED;
+}
+
 /* Access may cross page boundary */
 static void kmemcheck_read(struct pt_regs *regs,
 		unsigned long addr, unsigned int size)
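The new predicate gives callers (kmemleak, for instance) a way to ask whether a byte range is fully initialized before reading it. A hedged usage sketch (scan_object() is a placeholder, not part of this patch):

static void scan_object(unsigned long addr, size_t size)
{
	if (!kmemcheck_is_obj_initialized(addr, size))
		return;		/* contents still uninitialized; don't read them */

	/* safe to read addr[0..size) here */
}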
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index f53cfc7f963d..24952fdc7e40 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -805,6 +805,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 {
 	struct cpa_data cpa;
 	int ret, cache, checkalias;
+	unsigned long baddr = 0;
 
 	/*
 	 * Check, if we are requested to change a not supported
@@ -836,6 +837,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 			 */
 			WARN_ON_ONCE(1);
 		}
+		/*
+		 * Save address for cache flush. *addr is modified in the call
+		 * to __change_page_attr_set_clr() below.
+		 */
+		baddr = *addr;
 	}
 
 	/* Must avoid aliasing mappings in the highmem code */
@@ -883,7 +889,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 			cpa_flush_array(addr, numpages, cache,
 					cpa.flags, pages);
 		} else
-			cpa_flush_range(*addr, numpages, cache);
+			cpa_flush_range(baddr, numpages, cache);
 	} else
 		cpa_flush_all(cache);
 
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index e6718bb28065..d7ebc3a10f2f 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -15,6 +15,7 @@
 #include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/rbtree.h>
 
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
@@ -148,11 +149,10 @@ static char *cattr_name(unsigned long flags)
  * areas). All the aliases have the same cache attributes of course.
  * Zero attributes are represented as holes.
  *
- * Currently the data structure is a list because the number of mappings
- * are expected to be relatively small. If this should be a problem
- * it could be changed to a rbtree or similar.
+ * The data structure is a list that is also organized as an rbtree
+ * sorted on the start address of memtype range.
  *
- * memtype_lock protects the whole list.
+ * memtype_lock protects both the linear list and rbtree.
  */
 
 struct memtype {
@@ -160,11 +160,53 @@ struct memtype {
 	u64			end;
 	unsigned long		type;
 	struct list_head	nd;
+	struct rb_node		rb;
 };
 
+static struct rb_root memtype_rbroot = RB_ROOT;
 static LIST_HEAD(memtype_list);
 static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
 
+static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
+{
+	struct rb_node *node = root->rb_node;
+	struct memtype *last_lower = NULL;
+
+	while (node) {
+		struct memtype *data = container_of(node, struct memtype, rb);
+
+		if (data->start < start) {
+			last_lower = data;
+			node = node->rb_right;
+		} else if (data->start > start) {
+			node = node->rb_left;
+		} else
+			return data;
+	}
+
+	/* Will return NULL if there is no entry with its start <= start */
+	return last_lower;
+}
+
+static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
+{
+	struct rb_node **new = &(root->rb_node);
+	struct rb_node *parent = NULL;
+
+	while (*new) {
+		struct memtype *this = container_of(*new, struct memtype, rb);
+
+		parent = *new;
+		if (data->start <= this->start)
+			new = &((*new)->rb_left);
+		else if (data->start > this->start)
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&data->rb, parent, new);
+	rb_insert_color(&data->rb, root);
+}
+
 /*
  * Does intersection of PAT memory type and MTRR memory type and returns
  * the resulting memory type as PAT understands it.
@@ -218,9 +260,6 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
 	return -EBUSY;
 }
 
-static struct memtype *cached_entry;
-static u64 cached_start;
-
 static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
 {
 	int ram_page = 0, not_rampage = 0;
@@ -249,63 +288,61 @@ static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
 }
 
 /*
- * For RAM pages, mark the pages as non WB memory type using
- * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
- * set_memory_wc() on a RAM page at a time before marking it as WB again.
- * This is ok, because only one driver will be owning the page and
- * doing set_memory_*() calls.
+ * For RAM pages, we use page flags to mark the pages with appropriate type.
+ * Here we do two pass:
+ * - Find the memtype of all the pages in the range, look for any conflicts
+ * - In case of no conflicts, set the new memtype for pages in the range
  *
- * For now, we use PageNonWB to track that the RAM page is being mapped
- * as non WB. In future, we will have to use one more flag
- * (or some other mechanism in page_struct) to distinguish between
- * UC and WC mapping.
+ * Caller must hold memtype_lock for atomicity.
  */
 static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
 				  unsigned long *new_type)
 {
 	struct page *page;
-	u64 pfn, end_pfn;
+	u64 pfn;
+
+	if (req_type == _PAGE_CACHE_UC) {
+		/* We do not support strong UC */
+		WARN_ON_ONCE(1);
+		req_type = _PAGE_CACHE_UC_MINUS;
+	}
 
 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
-		page = pfn_to_page(pfn);
-		if (page_mapped(page) || PageNonWB(page))
-			goto out;
+		unsigned long type;
 
-		SetPageNonWB(page);
+		page = pfn_to_page(pfn);
+		type = get_page_memtype(page);
+		if (type != -1) {
+			printk(KERN_INFO "reserve_ram_pages_type failed "
+				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
+				start, end, type, req_type);
+			if (new_type)
+				*new_type = type;
+
+			return -EBUSY;
+		}
 	}
-	return 0;
 
-out:
-	end_pfn = pfn;
-	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
+	if (new_type)
+		*new_type = req_type;
+
+	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
 		page = pfn_to_page(pfn);
-		ClearPageNonWB(page);
+		set_page_memtype(page, req_type);
 	}
-
-	return -EINVAL;
+	return 0;
 }
 
 static int free_ram_pages_type(u64 start, u64 end)
 {
 	struct page *page;
-	u64 pfn, end_pfn;
+	u64 pfn;
 
 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
 		page = pfn_to_page(pfn);
-		if (page_mapped(page) || !PageNonWB(page))
-			goto out;
-
-		ClearPageNonWB(page);
+		set_page_memtype(page, -1);
 	}
 	return 0;
-
-out:
-	end_pfn = pfn;
-	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
-		page = pfn_to_page(pfn);
-		SetPageNonWB(page);
-	}
-	return -EINVAL;
 }
 
 /*
@@ -339,6 +376,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		if (new_type) {
 			if (req_type == -1)
 				*new_type = _PAGE_CACHE_WB;
+			else if (req_type == _PAGE_CACHE_WC)
+				*new_type = _PAGE_CACHE_UC_MINUS;
 			else
 				*new_type = req_type & _PAGE_CACHE_MASK;
 		}
@@ -364,11 +403,16 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		*new_type = actual_type;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return reserve_ram_pages_type(start, end, req_type,
-					      new_type);
-	else if (is_range_ram < 0)
+	if (is_range_ram == 1) {
+
+		spin_lock(&memtype_lock);
+		err = reserve_ram_pages_type(start, end, req_type, new_type);
+		spin_unlock(&memtype_lock);
+
+		return err;
+	} else if (is_range_ram < 0) {
 		return -EINVAL;
+	}
 
 	new  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@ -380,17 +424,19 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	spin_lock(&memtype_lock);
 
-	if (cached_entry && start >= cached_start)
-		entry = cached_entry;
-	else
+	entry = memtype_rb_search(&memtype_rbroot, new->start);
+	if (likely(entry != NULL)) {
+		/* To work correctly with list_for_each_entry_continue */
+		entry = list_entry(entry->nd.prev, struct memtype, nd);
+	} else {
 		entry = list_entry(&memtype_list, struct memtype, nd);
+	}
 
 	/* Search for existing mapping that overlaps the current range */
 	where = NULL;
 	list_for_each_entry_continue(entry, &memtype_list, nd) {
 		if (end <= entry->start) {
 			where = entry->nd.prev;
-			cached_entry = list_entry(where, struct memtype, nd);
 			break;
 		} else if (start <= entry->start) { /* end > entry->start */
 			err = chk_conflict(new, entry, new_type);
@@ -398,8 +444,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
 				where = entry->nd.prev;
-				cached_entry = list_entry(where,
-							struct memtype, nd);
 			}
 			break;
 		} else if (start < entry->end) { /* start > entry->start */
@@ -407,8 +451,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			if (!err) {
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
-				cached_entry = list_entry(entry->nd.prev,
-							struct memtype, nd);
 
 				/*
 				 * Move to right position in the linked
@@ -436,13 +478,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return err;
 	}
 
-	cached_start = start;
-
 	if (where)
 		list_add(&new->nd, where);
 	else
 		list_add_tail(&new->nd, &memtype_list);
 
+	memtype_rb_insert(&memtype_rbroot, new);
+
 	spin_unlock(&memtype_lock);
 
 	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
@@ -454,7 +496,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 int free_memtype(u64 start, u64 end)
 {
-	struct memtype *entry;
+	struct memtype *entry, *saved_entry;
 	int err = -EINVAL;
 	int is_range_ram;
 
@@ -466,23 +508,58 @@ int free_memtype(u64 start, u64 end)
 		return 0;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return free_ram_pages_type(start, end);
-	else if (is_range_ram < 0)
+	if (is_range_ram == 1) {
+
+		spin_lock(&memtype_lock);
+		err = free_ram_pages_type(start, end);
+		spin_unlock(&memtype_lock);
+
+		return err;
+	} else if (is_range_ram < 0) {
 		return -EINVAL;
+	}
 
 	spin_lock(&memtype_lock);
+
+	entry = memtype_rb_search(&memtype_rbroot, start);
+	if (unlikely(entry == NULL))
+		goto unlock_ret;
+
+	/*
+	 * Saved entry points to an entry with start same or less than what
+	 * we searched for. Now go through the list in both directions to look
+	 * for the entry that matches with both start and end, with list stored
+	 * in sorted start address
+	 */
+	saved_entry = entry;
 	list_for_each_entry(entry, &memtype_list, nd) {
 		if (entry->start == start && entry->end == end) {
-			if (cached_entry == entry || cached_start == start)
-				cached_entry = NULL;
+			rb_erase(&entry->rb, &memtype_rbroot);
+			list_del(&entry->nd);
+			kfree(entry);
+			err = 0;
+			break;
+		} else if (entry->start > start) {
+			break;
+		}
+	}
+
+	if (!err)
+		goto unlock_ret;
 
+	entry = saved_entry;
+	list_for_each_entry_reverse(entry, &memtype_list, nd) {
+		if (entry->start == start && entry->end == end) {
+			rb_erase(&entry->rb, &memtype_rbroot);
 			list_del(&entry->nd);
 			kfree(entry);
 			err = 0;
 			break;
+		} else if (entry->start < start) {
+			break;
 		}
 	}
+unlock_ret:
 	spin_unlock(&memtype_lock);
 
 	if (err) {
@@ -496,6 +573,101 @@ int free_memtype(u64 start, u64 end)
 }
 
 
+/**
+ * lookup_memtype - Looksup the memory type for a physical address
+ * @paddr: physical address of which memory type needs to be looked up
+ *
+ * Only to be called when PAT is enabled
+ *
+ * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
+ * _PAGE_CACHE_UC
+ */
+static unsigned long lookup_memtype(u64 paddr)
+{
+	int rettype = _PAGE_CACHE_WB;
+	struct memtype *entry;
+
+	if (is_ISA_range(paddr, paddr + PAGE_SIZE - 1))
+		return rettype;
+
+	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
+		struct page *page;
+		spin_lock(&memtype_lock);
+		page = pfn_to_page(paddr >> PAGE_SHIFT);
+		rettype = get_page_memtype(page);
+		spin_unlock(&memtype_lock);
+		/*
+		 * -1 from get_page_memtype() implies RAM page is in its
+		 * default state and not reserved, and hence of type WB
+		 */
+		if (rettype == -1)
+			rettype = _PAGE_CACHE_WB;
+
+		return rettype;
+	}
+
+	spin_lock(&memtype_lock);
+
+	entry = memtype_rb_search(&memtype_rbroot, paddr);
+	if (entry != NULL)
+		rettype = entry->type;
+	else
+		rettype = _PAGE_CACHE_UC_MINUS;
+
+	spin_unlock(&memtype_lock);
+	return rettype;
+}
+
+/**
+ * io_reserve_memtype - Request a memory type mapping for a region of memory
+ * @start: start (physical address) of the region
+ * @end: end (physical address) of the region
+ * @type: A pointer to memtype, with requested type. On success, requested
+ * or any other compatible type that was available for the region is returned
+ *
+ * On success, returns 0
+ * On failure, returns non-zero
+ */
+int io_reserve_memtype(resource_size_t start, resource_size_t end,
+			unsigned long *type)
+{
+	resource_size_t size = end - start;
+	unsigned long req_type = *type;
+	unsigned long new_type;
+	int ret;
+
+	WARN_ON_ONCE(iomem_map_sanity_check(start, size));
+
+	ret = reserve_memtype(start, end, req_type, &new_type);
+	if (ret)
+		goto out_err;
+
+	if (!is_new_memtype_allowed(start, size, req_type, new_type))
+		goto out_free;
+
+	if (kernel_map_sync_memtype(start, size, new_type) < 0)
+		goto out_free;
+
+	*type = new_type;
+	return 0;
+
+out_free:
+	free_memtype(start, end);
+	ret = -EBUSY;
+out_err:
+	return ret;
+}
+
+/**
+ * io_free_memtype - Release a memory type mapping for a region of memory
+ * @start: start (physical address) of the region
+ * @end: end (physical address) of the region
+ */
+void io_free_memtype(resource_size_t start, resource_size_t end)
+{
+	free_memtype(start, end);
+}
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t vma_prot)
 {
@@ -577,7 +749,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
 {
 	unsigned long id_sz;
 
-	if (!pat_enabled || base >= __pa(high_memory))
+	if (base >= __pa(high_memory))
 		return 0;
 
 	id_sz = (__pa(high_memory) < base + size) ?
@@ -612,18 +784,37 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
 	/*
-	 * reserve_pfn_range() doesn't support RAM pages. Maintain the current
-	 * behavior with RAM pages by returning success.
+	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
+	 * track of number of mappings of RAM pages. We can assert that
+	 * the type requested matches the type of first page in the range.
 	 */
-	if (is_ram != 0)
+	if (is_ram) {
+		if (!pat_enabled)
+			return 0;
+
+		flags = lookup_memtype(paddr);
+		if (want_flags != flags) {
+			printk(KERN_WARNING
+			"%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
+				current->comm, current->pid,
+				cattr_name(want_flags),
+				(unsigned long long)paddr,
+				(unsigned long long)(paddr + size),
+				cattr_name(flags));
+			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
+					      (~_PAGE_CACHE_MASK)) |
+					     flags);
+		}
 		return 0;
+	}
 
 	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
 	if (ret)
 		return ret;
 
 	if (flags != want_flags) {
-		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
+		if (strict_prot ||
+		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
 			free_memtype(paddr, paddr + size);
 			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
 				" for %Lx-%Lx, got %s\n",
@@ -677,14 +868,6 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 	pgprot_t pgprot;
 
-	if (!pat_enabled)
-		return 0;
-
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/*
 		 * reserve the whole chunk covered by vma. We need the
@@ -712,23 +895,24 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
+	unsigned long flags;
 	resource_size_t paddr;
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
-	if (!pat_enabled)
-		return 0;
-
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
+	if (!pat_enabled)
+		return 0;
+
+	/* for vm_insert_pfn and friends, we set prot based on lookup */
+	flags = lookup_memtype(pfn << PAGE_SHIFT);
+	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
+			 flags);
+
 	return 0;
 }
 
@@ -743,14 +927,6 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 	resource_size_t paddr;
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
-	if (!pat_enabled)
-		return;
-
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* free the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
@@ -826,7 +1002,7 @@ static int memtype_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
-static struct seq_operations memtype_seq_ops = {
+static const struct seq_operations memtype_seq_ops = {
 	.start = memtype_seq_start,
 	.next  = memtype_seq_next,
 	.stop  = memtype_seq_stop,
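One subtlety in the rbtree code added above: memtype_rb_search() is a floor search, not an exact-match lookup. On a miss it returns the entry with the largest start below the key (or NULL if none exists), which is exactly what reserve_memtype() needs to resume the list scan just before the insertion point, and what free_memtype() needs to walk outward in both directions. A standalone illustration of the idiom on a plain binary search tree (toy code, not from the patch):

struct node { unsigned long key; struct node *left, *right; };

/* Return the node with the largest key <= target, or NULL if every
 * key in the tree is greater -- the same contract memtype_rb_search()
 * provides on top of the kernel rbtree. */
static struct node *floor_search(struct node *n, unsigned long target)
{
	struct node *last_lower = NULL;

	while (n) {
		if (n->key < target) {
			last_lower = n;		/* best floor so far */
			n = n->right;		/* maybe a closer one on the right */
		} else if (n->key > target) {
			n = n->left;
		} else {
			return n;		/* exact hit */
		}
	}
	return last_lower;
}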
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
new file mode 100644
index 000000000000..d2e2735327b4
--- /dev/null
+++ b/arch/x86/mm/physaddr.c
@@ -0,0 +1,70 @@
+#include <linux/mmdebug.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+
+#include "physaddr.h"
+
+#ifdef CONFIG_X86_64
+
+unsigned long __phys_addr(unsigned long x)
+{
+	if (x >= __START_KERNEL_map) {
+		x -= __START_KERNEL_map;
+		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
+		x += phys_base;
+	} else {
+		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
+		x -= PAGE_OFFSET;
+		VIRTUAL_BUG_ON(!phys_addr_valid(x));
+	}
+	return x;
+}
+EXPORT_SYMBOL(__phys_addr);
+
+bool __virt_addr_valid(unsigned long x)
+{
+	if (x >= __START_KERNEL_map) {
+		x -= __START_KERNEL_map;
+		if (x >= KERNEL_IMAGE_SIZE)
+			return false;
+		x += phys_base;
+	} else {
+		if (x < PAGE_OFFSET)
+			return false;
+		x -= PAGE_OFFSET;
+		if (!phys_addr_valid(x))
+			return false;
+	}
+
+	return pfn_valid(x >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(__virt_addr_valid);
+
+#else
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+unsigned long __phys_addr(unsigned long x)
+{
+	/* VMALLOC_* aren't constants */
+	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
+	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
+	return x - PAGE_OFFSET;
+}
+EXPORT_SYMBOL(__phys_addr);
+#endif
+
+bool __virt_addr_valid(unsigned long x)
+{
+	if (x < PAGE_OFFSET)
+		return false;
+	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
+		return false;
+	if (x >= FIXADDR_START)
+		return false;
+	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(__virt_addr_valid);
+
+#endif	/* CONFIG_X86_64 */
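As a sanity check on the 64-bit branches above, the two virtual windows translate differently (worked example with illustrative offsets, not real addresses):

/* Sketch: the two cases __phys_addr() distinguishes on 64-bit. */
void physaddr_examples(void)
{
	unsigned long direct = PAGE_OFFSET + 0x1000;		/* direct mapping */
	unsigned long image  = __START_KERNEL_map + 0x2000;	/* kernel image   */

	BUG_ON(__phys_addr(direct) != 0x1000);			/* x - PAGE_OFFSET          */
	BUG_ON(__phys_addr(image)  != phys_base + 0x2000);	/* x - map base + phys_base */
}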
diff --git a/arch/x86/mm/physaddr.h b/arch/x86/mm/physaddr.h
new file mode 100644
index 000000000000..a3cd5a0c97b3
--- /dev/null
+++ b/arch/x86/mm/physaddr.h
@@ -0,0 +1,10 @@
+#include <asm/processor.h>
+
+static inline int phys_addr_valid(resource_size_t addr)
+{
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+	return !(addr >> boot_cpu_data.x86_phys_bits);
+#else
+	return 1;
+#endif
+}
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 29a0e37114f8..6f8aa33031c7 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -215,7 +215,7 @@ int __init get_memcfg_from_srat(void)
 		goto out_fail;
 
 	if (num_memory_chunks == 0) {
-		printk(KERN_WARNING
+		printk(KERN_DEBUG
 			 "could not find any ACPI SRAT memory areas.\n");
 		goto out_fail;
 	}
@@ -277,7 +277,7 @@ int __init get_memcfg_from_srat(void)
 	}
 	return 1;
 out_fail:
-	printk(KERN_ERR "failed to get NUMA memory information from SRAT"
+	printk(KERN_DEBUG "failed to get NUMA memory information from SRAT"
 			" table\n");
 	return 0;
 }
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 821e97017e95..c814e144a3f0 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -183,18 +183,17 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 
 	f->flush_mm = mm;
 	f->flush_va = va;
-	cpumask_andnot(to_cpumask(f->flush_cpumask),
-		       cpumask, cpumask_of(smp_processor_id()));
-
-	/*
-	 * We have to send the IPI only to
-	 * CPUs affected.
-	 */
-	apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
-		      INVALIDATE_TLB_VECTOR_START + sender);
+	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
+		/*
+		 * We have to send the IPI only to
+		 * CPUs affected.
+		 */
+		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
+			      INVALIDATE_TLB_VECTOR_START + sender);
 
-	while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
-		cpu_relax();
+		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
+			cpu_relax();
+	}
 
 	f->flush_mm = NULL;
 	f->flush_va = 0;
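The rewrite leans on a detail of the cpumask API: cpumask_andnot() returns whether the destination ended up non-empty, so both the IPI and the ack spin-wait are skipped when the calling CPU was the only one in the mask. The idiom in isolation (hedged sketch; notify_others() is a made-up name):

/* dst = src & ~self; act only if any other CPU remains in dst. */
static void notify_others(struct cpumask *dst, const struct cpumask *src)
{
	if (cpumask_andnot(dst, src, cpumask_of(smp_processor_id()))) {
		/* at least one other CPU: send the IPI, wait for acks */
	}
	/* empty result: nothing to send, nothing to wait for */
}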