Diffstat (limited to 'arch/x86/mm')
 arch/x86/mm/amdtopology.c   |  3
 arch/x86/mm/fault.c         | 16
 arch/x86/mm/highmem_32.c    |  1
 arch/x86/mm/init.c          |  5
 arch/x86/mm/init_32.c       | 10
 arch/x86/mm/init_64.c       | 75
 arch/x86/mm/ioremap.c       |  7
 arch/x86/mm/numa.c          |  9
 arch/x86/mm/pageattr-test.c |  7
 arch/x86/mm/pageattr.c      | 21
 arch/x86/mm/pgtable.c       |  7
 11 files changed, 73 insertions(+), 88 deletions(-)
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 5247d01329ca..2ca15b59fb3f 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -130,9 +130,8 @@ int __init amd_numa_init(void)
 	}
 
 	limit >>= 16;
-	limit <<= 24;
-	limit |= (1<<24)-1;
 	limit++;
+	limit <<= 24;
 
 	if (limit > end)
 		limit = end;
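
The reordering above is behavior-preserving: filling the low 24 bits and then adding one is the same as incrementing before the shift. A quick standalone check (illustrative user-space C; reg stands for the limit value after the >>= 16):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		for (uint64_t reg = 0; reg < 0x10000; reg++) {
			uint64_t old_way = ((reg << 24) | ((1ULL << 24) - 1)) + 1;
			uint64_t new_way = (reg + 1) << 24;
			assert(old_way == new_way);	/* both round up to the next 16MB boundary */
		}
		return 0;
	}
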
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2b97525246d4..654be4ae3047 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -13,12 +13,12 @@
 #include <linux/perf_event.h>		/* perf_sw_event		*/
 #include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
 #include <linux/prefetch.h>		/* prefetchw			*/
+#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
 
 #include <asm/traps.h>			/* dotraplinkage, ...		*/
 #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
 #include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
 #include <asm/fixmap.h>			/* VSYSCALL_START		*/
-#include <asm/context_tracking.h>	/* exception_enter(), ...	*/
 
 /*
  * Page fault error code bits:
@@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
 	if (pgd_none(*pgd_ref))
 		return -1;
 
-	if (pgd_none(*pgd))
+	if (pgd_none(*pgd)) {
 		set_pgd(pgd, *pgd_ref);
-	else
+		arch_flush_lazy_mmu_mode();
+	} else {
 		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+	}
 
 	/*
 	 * Below here mismatches are bugs because these lower tables
@@ -555,7 +557,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
 	/*
 	 * Pentium F0 0F C7 C8 bug workaround:
 	 */
-	if (boot_cpu_data.f00f_bug) {
+	if (boot_cpu_has_bug(X86_BUG_F00F)) {
 		nr = (address - idt_descr.address) >> 3;
 
 		if (nr == 6) {
@@ -1222,7 +1224,9 @@ good_area:
 dotraplinkage void __kprobes
 do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
-	exception_enter(regs);
+	enum ctx_state prev_state;
+
+	prev_state = exception_enter();
 	__do_page_fault(regs, error_code);
-	exception_exit(regs);
+	exception_exit(prev_state);
 }
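
The do_page_fault() hunk reflects the context-tracking API change: exception_enter() now returns the previous context state, and exception_exit() takes that state back so nested exceptions unwind correctly. Handlers converted to the new API all take the same shape (hypothetical handler name, shown only to illustrate the calling pattern):

	dotraplinkage void __kprobes
	do_example_trap(struct pt_regs *regs, unsigned long error_code)
	{
		enum ctx_state prev_state;

		prev_state = exception_enter();	/* leave user/idle context, remember what it was */
		/* ... actual fault/trap handling ... */
		exception_exit(prev_state);	/* restore the saved state, not an assumed one */
	}
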
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 6f31ee56c008..252b8f5489ba 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -137,5 +137,4 @@ void __init set_highmem_pages_init(void)
 		add_highpages_with_active_regions(nid, zone_start_pfn,
 				 zone_end_pfn);
 	}
-	totalram_pages += totalhigh_pages;
 }
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 59b7fc453277..fdc5dca14fb3 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -515,11 +515,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
 
 	for (; addr < end; addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		init_page_count(virt_to_page(addr));
 		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
-		free_page(addr);
-		totalram_pages++;
+		free_reserved_page(virt_to_page(addr));
 	}
 #endif
 }
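
free_reserved_page() collapses the open-coded four-step sequence removed above into one generic mm helper; roughly, based on the code this hunk deletes (the exact upstream body may differ):

	static inline void free_reserved_page_sketch(struct page *page)
	{
		ClearPageReserved(page);	/* drop the boot-time reserved marking */
		init_page_count(page);		/* reset the refcount before freeing */
		__free_page(page);		/* hand the page to the buddy allocator */
		totalram_pages++;		/* accounting now lives inside the helper */
	}

free_highmem_page() in the init_32.c hunks below is the highmem counterpart, bumping totalhigh_pages instead.
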
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 2d19001151d5..3ac7e319918d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -427,14 +427,6 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
 	pkmap_page_table = pte;
 }
 
-static void __init add_one_highpage_init(struct page *page)
-{
-	ClearPageReserved(page);
-	init_page_count(page);
-	__free_page(page);
-	totalhigh_pages++;
-}
-
 void __init add_highpages_with_active_regions(int nid,
 			 unsigned long start_pfn, unsigned long end_pfn)
 {
@@ -448,7 +440,7 @@ void __init add_highpages_with_active_regions(int nid,
 							start_pfn, end_pfn);
 		for ( ; pfn < e_pfn; pfn++)
 			if (pfn_valid(pfn))
-				add_one_highpage_init(pfn_to_page(pfn));
+				free_highmem_page(pfn_to_page(pfn));
 	}
 }
 #else
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 24ceda0101bb..bb00c4672ad6 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1012,14 +1012,12 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct)
 	flush_tlb_all();
 }
 
-void __ref vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void __ref vmemmap_free(unsigned long start, unsigned long end)
 {
-	unsigned long start = (unsigned long)memmap;
-	unsigned long end = (unsigned long)(memmap + nr_pages);
-
 	remove_pagetable(start, end, false);
 }
 
+#ifdef CONFIG_MEMORY_HOTREMOVE
 static void __meminit
 kernel_physical_mapping_remove(unsigned long start, unsigned long end)
 {
@@ -1029,7 +1027,6 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
 	remove_pagetable(start, end, true);
 }
 
-#ifdef CONFIG_MEMORY_HOTREMOVE
 int __ref arch_remove_memory(u64 start, u64 size)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
@@ -1068,10 +1065,9 @@ void __init mem_init(void)
 
 	/* clear_bss() already clear the empty_zero_page */
 
-	reservedpages = 0;
-
-	/* this will put all low memory onto the freelists */
 	register_page_bootmem_info();
+
+	/* this will put all memory onto the freelists */
 	totalram_pages = free_all_bootmem();
 
 	absent_pages = absent_pages_in_range(0, max_pfn);
@@ -1286,18 +1282,17 @@ static long __meminitdata addr_start, addr_end;
 static void __meminitdata *p_start, *p_end;
 static int __meminitdata node_start;
 
-int __meminit
-vmemmap_populate(struct page *start_page, unsigned long size, int node)
+static int __meminit vmemmap_populate_hugepages(unsigned long start,
+						unsigned long end, int node)
 {
-	unsigned long addr = (unsigned long)start_page;
-	unsigned long end = (unsigned long)(start_page + size);
+	unsigned long addr;
 	unsigned long next;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 
-	for (; addr < end; addr = next) {
-		void *p = NULL;
+	for (addr = start; addr < end; addr = next) {
+		next = pmd_addr_end(addr, end);
 
 		pgd = vmemmap_pgd_populate(addr, node);
 		if (!pgd)
@@ -1307,31 +1302,14 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
 		if (!pud)
 			return -ENOMEM;
 
-		if (!cpu_has_pse) {
-			next = (addr + PAGE_SIZE) & PAGE_MASK;
-			pmd = vmemmap_pmd_populate(pud, addr, node);
-
-			if (!pmd)
-				return -ENOMEM;
-
-			p = vmemmap_pte_populate(pmd, addr, node);
+		pmd = pmd_offset(pud, addr);
+		if (pmd_none(*pmd)) {
+			void *p;
 
-			if (!p)
-				return -ENOMEM;
-
-			addr_end = addr + PAGE_SIZE;
-			p_end = p + PAGE_SIZE;
-		} else {
-			next = pmd_addr_end(addr, end);
-
-			pmd = pmd_offset(pud, addr);
-			if (pmd_none(*pmd)) {
+			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
+			if (p) {
 				pte_t entry;
 
-				p = vmemmap_alloc_block_buf(PMD_SIZE, node);
-				if (!p)
-					return -ENOMEM;
-
 				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
 						PAGE_KERNEL_LARGE);
 				set_pmd(pmd, __pmd(pte_val(entry)));
@@ -1348,15 +1326,32 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
 
 			addr_end = addr + PMD_SIZE;
 			p_end = p + PMD_SIZE;
-		} else
-			vmemmap_verify((pte_t *)pmd, node, addr, next);
+				continue;
+			}
+		} else if (pmd_large(*pmd)) {
+			vmemmap_verify((pte_t *)pmd, node, addr, next);
+			continue;
 		}
-
+		pr_warn_once("vmemmap: falling back to regular page backing\n");
+		if (vmemmap_populate_basepages(addr, next, node))
+			return -ENOMEM;
 	}
-	sync_global_pgds((unsigned long)start_page, end - 1);
 	return 0;
 }
 
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+{
+	int err;
+
+	if (cpu_has_pse)
+		err = vmemmap_populate_hugepages(start, end, node);
+	else
+		err = vmemmap_populate_basepages(start, end, node);
+	if (!err)
+		sync_global_pgds(start, end - 1);
+	return err;
+}
+
 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
 void register_page_bootmem_memmap(unsigned long section_nr,
 				  struct page *start_page, unsigned long size)
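
The net effect of the init_64.c hunks: the cpu_has_pse decision moves out of the per-PMD loop into a top-level dispatch, and a failed 2MB allocation no longer aborts the whole operation. In outline:

	/*
	 * vmemmap_populate(start, end, node)
	 *   cpu_has_pse  -> vmemmap_populate_hugepages(): per PMD, map a 2MB
	 *                   block from vmemmap_alloc_block_buf(), or reuse an
	 *                   existing pmd_large() mapping; on allocation failure,
	 *                   warn once and fall back to 4kB pages for that PMD
	 *                   via vmemmap_populate_basepages()
	 *   !cpu_has_pse -> vmemmap_populate_basepages() for the whole range
	 *
	 * On success, sync_global_pgds(start, end - 1) propagates the new
	 * mappings into every pgd.
	 */
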
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 78fe3f1ac49f..9a1e6583910c 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
 	   in parallel. Reuse of the virtual address is prevented by
 	   leaving it in the global lists until we're done with it.
 	   cpa takes care of the direct mappings. */
-	read_lock(&vmlist_lock);
-	for (p = vmlist; p; p = p->next) {
-		if (p->addr == (void __force *)addr)
-			break;
-	}
-	read_unlock(&vmlist_lock);
+	p = find_vm_area((void __force *)addr);
 
 	if (!p) {
 		printk(KERN_ERR "iounmap: bad address %p\n", addr);
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 72fe01e9e414..a71c4e207679 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -114,14 +114,11 @@ void numa_clear_node(int cpu)
  */
 void __init setup_node_to_cpumask_map(void)
 {
-	unsigned int node, num = 0;
+	unsigned int node;
 
 	/* setup nr_node_ids if not done yet */
-	if (nr_node_ids == MAX_NUMNODES) {
-		for_each_node_mask(node, node_possible_map)
-			num = node;
-		nr_node_ids = num + 1;
-	}
+	if (nr_node_ids == MAX_NUMNODES)
+		setup_nr_node_ids();
 
 	/* allocate the map */
 	for (node = 0; node < nr_node_ids; node++)
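
setup_nr_node_ids() hoists the nr_node_ids computation into common mm code; the generic helper does essentially what the removed x86 loop did (a sketch -- the canonical version lives in mm/page_alloc.c and may differ in detail):

	void __init setup_nr_node_ids(void)
	{
		unsigned int node, highest = 0;

		for_each_node_mask(node, node_possible_map)
			highest = node;
		nr_node_ids = highest + 1;	/* highest possible node id + 1 */
	}
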
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index b0086567271c..d0b1773d9d2e 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -68,7 +68,7 @@ static int print_split(struct split_state *s)
 			s->gpg++;
 			i += GPS/PAGE_SIZE;
 		} else if (level == PG_LEVEL_2M) {
-			if (!(pte_val(*pte) & _PAGE_PSE)) {
+			if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
 				printk(KERN_ERR
 					"%lx level %d but not PSE %Lx\n",
 					addr, level, (u64)pte_val(*pte));
@@ -130,13 +130,12 @@ static int pageattr_test(void)
 	}
 
 	failed += print_split(&sa);
-	srandom32(100);
 
 	for (i = 0; i < NTEST; i++) {
-		unsigned long pfn = random32() % max_pfn_mapped;
+		unsigned long pfn = prandom_u32() % max_pfn_mapped;
 
 		addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
-		len[i] = random32() % 100;
+		len[i] = prandom_u32() % 100;
 		len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
 
 		if (len[i] == 0)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 091934e1d0d9..bb32480c2d71 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * We are safe now. Check whether the new pgprot is the same:
 	 */
 	old_pte = *kpte;
-	old_prot = new_prot = req_prot = pte_pgprot(old_pte);
+	old_prot = req_prot = pte_pgprot(old_pte);
 
 	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
 	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
@@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
 	 * for the ancient hardware that doesn't support it.
 	 */
-	if (pgprot_val(new_prot) & _PAGE_PRESENT)
-		pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+	if (pgprot_val(req_prot) & _PAGE_PRESENT)
+		pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
 	else
-		pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+		pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
 
-	new_prot = canon_pgprot(new_prot);
+	req_prot = canon_pgprot(req_prot);
 
 	/*
 	 * old_pte points to the large page base address. So we need
@@ -542,13 +542,14 @@ out_unlock:
 	return do_split;
 }
 
-int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase)
+static int
+__split_large_page(pte_t *kpte, unsigned long address, struct page *base)
 {
+	pte_t *pbase = (pte_t *)page_address(base);
 	unsigned long pfn, pfninc = 1;
 	unsigned int i, level;
 	pte_t *tmp;
 	pgprot_t ref_prot;
-	struct page *base = virt_to_page(pbase);
 
 	spin_lock(&pgd_lock);
 	/*
@@ -633,7 +634,6 @@ int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase)
 
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
-	pte_t *pbase;
 	struct page *base;
 
 	if (!debug_pagealloc)
@@ -644,8 +644,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	if (!base)
 		return -ENOMEM;
 
-	pbase = (pte_t *)page_address(base);
-	if (__split_large_page(kpte, address, pbase))
+	if (__split_large_page(kpte, address, base))
 		__free_page(base);
 
 	return 0;
@@ -1413,6 +1412,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	 * but that can deadlock->flush only current cpu:
 	 */
 	__flush_tlb_all();
+
+	arch_flush_lazy_mmu_mode();
 }
 
 #ifdef CONFIG_HIBERNATION
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 193350b51f90..17fda6a8b3c2 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
 	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
+	/*
+	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
+	 * entries need a full cr3 reload to flush.
+	 */
+#ifdef CONFIG_X86_PAE
+	tlb->need_flush_all = 1;
+#endif
 	tlb_remove_page(tlb, virt_to_page(pmd));
 }
 
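
Background for the new comment: with PAE, the four page-directory-pointer-table entries are cached inside the CPU when cr3 is loaded, and invlpg does not refresh that cache, so freeing a pmd page must force a full flush. A sketch of the consuming side (modeled on x86's tlb_flush(); simplified, and the real plumbing may differ in detail):

	static void tlb_flush_sketch(struct mmu_gather *tlb)
	{
		if (!tlb->fullmm && !tlb->need_flush_all)
			flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL);
		else
			flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL);	/* reloads cr3 */
	}
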