Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/ioremap.c   | 19
-rw-r--r--  arch/x86/mm/numa_64.c   |  2
-rw-r--r--  arch/x86/mm/pageattr.c  | 15
-rw-r--r--  arch/x86/mm/pat.c       | 83
4 files changed, 60 insertions(+), 59 deletions(-)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index af750ab973b6..f45d5e29a72e 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -134,25 +134,6 @@ int page_is_ram(unsigned long pagenr)
 	return 0;
 }
 
-int pagerange_is_ram(unsigned long start, unsigned long end)
-{
-	int ram_page = 0, not_rampage = 0;
-	unsigned long page_nr;
-
-	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
-	     ++page_nr) {
-		if (page_is_ram(page_nr))
-			ram_page = 1;
-		else
-			not_rampage = 1;
-
-		if (ram_page == not_rampage)
-			return -1;
-	}
-
-	return ram_page;
-}
-
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 71a14f89f89e..f3516da035d1 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -145,7 +145,7 @@ int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
 	return shift;
 }
 
-int early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
 	return phys_to_nid(pfn << PAGE_SHIFT);
 }
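
The rename to __early_pfn_to_nid() follows the split used by the generic early node lookup: the architecture supplies the raw lookup and a common wrapper handles the not-found case. A rough sketch of how that generic wrapper is assumed to look in mm/page_alloc.c around this kernel version (not part of this diff):

	int __meminit early_pfn_to_nid(unsigned long pfn)
	{
		int nid;

		/* arch hook; may return a negative value for an unknown pfn */
		nid = __early_pfn_to_nid(pfn);
		if (nid >= 0)
			return nid;
		/* fall back to node 0 for pfns the arch cannot classify */
		return 0;
	}

The __meminit annotation lets the x86-64 helper be discarded after boot unless memory hotplug keeps it around.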
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 84ba74820ad6..8ca0d8566fc8 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -575,7 +575,6 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 		address = cpa->vaddr[cpa->curpage];
 	else
 		address = *cpa->vaddr;
-
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
@@ -812,6 +811,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 
 	vm_unmap_aliases();
 
+	/*
+	 * If we're called with lazy mmu updates enabled, the
+	 * in-memory pte state may be stale. Flush pending updates to
+	 * bring them up to date.
+	 */
+	arch_flush_lazy_mmu_mode();
+
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
@@ -854,6 +860,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	} else
 		cpa_flush_all(cache);
 
+	/*
+	 * If we've been called with lazy mmu updates enabled, then
+	 * make sure that everything gets flushed out before we
+	 * return.
+	 */
+	arch_flush_lazy_mmu_mode();
+
 out:
 	return ret;
 }
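
The two arch_flush_lazy_mmu_mode() calls guard against paravirt lazy MMU batching: while a batch is open, pte writes may only be queued, so the pte reads done by __change_page_attr() via lookup_address() and the final cache/TLB flushes could otherwise act on stale state. A minimal sketch of the batching pattern being guarded against (assumed caller code, not part of this patch):

	arch_enter_lazy_mmu_mode();
	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
		set_pte_at(mm, addr, ptep, pte);	/* update may only be queued */
	arch_leave_lazy_mmu_mode();			/* queued updates are applied */

arch_flush_lazy_mmu_mode() pushes out any such pending updates while leaving lazy mode active, so change_page_attr_set_clr() sees current pte contents on entry and leaves nothing queued on exit.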
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 7b61036427df..aebbf67a79d0 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -211,6 +211,33 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
 static struct memtype *cached_entry;
 static u64 cached_start;
 
+static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
+{
+	int ram_page = 0, not_rampage = 0;
+	unsigned long page_nr;
+
+	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
+	     ++page_nr) {
+		/*
+		 * For legacy reasons, physical address range in the legacy ISA
+		 * region is tracked as non-RAM. This will allow users of
+		 * /dev/mem to map portions of legacy ISA region, even when
+		 * some of those portions are listed(or not even listed) with
+		 * different e820 types(RAM/reserved/..)
+		 */
+		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
+		    page_is_ram(page_nr))
+			ram_page = 1;
+		else
+			not_rampage = 1;
+
+		if (ram_page == not_rampage)
+			return -1;
+	}
+
+	return ram_page;
+}
+
 /*
  * For RAM pages, mark the pages as non WB memory type using
  * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
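
For reference, the helper's tri-state contract, illustrated with example ranges (the concrete addresses are assumptions for illustration, not taken from this patch):

	/*
	 * pat_pagerange_is_ram(0xa0000, 0xc0000)   ->  0  entirely below
	 *        ISA_END_ADDRESS (1MB), so always treated as non-RAM and
	 *        tracked via the linear memtype list
	 * pat_pagerange_is_ram(0x200000, 0x400000) ->  1  if every page in
	 *        the range is e820 RAM; callers then use the per-page
	 *        PageNonWB tracking instead of the list
	 * pat_pagerange_is_ram(0x9f000, 0x101000)  -> -1  if the page at
	 *        1MB is RAM: the range mixes RAM and non-RAM pages and the
	 *        caller must reject it
	 */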
@@ -336,20 +363,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (new_type)
 		*new_type = actual_type;
 
-	/*
-	 * For legacy reasons, some parts of the physical address range in the
-	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
-	 * the e820 tables). So we will track the memory attributes of this
-	 * legacy 1MB region using the linear memtype_list always.
-	 */
-	if (end >= ISA_END_ADDRESS) {
-		is_range_ram = pagerange_is_ram(start, end);
-		if (is_range_ram == 1)
-			return reserve_ram_pages_type(start, end, req_type,
-						      new_type);
-		else if (is_range_ram < 0)
-			return -EINVAL;
-	}
+	is_range_ram = pat_pagerange_is_ram(start, end);
+	if (is_range_ram == 1)
+		return reserve_ram_pages_type(start, end, req_type,
+					      new_type);
+	else if (is_range_ram < 0)
+		return -EINVAL;
 
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
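
The explicit end >= ISA_END_ADDRESS guard can be dropped here because pat_pagerange_is_ram() itself reports everything below the 1MB ISA boundary as non-RAM, which keeps legacy /dev/mem mappings on the memtype list as before. For context, a hedged sketch of the usual reserve/free pairing as seen from a caller (assumed driver-style usage, identifiers illustrative):

	unsigned long flags;
	int err;

	/* request an uncached-minus mapping type for a device aperture */
	err = reserve_memtype(base, base + size, _PAGE_CACHE_UC_MINUS, &flags);
	if (err)
		return err;	/* -EINVAL now also covers mixed RAM/non-RAM ranges */

	/* ... map and use the region with the type returned in 'flags' ... */

	free_memtype(base, base + size);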
@@ -446,19 +465,11 @@ int free_memtype(u64 start, u64 end)
 	if (is_ISA_range(start, end - 1))
 		return 0;
 
-	/*
-	 * For legacy reasons, some parts of the physical address range in the
-	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
-	 * the e820 tables). So we will track the memory attributes of this
-	 * legacy 1MB region using the linear memtype_list always.
-	 */
-	if (end >= ISA_END_ADDRESS) {
-		is_range_ram = pagerange_is_ram(start, end);
-		if (is_range_ram == 1)
-			return free_ram_pages_type(start, end);
-		else if (is_range_ram < 0)
-			return -EINVAL;
-	}
+	is_range_ram = pat_pagerange_is_ram(start, end);
+	if (is_range_ram == 1)
+		return free_ram_pages_type(start, end);
+	else if (is_range_ram < 0)
+		return -EINVAL;
 
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
@@ -626,17 +637,13 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 	unsigned long flags;
 	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
-	is_ram = pagerange_is_ram(paddr, paddr + size);
+	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
-	if (is_ram != 0) {
-		/*
-		 * For mapping RAM pages, drivers need to call
-		 * set_memory_[uc|wc|wb] directly, for reserve and free, before
-		 * setting up the PTE.
-		 */
-		WARN_ON_ONCE(1);
-		return 0;
-	}
+	/*
+	 * reserve_pfn_range() doesn't support RAM pages.
+	 */
+	if (is_ram != 0)
+		return -EINVAL;
 
 	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
 	if (ret)
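
Behavior change worth noting: reserve_pfn_range() used to warn and return 0 for RAM pages; it now fails with -EINVAL, so remap_pfn_range()/vm_insert_pfn() callers can no longer lean on it for RAM. As the removed comment said, drivers mapping RAM with a non-WB type are expected to change the attribute themselves via set_memory_[uc|wc|wb]. A rough sketch of that flow (assumed driver code, identifiers illustrative):

	/* switch the kernel linear mapping of the pages to uncached first */
	if (set_memory_uc((unsigned long)kernel_vaddr, nr_pages))
		return -EIO;

	/* now hand the pfn out to userspace */
	err = vm_insert_pfn(vma, vma->vm_start, page_to_pfn(page));

	/* on teardown, restore the default write-back attribute */
	set_memory_wb((unsigned long)kernel_vaddr, nr_pages);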
@@ -693,7 +700,7 @@ static void free_pfn_range(u64 paddr, unsigned long size)
 {
 	int is_ram;
 
-	is_ram = pagerange_is_ram(paddr, paddr + size);
+	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 	if (is_ram == 0)
 		free_memtype(paddr, paddr + size);
 }