Diffstat (limited to 'arch/x86/mm')
-rw-r--r--   arch/x86/mm/discontig_32.c      2
-rw-r--r--   arch/x86/mm/dump_pagetables.c   4
-rw-r--r--   arch/x86/mm/init_64.c           8
-rw-r--r--   arch/x86/mm/numa_64.c          10
-rw-r--r--   arch/x86/mm/pageattr.c          4
-rw-r--r--   arch/x86/mm/pgtable.c           6
-rw-r--r--   arch/x86/mm/pgtable_32.c        3
7 files changed, 19 insertions, 18 deletions
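
Most of the hunks below switch this code from the arch-local round_up() helper to the generic roundup() macro from <linux/kernel.h>. As a rough sketch of the two idioms (the authoritative definitions live in the kernel headers and may vary by version), roundup() divides and multiplies so it works for any non-zero alignment, while the masking form assumes a power-of-two alignment:

/* Sketch only -- see include/linux/kernel.h for the real definitions. */
#define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))  /* any y != 0 */
#define round_up(x, y)  ((((x) - 1) | ((y) - 1)) + 1)      /* y must be a power of two */

/* Example: roundup(5000, 4096) == 8192; for the power-of-two alignments used
 * here (PAGE_SIZE, PMD_SIZE, PTRS_PER_PTE, ...), both forms agree. */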
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 62fa440678d8..847c164725f4 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -328,7 +328,7 @@ void __init initmem_init(unsigned long start_pfn,
 
         get_memcfg_numa();
 
-        kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE);
+        kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
 
         kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
         do {
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a20d1fa64b4e..e7277cbcfb40 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -148,8 +148,8 @@ static void note_page(struct seq_file *m, struct pg_state *st,
          * we have now. "break" is either changing perms, levels or
          * address space marker.
          */
-        prot = pgprot_val(new_prot) & ~(PTE_PFN_MASK);
-        cur = pgprot_val(st->current_prot) & ~(PTE_PFN_MASK);
+        prot = pgprot_val(new_prot) & PTE_FLAGS_MASK;
+        cur = pgprot_val(st->current_prot) & PTE_FLAGS_MASK;
 
         if (!st->level) {
                 /* First entry */
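
The dump_pagetables change relies on PTE_FLAGS_MASK being the complement of PTE_PFN_MASK, so the masked result is unchanged and only the intent is spelled out. The relationship below is an assumption about the x86 page-table headers, not part of this diff:

/* Assumed header relationship (arch/x86 pgtable headers):
 *   PTE_PFN_MASK   -- bits holding the physical frame number
 *   PTE_FLAGS_MASK -- everything else, i.e. the protection/flag bits
 * #define PTE_FLAGS_MASK (~PTE_PFN_MASK)
 */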
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d3746efb060d..770536ebf7e9 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -225,7 +225,7 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
 void __init cleanup_highmap(void)
 {
         unsigned long vaddr = __START_KERNEL_map;
-        unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
+        unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
         pmd_t *pmd = level2_kernel_pgt;
         pmd_t *last_pmd = pmd + PTRS_PER_PMD;
 
@@ -451,14 +451,14 @@ static void __init find_early_table_space(unsigned long end)
         unsigned long puds, pmds, ptes, tables, start;
 
         puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
+        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
         if (direct_gbpages) {
                 unsigned long extra;
                 extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
                 pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
         } else
                 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-        tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
 
         if (cpu_has_pse) {
                 unsigned long extra;
@@ -466,7 +466,7 @@ static void __init find_early_table_space(unsigned long end)
                 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
         } else
                 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-        tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE);
+        tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
 
         /*
          * RED-PEN putting page tables only on node 0 could
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index a4dd793d6003..cebcbf152d46 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -79,7 +79,7 @@ static int __init allocate_cachealigned_memnodemap(void)
                 return 0;
 
         addr = 0x8000;
-        nodemap_size = round_up(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
+        nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
         nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
                                       nodemap_size, L1_CACHE_BYTES);
         if (nodemap_addr == -1UL) {
@@ -176,10 +176,10 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
         unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size;
         unsigned long bootmap_start, nodedata_phys;
         void *bootmap;
-        const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);
+        const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
         int nid;
 
-        start = round_up(start, ZONE_ALIGN);
+        start = roundup(start, ZONE_ALIGN);
 
         printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
                start, end);
@@ -210,9 +210,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
         bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
         nid = phys_to_nid(nodedata_phys);
         if (nid == nodeid)
-                bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
+                bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
         else
-                bootmap_start = round_up(start, PAGE_SIZE);
+                bootmap_start = roundup(start, PAGE_SIZE);
         /*
          * SMP_CACHE_BYTES could be enough, but init_bootmem_node like
          * to use that to align to PAGE_SIZE
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 43e2f8483e4f..898fad617abe 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -84,7 +84,7 @@ static inline unsigned long highmap_start_pfn(void)
 
 static inline unsigned long highmap_end_pfn(void)
 {
-        return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
+        return __pa(roundup((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
 }
 
 #endif
@@ -906,11 +906,13 @@ int set_memory_ro(unsigned long addr, int numpages)
 {
         return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
 }
+EXPORT_SYMBOL_GPL(set_memory_ro);
 
 int set_memory_rw(unsigned long addr, int numpages)
 {
         return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
 }
+EXPORT_SYMBOL_GPL(set_memory_rw);
 
 int set_memory_np(unsigned long addr, int numpages)
 {
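
The two EXPORT_SYMBOL_GPL() lines make set_memory_ro() and set_memory_rw() callable from GPL modules. A minimal usage sketch follows; the buffer name and page count are illustrative assumptions, not taken from this patch:

/* Toggle write protection on one page of a module-owned, page-aligned
 * buffer; addr must be page aligned and numpages counts pages. */
unsigned long addr = (unsigned long)my_page_aligned_buf;  /* hypothetical buffer */

set_memory_ro(addr, 1);   /* clears _PAGE_RW: writes to the page now fault */
/* ... hand out the data read-only ... */
set_memory_rw(addr, 1);   /* sets _PAGE_RW: the page is writable again */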
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index d50302774fe2..86f2ffc43c3d 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -63,10 +63,8 @@ static inline void pgd_list_del(pgd_t *pgd)
 #define UNSHARED_PTRS_PER_PGD \
         (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
 
-static void pgd_ctor(void *p)
+static void pgd_ctor(pgd_t *pgd)
 {
-        pgd_t *pgd = p;
-
         /* If the pgd points to a shared pagetable level (either the
            ptes in non-PAE, or shared PMD in PAE), then just copy the
            references from swapper_pg_dir. */
@@ -87,7 +85,7 @@ static void pgd_ctor(void *p)
         pgd_list_add(pgd);
 }
 
-static void pgd_dtor(void *pgd)
+static void pgd_dtor(pgd_t *pgd)
 {
         unsigned long flags; /* can be called from interrupt context */
 
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index cab0abbd1ebe..0951db9ee519 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -123,7 +123,8 @@ static int __init parse_vmalloc(char *arg)
         if (!arg)
                 return -EINVAL;
 
-        __VMALLOC_RESERVE = memparse(arg, &arg);
+        /* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole*/
+        __VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
         return 0;
 }
 early_param("vmalloc", parse_vmalloc);
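
With this hunk, the vmalloc= early parameter reserves the requested size plus the guard hole between lowmem and the vmalloc area, so the hole no longer eats into what the user asked for. Illustrative arithmetic, assuming the usual 8 MiB VMALLOC_OFFSET on 32-bit x86 (the exact value comes from the headers, not this diff):

/* Booting with "vmalloc=256M":
 *   before: __VMALLOC_RESERVE = 256 MiB, usable vmalloc space roughly 248 MiB
 *           once the guard hole is skipped
 *   after:  __VMALLOC_RESERVE = 256 MiB + VMALLOC_OFFSET (8 MiB assumed),
 *           so the usable vmalloc area is the full 256 MiB requested. */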
