author     H. Peter Anvin <hpa@zytor.com>  2012-05-30 15:11:26 -0400
committer  H. Peter Anvin <hpa@zytor.com>  2012-05-30 15:11:32 -0400
commit     bbd771474ec44b516107685d77e1c80bbe09f141 (patch)
tree       0cb15781539a68f27b4ea6c89f827282630cbce6 /arch/x86/mm
parent     403e1c5b7495d7b80fae9fc4d0a7a6f5abdc3307 (diff)
parent     319b6ffc6df892e4ccffff823cc5521a4a5d2dca (diff)
Merge branch 'x86/trampoline' into x86/urgent
x86/trampoline contains an urgent commit which is necessarily on a newer baseline.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--   arch/x86/mm/init.c             16
-rw-r--r--   arch/x86/mm/numa.c             32
-rw-r--r--   arch/x86/mm/numa_emulation.c    4
-rw-r--r--   arch/x86/mm/pat.c              42
-rw-r--r--   arch/x86/mm/srat.c              5
5 files changed, 49 insertions, 50 deletions
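
The common thread in the hunks below is a printk formatting change: memory ranges move from ad-hoc forms such as %016lx-%016lx or 0x%Lx-0x%Lx with an exclusive end to the bracketed [mem %#010lx-%#010lx] style with an inclusive end, which is why each call site now prints end - 1. The following sketch is illustrative only (it is not part of the patch; the addresses are made up, and userspace printf stands in for printk) and shows what the change does to the log output:

/* format_sketch.c -- illustrative only, not part of the patch */
#include <stdio.h>

int main(void)
{
	unsigned long start = 0x100000, end = 0x200000;	/* end is exclusive */

	/* old style: zero-padded hex, exclusive end */
	printf("init_memory_mapping: %016lx-%016lx\n", start, end);
	/* -> init_memory_mapping: 0000000000100000-0000000000200000 */

	/* new style: "[mem ...]" range with 0x prefix and inclusive end */
	printf("init_memory_mapping: [mem %#010lx-%#010lx]\n", start, end - 1);
	/* -> init_memory_mapping: [mem 0x00100000-0x001fffff] */

	return 0;
}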
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 319b6f2fb8b9..97141c26a13a 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -84,8 +84,9 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
 	pgt_buf_end = pgt_buf_start;
 	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
 
-	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-		end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
+	printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
+		end - 1, pgt_buf_start << PAGE_SHIFT,
+		(pgt_buf_top << PAGE_SHIFT) - 1);
 }
 
 void __init native_pagetable_reserve(u64 start, u64 end)
@@ -132,7 +133,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	int nr_range, i;
 	int use_pse, use_gbpages;
 
-	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
+	printk(KERN_INFO "init_memory_mapping: [mem %#010lx-%#010lx]\n",
+	       start, end - 1);
 
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
 	/*
@@ -251,8 +253,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	}
 
 	for (i = 0; i < nr_range; i++)
-		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
-				mr[i].start, mr[i].end,
+		printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
+				mr[i].start, mr[i].end - 1,
 			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
 			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
 
@@ -350,8 +352,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	 * create a kernel page fault:
 	 */
 #ifdef CONFIG_DEBUG_PAGEALLOC
-	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-		begin, end);
+	printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
+	       begin, end - 1);
 	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 #else
 	/*
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 19d3fa08b119..2d125be1bae9 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -141,8 +141,8 @@ static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
 
 	/* whine about and ignore invalid blks */
 	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
-		pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
-			   nid, start, end);
+		pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
+			   nid, start, end - 1);
 		return 0;
 	}
 
@@ -210,8 +210,8 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
 
 	start = roundup(start, ZONE_ALIGN);
 
-	printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n",
-	       nid, start, end);
+	printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
+	       nid, start, end - 1);
 
 	/*
 	 * Allocate node data.  Try remap allocator first, node-local
@@ -232,7 +232,7 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
 	}
 
 	/* report and initialize */
-	printk(KERN_INFO "  NODE_DATA [%016Lx - %016Lx]%s\n",
+	printk(KERN_INFO "  NODE_DATA [mem %#010Lx-%#010Lx]%s\n",
 	       nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
 	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
 	if (!remapped && tnid != nid)
@@ -291,14 +291,14 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
 			 */
 			if (bi->end > bj->start && bi->start < bj->end) {
 				if (bi->nid != bj->nid) {
-					pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
-					       bi->nid, bi->start, bi->end,
-					       bj->nid, bj->start, bj->end);
+					pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
+					       bi->nid, bi->start, bi->end - 1,
+					       bj->nid, bj->start, bj->end - 1);
 					return -EINVAL;
 				}
-				pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
-					   bi->nid, bi->start, bi->end,
-					   bj->start, bj->end);
+				pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
+					   bi->nid, bi->start, bi->end - 1,
+					   bj->start, bj->end - 1);
 			}
 
 			/*
@@ -320,9 +320,9 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
 			}
 			if (k < mi->nr_blks)
 				continue;
-			printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%Lx,%Lx)\n",
-			       bi->nid, bi->start, bi->end, bj->start, bj->end,
-			       start, end);
+			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
+			       bi->nid, bi->start, bi->end - 1, bj->start,
+			       bj->end - 1, start, end - 1);
 			bi->start = start;
 			bi->end = end;
 			numa_remove_memblk_from(j--, mi);
@@ -616,8 +616,8 @@ static int __init dummy_numa_init(void)
 {
 	printk(KERN_INFO "%s\n",
 	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
-	printk(KERN_INFO "Faking a node at %016Lx-%016Lx\n",
-	       0LLU, PFN_PHYS(max_pfn));
+	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
+	       0LLU, PFN_PHYS(max_pfn) - 1);
 
 	node_set(0, numa_nodes_parsed);
 	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 871dd8868170..dbbbb47260cc 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -68,8 +68,8 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
 		numa_remove_memblk_from(phys_blk, pi);
 	}
 
-	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
-	       eb->start, eb->end, (eb->end - eb->start) >> 20);
+	printk(KERN_INFO "Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n",
+	       nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);
 	return 0;
 }
 
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index bea6e573e02b..3d68ef6d2266 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -225,9 +225,8 @@ static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
 		page = pfn_to_page(pfn);
 		type = get_page_memtype(page);
 		if (type != -1) {
-			printk(KERN_INFO "reserve_ram_pages_type failed "
-				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
-				start, end, type, req_type);
+			printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
+				start, end - 1, type, req_type);
 			if (new_type)
 				*new_type = type;
 
@@ -330,9 +329,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	err = rbt_memtype_check_insert(new, new_type);
 	if (err) {
-		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
-		       "track %s, req %s\n",
-		       start, end, cattr_name(new->type), cattr_name(req_type));
+		printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
+		       start, end - 1,
+		       cattr_name(new->type), cattr_name(req_type));
 		kfree(new);
 		spin_unlock(&memtype_lock);
 
@@ -341,8 +340,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	spin_unlock(&memtype_lock);
 
-	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
-		start, end, cattr_name(new->type), cattr_name(req_type),
+	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
+		start, end - 1, cattr_name(new->type), cattr_name(req_type),
 		new_type ? cattr_name(*new_type) : "-");
 
 	return err;
@@ -376,14 +375,14 @@ int free_memtype(u64 start, u64 end)
 	spin_unlock(&memtype_lock);
 
 	if (!entry) {
-		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
-			current->comm, current->pid, start, end);
+		printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
+			current->comm, current->pid, start, end - 1);
 		return -EINVAL;
 	}
 
 	kfree(entry);
 
-	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
+	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);
 
 	return 0;
 }
@@ -507,9 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 
 	while (cursor < to) {
 		if (!devmem_is_allowed(pfn)) {
-			printk(KERN_INFO
-		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
-				current->comm, from, to);
+			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
+				current->comm, from, to - 1);
 			return 0;
 		}
 		cursor += PAGE_SIZE;
@@ -570,12 +568,11 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
 				size;
 
 	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
-		printk(KERN_INFO
-			"%s:%d ioremap_change_attr failed %s "
-			"for %Lx-%Lx\n",
+		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
+			"for [mem %#010Lx-%#010Lx]\n",
 			current->comm, current->pid,
 			cattr_name(flags),
-			base, (unsigned long long)(base + size));
+			base, (unsigned long long)(base + size-1));
 		return -EINVAL;
 	}
 	return 0;
@@ -607,12 +604,11 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 
 		flags = lookup_memtype(paddr);
 		if (want_flags != flags) {
-			printk(KERN_WARNING
-			"%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
+			printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
 				cattr_name(want_flags),
 				(unsigned long long)paddr,
-				(unsigned long long)(paddr + size),
+				(unsigned long long)(paddr + size - 1),
 				cattr_name(flags));
 			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
 					     (~_PAGE_CACHE_MASK)) |
@@ -630,11 +626,11 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
 			free_memtype(paddr, paddr + size);
 			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
-				" for %Lx-%Lx, got %s\n",
+				" for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
 				cattr_name(want_flags),
 				(unsigned long long)paddr,
-				(unsigned long long)(paddr + size),
+				(unsigned long long)(paddr + size - 1),
 				cattr_name(flags));
 			return -EINVAL;
 		}
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index efb5b4b93711..732af3a96183 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -176,8 +176,9 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 		return;
 	}
 
-	printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
-			start, end);
+	printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
+	       node, pxm,
+	       (unsigned long long) start, (unsigned long long) end - 1);
 }
 
 void __init acpi_numa_arch_fixup(void) {}