author		Ingo Molnar <mingo@elte.hu>	2009-09-15 06:18:15 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-15 06:18:15 -0400
commit		dca2d6ac09d9ef59ff46820d4f0c94b08a671202
tree		fdec753b842dad09e3a4151954fab3eb5c43500d	/arch/x86/mm
parent		d6a65dffb30d8636b1e5d4c201564ef401a246cf
parent		18240904960a39e582ced8ba8ececb10b8c22dd3
Merge branch 'linus' into tracing/hw-breakpoints

Conflicts:
	arch/x86/kernel/process_64.c

Semantic conflict fixed in:
	arch/x86/kvm/x86.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/Makefile               |  6
-rw-r--r--	arch/x86/mm/fault.c                | 51
-rw-r--r--	arch/x86/mm/highmem_32.c           |  3
-rw-r--r--	arch/x86/mm/ioremap.c              | 72
-rw-r--r--	arch/x86/mm/kmemcheck/kmemcheck.c  | 14
-rw-r--r--	arch/x86/mm/pat.c                  |  2
-rw-r--r--	arch/x86/mm/physaddr.c             | 70
-rw-r--r--	arch/x86/mm/physaddr.h             | 10
-rw-r--r--	arch/x86/mm/srat_32.c              |  4
9 files changed, 126 insertions(+), 106 deletions(-)
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index eefdeee8a871..9b5a9f59a478 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,5 +1,9 @@
 obj-y	:=	init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-		pat.o pgtable.o gup.o
+		pat.o pgtable.o physaddr.o gup.o
+
+# Make sure __phys_addr has no stackprotector
+nostackp := $(call cc-option, -fno-stack-protector)
+CFLAGS_physaddr.o := $(nostackp)
 
 obj-$(CONFIG_SMP)		+= tlb.o
 
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index bfae139182ff..775a020990a5 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -285,26 +285,25 @@ check_v8086_mode(struct pt_regs *regs, unsigned long address,
 	tsk->thread.screen_bitmap |= 1 << bit;
 }
 
-static void dump_pagetable(unsigned long address)
+static bool low_pfn(unsigned long pfn)
 {
-	__typeof__(pte_val(__pte(0))) page;
+	return pfn < max_low_pfn;
+}
 
-	page = read_cr3();
-	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
+static void dump_pagetable(unsigned long address)
+{
+	pgd_t *base = __va(read_cr3());
+	pgd_t *pgd = &base[pgd_index(address)];
+	pmd_t *pmd;
+	pte_t *pte;
 
 #ifdef CONFIG_X86_PAE
-	printk("*pdpt = %016Lx ", page);
-	if ((page >> PAGE_SHIFT) < max_low_pfn
-	    && page & _PAGE_PRESENT) {
-		page &= PAGE_MASK;
-		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
-							 & (PTRS_PER_PMD - 1)];
-		printk(KERN_CONT "*pde = %016Lx ", page);
-		page &= ~_PAGE_NX;
-	}
-#else
-	printk("*pde = %08lx ", page);
+	printk("*pdpt = %016Lx ", pgd_val(*pgd));
+	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
+		goto out;
 #endif
+	pmd = pmd_offset(pud_offset(pgd, address), address);
+	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
 
 	/*
 	 * We must not directly access the pte in the highpte
@@ -312,16 +311,12 @@ static void dump_pagetable(unsigned long address)
 	 * And let's rather not kmap-atomic the pte, just in case
 	 * it's allocated already:
 	 */
-	if ((page >> PAGE_SHIFT) < max_low_pfn
-	    && (page & _PAGE_PRESENT)
-	    && !(page & _PAGE_PSE)) {
-
-		page &= PAGE_MASK;
-		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
-							 & (PTRS_PER_PTE - 1)];
-		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
-	}
+	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
+		goto out;
 
+	pte = pte_offset_kernel(pmd, address);
+	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
+out:
 	printk("\n");
 }
 
@@ -450,16 +445,12 @@ static int bad_address(void *p)
 
 static void dump_pagetable(unsigned long address)
 {
-	pgd_t *pgd;
+	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
+	pgd_t *pgd = base + pgd_index(address);
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 
-	pgd = (pgd_t *)read_cr3();
-
-	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
-
-	pgd += pgd_index(address);
 	if (bad_address(pgd))
 		goto bad;
 
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 2112ed55e7ea..63a6ba66cbe0 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -24,7 +24,7 @@ void kunmap(struct page *page)
  * no global lock is needed and because the kmap code must perform a global TLB
  * invalidation when the kmap pool wraps.
  *
- * However when holding an atomic kmap is is not legal to sleep, so atomic
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
 void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
@@ -104,6 +104,7 @@ EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kmap_atomic);
 EXPORT_SYMBOL(kunmap_atomic);
 EXPORT_SYMBOL(kmap_atomic_prot);
+EXPORT_SYMBOL(kmap_atomic_to_page);
 
 void __init set_highmem_pages_init(void)
 {
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 8a450930834f..04e1ad60c63a 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -22,77 +22,7 @@
 #include <asm/pgalloc.h>
 #include <asm/pat.h>
 
-static inline int phys_addr_valid(resource_size_t addr)
-{
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-	return !(addr >> boot_cpu_data.x86_phys_bits);
-#else
-	return 1;
-#endif
-}
-
-#ifdef CONFIG_X86_64
-
-unsigned long __phys_addr(unsigned long x)
-{
-	if (x >= __START_KERNEL_map) {
-		x -= __START_KERNEL_map;
-		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
-		x += phys_base;
-	} else {
-		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
-		x -= PAGE_OFFSET;
-		VIRTUAL_BUG_ON(!phys_addr_valid(x));
-	}
-	return x;
-}
-EXPORT_SYMBOL(__phys_addr);
-
-bool __virt_addr_valid(unsigned long x)
-{
-	if (x >= __START_KERNEL_map) {
-		x -= __START_KERNEL_map;
-		if (x >= KERNEL_IMAGE_SIZE)
-			return false;
-		x += phys_base;
-	} else {
-		if (x < PAGE_OFFSET)
-			return false;
-		x -= PAGE_OFFSET;
-		if (!phys_addr_valid(x))
-			return false;
-	}
-
-	return pfn_valid(x >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(__virt_addr_valid);
-
-#else
-
-#ifdef CONFIG_DEBUG_VIRTUAL
-unsigned long __phys_addr(unsigned long x)
-{
-	/* VMALLOC_* aren't constants */
-	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
-	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
-	return x - PAGE_OFFSET;
-}
-EXPORT_SYMBOL(__phys_addr);
-#endif
-
-bool __virt_addr_valid(unsigned long x)
-{
-	if (x < PAGE_OFFSET)
-		return false;
-	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
-		return false;
-	if (x >= FIXADDR_START)
-		return false;
-	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(__virt_addr_valid);
-
-#endif
+#include "physaddr.h"
 
 int page_is_ram(unsigned long pagenr)
 {
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index 2c55ed098654..528bf954eb74 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -331,6 +331,20 @@ static void kmemcheck_read_strict(struct pt_regs *regs,
 	kmemcheck_shadow_set(shadow, size);
 }
 
+bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
+{
+	enum kmemcheck_shadow status;
+	void *shadow;
+
+	shadow = kmemcheck_shadow_lookup(addr);
+	if (!shadow)
+		return true;
+
+	status = kmemcheck_shadow_test(shadow, size);
+
+	return status == KMEMCHECK_SHADOW_INITIALIZED;
+}
+
 /* Access may cross page boundary */
 static void kmemcheck_read(struct pt_regs *regs,
 		unsigned long addr, unsigned int size)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 352aa9e927e2..b2f7d3e59b86 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -827,7 +827,7 @@ static int memtype_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
-static struct seq_operations memtype_seq_ops = {
+static const struct seq_operations memtype_seq_ops = {
 	.start = memtype_seq_start,
 	.next  = memtype_seq_next,
 	.stop  = memtype_seq_stop,
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
new file mode 100644
index 000000000000..d2e2735327b4
--- /dev/null
+++ b/arch/x86/mm/physaddr.c
@@ -0,0 +1,70 @@
+#include <linux/mmdebug.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+
+#include "physaddr.h"
+
+#ifdef CONFIG_X86_64
+
+unsigned long __phys_addr(unsigned long x)
+{
+	if (x >= __START_KERNEL_map) {
+		x -= __START_KERNEL_map;
+		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
+		x += phys_base;
+	} else {
+		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
+		x -= PAGE_OFFSET;
+		VIRTUAL_BUG_ON(!phys_addr_valid(x));
+	}
+	return x;
+}
+EXPORT_SYMBOL(__phys_addr);
+
+bool __virt_addr_valid(unsigned long x)
+{
+	if (x >= __START_KERNEL_map) {
+		x -= __START_KERNEL_map;
+		if (x >= KERNEL_IMAGE_SIZE)
+			return false;
+		x += phys_base;
+	} else {
+		if (x < PAGE_OFFSET)
+			return false;
+		x -= PAGE_OFFSET;
+		if (!phys_addr_valid(x))
+			return false;
+	}
+
+	return pfn_valid(x >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(__virt_addr_valid);
+
+#else
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+unsigned long __phys_addr(unsigned long x)
+{
+	/* VMALLOC_* aren't constants */
+	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
+	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
+	return x - PAGE_OFFSET;
+}
+EXPORT_SYMBOL(__phys_addr);
+#endif
+
+bool __virt_addr_valid(unsigned long x)
+{
+	if (x < PAGE_OFFSET)
+		return false;
+	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
+		return false;
+	if (x >= FIXADDR_START)
+		return false;
+	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(__virt_addr_valid);
+
+#endif	/* CONFIG_X86_64 */
diff --git a/arch/x86/mm/physaddr.h b/arch/x86/mm/physaddr.h
new file mode 100644
index 000000000000..a3cd5a0c97b3
--- /dev/null
+++ b/arch/x86/mm/physaddr.h
@@ -0,0 +1,10 @@
+#include <asm/processor.h>
+
+static inline int phys_addr_valid(resource_size_t addr)
+{
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+	return !(addr >> boot_cpu_data.x86_phys_bits);
+#else
+	return 1;
+#endif
+}
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 29a0e37114f8..6f8aa33031c7 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -215,7 +215,7 @@ int __init get_memcfg_from_srat(void)
 		goto out_fail;
 
 	if (num_memory_chunks == 0) {
-		printk(KERN_WARNING
+		printk(KERN_DEBUG
 			 "could not find any ACPI SRAT memory areas.\n");
 		goto out_fail;
 	}
@@ -277,7 +277,7 @@ int __init get_memcfg_from_srat(void)
 	}
 	return 1;
 out_fail:
-	printk(KERN_ERR "failed to get NUMA memory information from SRAT"
+	printk(KERN_DEBUG "failed to get NUMA memory information from SRAT"
 			" table\n");
 	return 0;
 }