Diffstat (limited to 'arch/x86')

-rw-r--r--	arch/x86/mm/dump_pagetables.c	32
-rw-r--r--	arch/x86/mm/ioremap.c	14
-rw-r--r--	arch/x86/mm/kmmio.c	16
-rw-r--r--	arch/x86/mm/pat.c	2
-rw-r--r--	arch/x86/mm/testmmiotrace.c	22
-rw-r--r--	arch/x86/mm/tlb.c	4
-rw-r--r--	arch/x86/vdso/vdso32-setup.c	2
-rw-r--r--	arch/x86/vdso/vma.c	2

8 files changed, 71 insertions, 23 deletions
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a725b7f760ae..0002a3a33081 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -37,6 +37,28 @@ struct addr_marker {
 	const char *name;
 };
 
+/* indices for address_markers; keep sync'd w/ address_markers below */
+enum address_markers_idx {
+	USER_SPACE_NR = 0,
+#ifdef CONFIG_X86_64
+	KERNEL_SPACE_NR,
+	LOW_KERNEL_NR,
+	VMALLOC_START_NR,
+	VMEMMAP_START_NR,
+	HIGH_KERNEL_NR,
+	MODULES_VADDR_NR,
+	MODULES_END_NR,
+#else
+	KERNEL_SPACE_NR,
+	VMALLOC_START_NR,
+	VMALLOC_END_NR,
+# ifdef CONFIG_HIGHMEM
+	PKMAP_BASE_NR,
+# endif
+	FIXADDR_START_NR,
+#endif
+};
+
 /* Address space markers hints */
 static struct addr_marker address_markers[] = {
 	{ 0, "User Space" },
@@ -331,14 +353,12 @@ static int pt_dump_init(void)
 
 #ifdef CONFIG_X86_32
 	/* Not a compile-time constant on x86-32 */
-	address_markers[2].start_address = VMALLOC_START;
-	address_markers[3].start_address = VMALLOC_END;
+	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
+	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
 # ifdef CONFIG_HIGHMEM
-	address_markers[4].start_address = PKMAP_BASE;
-	address_markers[5].start_address = FIXADDR_START;
-# else
-	address_markers[4].start_address = FIXADDR_START;
+	address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
 # endif
+	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
 #endif
 
 	pe = debugfs_create_file("kernel_page_tables", 0600, NULL, NULL,
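The point of the new enum: pt_dump_init() patched address_markers[] through fragile hard-coded indices (2 through 5) that had to differ between HIGHMEM and non-HIGHMEM builds, which is why the old code needed an # else branch at all. As a stand-alone illustration of the same idea taken one step further (not what the patch does; all names below are hypothetical userspace stand-ins), C99 designated initializers let the compiler bind each slot to its enum index so the two can never drift apart:

#include <stdio.h>

/* Hypothetical mirror of the marker table, with each slot bound to its
 * enum index by a designated initializer. */
struct addr_marker {
	unsigned long start_address;
	const char *name;
};

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
	NR_MARKERS
};

static struct addr_marker address_markers[NR_MARKERS] = {
	[USER_SPACE_NR]   = { 0, "User Space" },
	[KERNEL_SPACE_NR] = { 0, "Kernel Space" },	/* fixed up at runtime */
};

int main(void)
{
	/* Runtime fix-up by name, as pt_dump_init() now does. */
	address_markers[KERNEL_SPACE_NR].start_address = 0xc0000000UL;

	for (int i = 0; i < NR_MARKERS; i++)
		printf("%#010lx %s\n", address_markers[i].start_address,
		       address_markers[i].name);
	return 0;
}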
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 12e4d2d3c110..3ba6e0608c55 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -62,8 +62,8 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
 static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 		unsigned long size, unsigned long prot_val, void *caller)
 {
-	unsigned long pfn, offset, vaddr;
-	resource_size_t last_addr;
+	unsigned long offset, vaddr;
+	resource_size_t pfn, last_pfn, last_addr;
 	const resource_size_t unaligned_phys_addr = phys_addr;
 	const unsigned long unaligned_size = size;
 	struct vm_struct *area;
@@ -100,10 +100,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
-	for (pfn = phys_addr >> PAGE_SHIFT;
-	     (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
-	     pfn++) {
-
+	last_pfn = last_addr >> PAGE_SHIFT;
+	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
 		int is_ram = page_is_ram(pfn);
 
 		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
@@ -115,7 +113,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	 * Mappings have to be page-aligned
 	 */
 	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
+	phys_addr &= PHYSICAL_PAGE_MASK;
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
 	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
@@ -613,7 +611,7 @@ void __init early_iounmap(void __iomem *addr, unsigned long size)
 		return;
 	}
 	offset = virt_addr & ~PAGE_MASK;
-	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
+	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
 
 	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
 	while (nrpages > 0) {
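Two of the ioremap changes above are subtle. The RAM-check loop now computes last_pfn once and compares pfn <= last_pfn in resource_size_t width; the old `(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK)` test shifted an unsigned long pfn, which on 32-bit PAE truncates physical addresses above 4 GB (the PHYSICAL_PAGE_MASK change is the matching wide mask for the same reason). Separately, in early_iounmap(), `PAGE_ALIGN(offset + size - 1)` undercounts by one page whenever offset + size lands exactly one byte past a page boundary. A minimal userspace check of that arithmetic (a sketch only: PAGE_SHIFT = 12 is assumed, and the kernel macros are re-derived locally):

#include <stdio.h>

/* Userspace stand-ins for the kernel macros; 4 KB pages assumed. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* One byte past a page boundary: [0, 4097) spans two pages. */
	unsigned long offset = 0, size = PAGE_SIZE + 1;

	/* Old formula: rounds 4096 up to 4096 -> 1 page (undercount). */
	printf("old nrpages = %lu\n",
	       PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT);

	/* Fixed formula: rounds 4097 up to 8192 -> 2 pages. */
	printf("new nrpages = %lu\n",
	       PAGE_ALIGN(offset + size) >> PAGE_SHIFT);
	return 0;
}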
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 5d0e67fff1a6..e5d5e2ce9f77 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -45,6 +45,8 @@ struct kmmio_fault_page {
 	 * Protected by kmmio_lock, when linked into kmmio_page_table.
 	 */
 	int count;
+
+	bool scheduled_for_release;
 };
 
 struct kmmio_delayed_release {
@@ -398,8 +400,11 @@ static void release_kmmio_fault_page(unsigned long page,
 	BUG_ON(f->count < 0);
 	if (!f->count) {
 		disarm_kmmio_fault_page(f);
-		f->release_next = *release_list;
-		*release_list = f;
+		if (!f->scheduled_for_release) {
+			f->release_next = *release_list;
+			*release_list = f;
+			f->scheduled_for_release = true;
+		}
 	}
 }
 
@@ -471,8 +476,10 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
 			prevp = &f->release_next;
 		} else {
 			*prevp = f->release_next;
+			f->release_next = NULL;
+			f->scheduled_for_release = false;
 		}
-		f = f->release_next;
+		f = *prevp;
 	}
 	spin_unlock_irqrestore(&kmmio_lock, flags);
 
@@ -510,6 +517,9 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
 	kmmio_count--;
 	spin_unlock_irqrestore(&kmmio_lock, flags);
 
+	if (!release_list)
+		return;
+
 	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
 	if (!drelease) {
 		pr_crit("leaking kmmio_fault_page objects.\n");
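The kmmio changes are all one fix: a fault page must never be queued onto the singly linked release list twice. Since ioremap() can hand out the same virtual address again, back-to-back map/unmap cycles could reach release_kmmio_fault_page() for a page already scheduled, re-link it, and corrupt the list; the scheduled_for_release flag makes the second queueing a no-op, and remove_kmmio_fault_pages() now advances with `f = *prevp` so a just-unlinked node (whose release_next is cleared) no longer cuts the traversal short. The testmmiotrace change below exercises exactly this scenario. A stand-alone sketch of the guard idiom, with hypothetical names in place of the kmmio types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal stand-in for kmmio_fault_page's release-list linkage. */
struct node {
	struct node *next;
	bool queued;		/* mirrors scheduled_for_release */
};

/* Queue a node exactly once; a repeated call is a no-op instead of
 * re-linking the node and creating a cycle in the list. */
static void queue_once(struct node *n, struct node **list)
{
	if (n->queued)
		return;
	n->next = *list;
	*list = n;
	n->queued = true;
}

int main(void)
{
	struct node a = { NULL, false }, b = { NULL, false };
	struct node *list = NULL;

	queue_once(&a, &list);
	queue_once(&b, &list);
	queue_once(&a, &list);	/* unguarded, this would make b -> a -> b -> ... */

	for (struct node *p = list; p; p = p->next)
		printf("node %p\n", (void *)p);
	return 0;
}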
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 64121a18b8cb..f6ff57b7efa5 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -158,7 +158,7 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 	return req_type;
 }
 
-static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
+static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
 {
 	int ram_page = 0, not_rampage = 0;
 	unsigned long page_nr;
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index 8565d944f7cf..38868adf07ea 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -90,6 +90,27 @@ static void do_test(unsigned long size)
 	iounmap(p);
 }
 
+/*
+ * Tests how mmiotrace behaves in face of multiple ioremap / iounmaps in
+ * a short time. We had a bug in deferred freeing procedure which tried
+ * to free this region multiple times (ioremap can reuse the same address
+ * for many mappings).
+ */
+static void do_test_bulk_ioremapping(void)
+{
+	void __iomem *p;
+	int i;
+
+	for (i = 0; i < 10; ++i) {
+		p = ioremap_nocache(mmio_address, PAGE_SIZE);
+		if (p)
+			iounmap(p);
+	}
+
+	/* Force freeing. If it will crash we will know why. */
+	synchronize_rcu();
+}
+
 static int __init init(void)
 {
 	unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
@@ -104,6 +125,7 @@ static int __init init(void)
 	       "and writing 16 kB of rubbish in there.\n",
 	       size >> 10, mmio_address);
 	do_test(size);
+	do_test_bulk_ioremapping();
 	pr_info("All done.\n");
 	return 0;
 }
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 426f3a1a64d3..c03f14ab6667 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -278,11 +278,9 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 
 static void do_flush_tlb_all(void *info)
 {
-	unsigned long cpu = smp_processor_id();
-
 	__flush_tlb_all();
 	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
-		leave_mm(cpu);
+		leave_mm(smp_processor_id());
 }
 
 void flush_tlb_all(void)
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 02b442e92007..36df991985b2 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -374,7 +374,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 
 #ifdef CONFIG_X86_64
 
-__initcall(sysenter_setup);
+subsys_initcall(sysenter_setup);
 
 #ifdef CONFIG_SYSCTL
 /* Register vsyscall32 into the ABI table */
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index ac74869b8140..43456ee17692 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -74,7 +74,7 @@ static int __init init_vdso_vars(void)
 	vdso_enabled = 0;
 	return -ENOMEM;
 }
-__initcall(init_vdso_vars);
+subsys_initcall(init_vdso_vars);
 
 struct linux_binprm;
 