 arch/x86/mm/kmmio.c         | 16 +++++++++++++---
 arch/x86/mm/testmmiotrace.c | 22 ++++++++++++++++++++++
 2 files changed, 35 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 5d0e67fff1a..e5d5e2ce9f7 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -45,6 +45,8 @@ struct kmmio_fault_page {
 	 * Protected by kmmio_lock, when linked into kmmio_page_table.
 	 */
 	int count;
+
+	bool scheduled_for_release;
 };
 
 struct kmmio_delayed_release {
@@ -398,8 +400,11 @@ static void release_kmmio_fault_page(unsigned long page,
 	BUG_ON(f->count < 0);
 	if (!f->count) {
 		disarm_kmmio_fault_page(f);
-		f->release_next = *release_list;
-		*release_list = f;
+		if (!f->scheduled_for_release) {
+			f->release_next = *release_list;
+			*release_list = f;
+			f->scheduled_for_release = true;
+		}
 	}
 }
 
@@ -471,8 +476,10 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
 			prevp = &f->release_next;
 		} else {
 			*prevp = f->release_next;
+			f->release_next = NULL;
+			f->scheduled_for_release = false;
 		}
-		f = f->release_next;
+		f = *prevp;
 	}
 	spin_unlock_irqrestore(&kmmio_lock, flags);
 
@@ -510,6 +517,9 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
 	kmmio_count--;
 	spin_unlock_irqrestore(&kmmio_lock, flags);
 
+	if (!release_list)
+		return;
+
 	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
 	if (!drelease) {
 		pr_crit("leaking kmmio_fault_page objects.\n");
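
The change above guards against a kmmio_fault_page being queued on the deferred release list twice: ioremap() can hand out the same address again, so release_kmmio_fault_page() may fire for a page that is still waiting for the RCU callback, linking it into the list a second time and ultimately freeing it twice. Below is a minimal user-space sketch of the two idioms the patch relies on (the scheduled_for_release guard and the *prevp walk); the function names and the main() harness are illustrative only, not kernel code.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct fault_page {
            int count;                      /* > 0: still in use, keep it */
            bool scheduled_for_release;     /* mirrors the new kmmio field */
            struct fault_page *release_next;
    };

    /* As in release_kmmio_fault_page() after the fix: queueing is a no-op
     * if the page is already on the release list, so a page released twice
     * before the deferred drain runs cannot be linked (and freed) twice. */
    static void schedule_release(struct fault_page *f,
                                 struct fault_page **release_list)
    {
            if (f->scheduled_for_release)
                    return;
            f->release_next = *release_list;
            *release_list = f;
            f->scheduled_for_release = true;
    }

    /* As in remove_kmmio_fault_pages() after the fix: unlinked entries get
     * their next pointer cleared, so the walk must advance with f = *prevp;
     * the old f = f->release_next would read the just-cleared pointer. */
    static void drain(struct fault_page **release_list)
    {
            struct fault_page **prevp = release_list;
            struct fault_page *f = *release_list;

            while (f) {
                    if (f->count) {
                            prevp = &f->release_next;  /* keep this entry */
                    } else {
                            *prevp = f->release_next;  /* unlink for freeing */
                            f->release_next = NULL;
                            f->scheduled_for_release = false;
                    }
                    f = *prevp;  /* not f->release_next: may have been cleared */
            }
    }

    int main(void)
    {
            struct fault_page a = { 0 }, *list = NULL;

            schedule_release(&a, &list);
            schedule_release(&a, &list);  /* without the flag, this self-links a */
            drain(&list);                 /* ...and the drain visits a twice */
            printf("list empty after drain: %d\n", list == NULL);  /* prints 1 */
            return 0;
    }
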
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index 8565d944f7c..38868adf07e 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -90,6 +90,27 @@ static void do_test(unsigned long size)
 	iounmap(p);
 }
 
+/*
+ * Tests how mmiotrace behaves in the face of multiple ioremap/iounmap
+ * calls in a short time. We had a bug in the deferred freeing procedure
+ * that tried to free the same region multiple times (ioremap can reuse
+ * the same address for many mappings).
+ */
+static void do_test_bulk_ioremapping(void)
+{
+	void __iomem *p;
+	int i;
+
+	for (i = 0; i < 10; ++i) {
+		p = ioremap_nocache(mmio_address, PAGE_SIZE);
+		if (p)
+			iounmap(p);
+	}
+
+	/* Force the pending frees. If it crashes, we will know why. */
+	synchronize_rcu();
+}
+
 static int __init init(void)
 {
 	unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
@@ -104,6 +125,7 @@ static int __init init(void)
 		"and writing 16 kB of rubbish in there.\n",
 		size >> 10, mmio_address);
 	do_test(size);
+	do_test_bulk_ioremapping();
 	pr_info("All done.\n");
 	return 0;
 }
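
For completeness: the test module takes the physical address to poke as its mmio_address module parameter, so exercising this path should look something like modprobe testmmiotrace mmio_address=<addr> with mmiotrace enabled (see the mmiotrace documentation for how to pick a safe address). The new do_test_bulk_ioremapping() then remaps the same page ten times in quick succession and forces the deferred free with synchronize_rcu(), so a kernel still carrying the double-free bug should crash right here rather than at some unrelated later point.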