author		Pekka Paalanen <pq@iki.fi>		2008-05-12 15:21:03 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-24 05:27:32 -0400
commit		87e547fe41a8b57d6d80afc67a0031fbe477eb0d (patch)
tree		96b34f7c11268ebb58a470faf858132a69b82639 /arch
parent		970e6fa03885f32cc43e42cb08c73a5f54cd8bd9 (diff)
x86 mmiotrace: fix page-unaligned ioremaps
mmiotrace_ioremap() expects to receive the original unaligned map phys address
and size. Also fix {un,}register_kmmio_probe() to deal properly with unaligned
size.

Signed-off-by: Pekka Paalanen <pq@iki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/mm/ioremap.c	4
-rw-r--r--	arch/x86/mm/kmmio.c	13
-rw-r--r--	arch/x86/mm/mmio-mod.c	1
3 files changed, 15 insertions(+), 3 deletions(-)
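For illustration only (not part of the commit): the number of pages a mapping
touches depends on the in-page offset of its start as well as on its length,
which is what the new p->len + (p->addr & ~PAGE_MASK) bound in kmmio.c accounts
for. A minimal userspace sketch of that arithmetic, using hypothetical
addresses and a hardcoded 4 KiB page size rather than the kernel definitions:

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Pages touched by a mapping that starts at addr and is len bytes long. */
static unsigned long pages_spanned(unsigned long addr, unsigned long len)
{
	/* Same idea as the patch: extend len by the in-page offset of addr. */
	unsigned long size_lim = len + (addr & ~PAGE_MASK);

	return (size_lim + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	/* 0x100 bytes starting 0x80 bytes before a page boundary: two pages. */
	printf("%lu\n", pages_spanned(0x1000f80UL, 0x100UL));
	/* The same length starting exactly on a page boundary: one page. */
	printf("%lu\n", pages_spanned(0x1000000UL, 0x100UL));
	return 0;
}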
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 8927c878544d..a7c80a6e8622 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -123,6 +123,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 {
 	unsigned long pfn, offset, vaddr;
 	resource_size_t last_addr;
+	const resource_size_t unaligned_phys_addr = phys_addr;
+	const unsigned long unaligned_size = size;
 	struct vm_struct *area;
 	unsigned long new_prot_val;
 	pgprot_t prot;
@@ -236,7 +238,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	}
 
 	ret_addr = (void __iomem *) (vaddr + offset);
-	mmiotrace_ioremap(phys_addr, size, ret_addr);
+	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
 
 	return ret_addr;
 }
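For context, paraphrasing the surrounding __ioremap_caller() code rather than
quoting it: later in the function the physical address is aligned down to a
page boundary and the size is rounded up to whole pages before the mapping is
built, so the caller's original request is only available if it is saved
first, which is what the two new locals do. A standalone sketch of that
effect, with a hypothetical address and a hardcoded 4 KiB page size:

#include <stdio.h>

#define PAGE_SIZE      4096UL
#define PAGE_MASK      (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* Hypothetical page-unaligned ioremap request. */
	unsigned long phys_addr = 0xfebf0f80UL;
	unsigned long size = 0x100UL;
	unsigned long last_addr = phys_addr + size - 1;

	/* Save the originals up front, as the patch does. */
	const unsigned long unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;

	/* What the mapping code works with from here on. */
	unsigned long offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	printf("aligned:   phys=%#lx size=%#lx offset=%#lx\n",
	       phys_addr, size, offset);
	printf("unaligned: phys=%#lx size=%#lx\n",
	       unaligned_phys_addr, unaligned_size);
	return 0;
}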
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 6a92d9111b64..93b1797666cb 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -351,11 +351,19 @@ static void release_kmmio_fault_page(unsigned long page,
 	}
 }
 
+/*
+ * With page-unaligned ioremaps, one or two armed pages may contain
+ * addresses from outside the intended mapping. Events for these addresses
+ * are currently silently dropped. The events may result only from programming
+ * mistakes by accessing addresses before the beginning or past the end of a
+ * mapping.
+ */
 int register_kmmio_probe(struct kmmio_probe *p)
 {
 	unsigned long flags;
 	int ret = 0;
 	unsigned long size = 0;
+	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
 
 	spin_lock_irqsave(&kmmio_lock, flags);
 	if (get_kmmio_probe(p->addr)) {
@@ -364,7 +372,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
 	}
 	kmmio_count++;
 	list_add_rcu(&p->list, &kmmio_probes);
-	while (size < p->len) {
+	while (size < size_lim) {
 		if (add_kmmio_fault_page(p->addr + size))
 			pr_err("kmmio: Unable to set page fault.\n");
 		size += PAGE_SIZE;
@@ -436,11 +444,12 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
 {
 	unsigned long flags;
 	unsigned long size = 0;
+	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
 	struct kmmio_fault_page *release_list = NULL;
 	struct kmmio_delayed_release *drelease;
 
 	spin_lock_irqsave(&kmmio_lock, flags);
-	while (size < p->len) {
+	while (size < size_lim) {
 		release_kmmio_fault_page(p->addr + size, &release_list);
 		size += PAGE_SIZE;
 	}
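A worked example of why the loop bound changes (illustration only, plain
userspace C with hypothetical values): a probe that starts in the middle of a
page and is one page long spans two pages, but the old bound of
size < p->len only arms the first of them.

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Count pages armed by the register_kmmio_probe() loop for a given bound. */
static unsigned long pages_armed(unsigned long addr, unsigned long bound)
{
	unsigned long size = 0, pages = 0;

	while (size < bound) {
		/* add_kmmio_fault_page(addr + size) would arm this page. */
		pages++;
		size += PAGE_SIZE;
	}
	return pages;
}

int main(void)
{
	/* Hypothetical probe: one page of registers starting mid-page. */
	unsigned long addr = 0xfebf0800UL;
	unsigned long len = PAGE_SIZE;
	unsigned long size_lim = len + (addr & ~PAGE_MASK);

	/* Old bound arms 1 page and misses the tail spilling into page 2. */
	printf("old: %lu page(s)\n", pages_armed(addr, len));
	/* New bound arms both pages the mapping actually touches. */
	printf("new: %lu page(s)\n", pages_armed(addr, size_lim));
	return 0;
}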
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index a8d2a0019da4..278998c1998f 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -280,6 +280,7 @@ static void ioremap_trace_core(unsigned long offset, unsigned long size,
 {
 	static atomic_t next_id;
 	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
+	/* These are page-unaligned. */
 	struct mmiotrace_map map = {
 		.phys = offset,
 		.virt = (unsigned long)addr,