author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2005-11-25 10:52:51 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2005-11-25 10:52:51 -0500
commit		5edf71ae129167ac276ebac18b25ccc7bec6ac3e
tree		619daac39d08dbf66fb19698205b263e21bba3d0 /arch/arm/mm
parent		3c0bdac3875505516eda1c6b6e68dd84eff3b231
[ARM] Do not call flush_tlb_kernel_range() with IRQs disabled.
We must not call TLB maintenance operations with interrupts disabled,
otherwise we risk a lockup in the SMP IPI code.
This means that consistent_free() cannot be called from a context with
IRQs disabled. In addition, we must not hold the lock in consistent_free()
when we call flush_tlb_kernel_range(). However, we must continue to
prevent consistent_alloc() from re-using the memory region until we've
finished tearing down the mapping and dealing with the TLB.
Therefore, leave the vm_region entry in the list, but mark it inactive
before dropping the lock and starting the tear-down process. After the
mapping has been torn down, re-acquire the lock and remove the entry
from the list.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/consistent.c | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 47b0b767f080..dbfe9e891f01 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -66,6 +66,7 @@ struct vm_region {
 	unsigned long	vm_start;
 	unsigned long	vm_end;
 	struct page	*vm_pages;
+	int		vm_active;
 };
 
 static struct vm_region consistent_head = {
@@ -104,6 +105,7 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
 	list_add_tail(&new->vm_list, &c->vm_list);
 	new->vm_start = addr;
 	new->vm_end = addr + size;
+	new->vm_active = 1;
 
 	spin_unlock_irqrestore(&consistent_lock, flags);
 	return new;
@@ -120,7 +122,7 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
 	struct vm_region *c;
 
 	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if (c->vm_start == addr)
+		if (c->vm_active && c->vm_start == addr)
 			goto out;
 	}
 	c = NULL;
@@ -319,6 +321,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
 
 /*
  * free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
  */
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
@@ -326,14 +329,18 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 	unsigned long flags, addr;
 	pte_t *ptep;
 
+	WARN_ON(irqs_disabled());
+
 	size = PAGE_ALIGN(size);
 
 	spin_lock_irqsave(&consistent_lock, flags);
-
 	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
 	if (!c)
 		goto no_area;
 
+	c->vm_active = 0;
+	spin_unlock_irqrestore(&consistent_lock, flags);
+
 	if ((c->vm_end - c->vm_start) != size) {
 		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
 		       __func__, c->vm_end - c->vm_start, size);
@@ -372,8 +379,8 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 
 	flush_tlb_kernel_range(c->vm_start, c->vm_end);
 
+	spin_lock_irqsave(&consistent_lock, flags);
 	list_del(&c->vm_list);
-
 	spin_unlock_irqrestore(&consistent_lock, flags);
 
 	kfree(c);
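
For readers skimming the hunks, the teardown ordering described in the commit message can be condensed into a minimal sketch. The PTE teardown loop of dma_free_coherent() is elided and the wrapper name example_consistent_free() is hypothetical; consistent_lock, consistent_head, vm_region_find(), flush_tlb_kernel_range() and the vm_region fields are the ones touched by the patch.

/*
 * Minimal sketch of the teardown ordering after this patch, not the
 * full kernel function: the page-table teardown is elided and the
 * wrapper name is hypothetical.
 */
static void example_consistent_free(void *cpu_addr)
{
	struct vm_region *c;
	unsigned long flags;

	WARN_ON(irqs_disabled());	/* TLB flush may need SMP IPIs */

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		spin_unlock_irqrestore(&consistent_lock, flags);
		return;
	}

	/* Hide the region from consistent_alloc(), then drop the lock. */
	c->vm_active = 0;
	spin_unlock_irqrestore(&consistent_lock, flags);

	/* ... tear down the page tables for c->vm_start..c->vm_end ... */

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	/* Mapping and TLB are gone; now unlink and free the region. */
	spin_lock_irqsave(&consistent_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
}

The vm_active flag is what allows the region to stay on the list (so consistent_alloc() cannot hand out the same address range) while the lock is dropped across the TLB flush.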