diff options
author | Rafael J. Wysocki <rjw@sisk.pl> | 2011-02-08 17:37:42 -0500 |
---|---|---|
committer | Rafael J. Wysocki <rjw@sisk.pl> | 2011-02-24 13:58:41 -0500 |
commit | 7bbb890358b96cb6f77adc6815f2072bdf813d5d (patch) | |
tree | 56d110312dd7ee4b5a49ae48bd9845ff95a5e7bb | |
parent | 073b4964b3b75fd9e19bf3933b26d9c23591c9db (diff) |
ACPI: Change acpi_ioremap_lock into a mutex
There's no reason why acpi_ioremap_lock has to be a spinlock:
all of the functions it is used in may sleep anyway, and it does
not need to be taken with interrupts off.
Use a mutex instead (that's going to allow us to put some more
operations under the lock later).
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
-rw-r--r-- | drivers/acpi/osl.c | 21 |
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index ff2189d3fa06..97061928249a 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -109,7 +109,7 @@ struct acpi_ioremap { | |||
109 | }; | 109 | }; |
110 | 110 | ||
111 | static LIST_HEAD(acpi_ioremaps); | 111 | static LIST_HEAD(acpi_ioremaps); |
112 | static DEFINE_SPINLOCK(acpi_ioremap_lock); | 112 | static DEFINE_MUTEX(acpi_ioremap_lock); |
113 | 113 | ||
114 | static void __init acpi_osi_setup_late(void); | 114 | static void __init acpi_osi_setup_late(void); |
115 | 115 | ||
@@ -303,7 +303,6 @@ void __iomem *__init_refok | |||
303 | acpi_os_map_memory(acpi_physical_address phys, acpi_size size) | 303 | acpi_os_map_memory(acpi_physical_address phys, acpi_size size) |
304 | { | 304 | { |
305 | struct acpi_ioremap *map, *tmp_map; | 305 | struct acpi_ioremap *map, *tmp_map; |
306 | unsigned long flags; | ||
307 | void __iomem *virt; | 306 | void __iomem *virt; |
308 | acpi_physical_address pg_off; | 307 | acpi_physical_address pg_off; |
309 | acpi_size pg_sz; | 308 | acpi_size pg_sz; |
@@ -334,18 +333,18 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size) | |||
334 | map->size = pg_sz; | 333 | map->size = pg_sz; |
335 | kref_init(&map->ref); | 334 | kref_init(&map->ref); |
336 | 335 | ||
337 | spin_lock_irqsave(&acpi_ioremap_lock, flags); | 336 | mutex_lock(&acpi_ioremap_lock); |
338 | /* Check if page has already been mapped. */ | 337 | /* Check if page has already been mapped. */ |
339 | tmp_map = acpi_map_lookup(phys, size); | 338 | tmp_map = acpi_map_lookup(phys, size); |
340 | if (tmp_map) { | 339 | if (tmp_map) { |
341 | kref_get(&tmp_map->ref); | 340 | kref_get(&tmp_map->ref); |
342 | spin_unlock_irqrestore(&acpi_ioremap_lock, flags); | 341 | mutex_unlock(&acpi_ioremap_lock); |
343 | iounmap(map->virt); | 342 | iounmap(map->virt); |
344 | kfree(map); | 343 | kfree(map); |
345 | return tmp_map->virt + (phys - tmp_map->phys); | 344 | return tmp_map->virt + (phys - tmp_map->phys); |
346 | } | 345 | } |
347 | list_add_tail_rcu(&map->list, &acpi_ioremaps); | 346 | list_add_tail_rcu(&map->list, &acpi_ioremaps); |
348 | spin_unlock_irqrestore(&acpi_ioremap_lock, flags); | 347 | mutex_unlock(&acpi_ioremap_lock); |
349 | 348 | ||
350 | return map->virt + (phys - map->phys); | 349 | return map->virt + (phys - map->phys); |
351 | } | 350 | } |
@@ -362,7 +361,6 @@ static void acpi_kref_del_iomap(struct kref *ref) | |||
362 | void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size) | 361 | void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size) |
363 | { | 362 | { |
364 | struct acpi_ioremap *map; | 363 | struct acpi_ioremap *map; |
365 | unsigned long flags; | ||
366 | int del; | 364 | int del; |
367 | 365 | ||
368 | if (!acpi_gbl_permanent_mmap) { | 366 | if (!acpi_gbl_permanent_mmap) { |
@@ -370,17 +368,17 @@ void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size) | |||
370 | return; | 368 | return; |
371 | } | 369 | } |
372 | 370 | ||
373 | spin_lock_irqsave(&acpi_ioremap_lock, flags); | 371 | mutex_lock(&acpi_ioremap_lock); |
374 | map = acpi_map_lookup_virt(virt, size); | 372 | map = acpi_map_lookup_virt(virt, size); |
375 | if (!map) { | 373 | if (!map) { |
376 | spin_unlock_irqrestore(&acpi_ioremap_lock, flags); | 374 | mutex_unlock(&acpi_ioremap_lock); |
377 | printk(KERN_ERR PREFIX "%s: bad address %p\n", __func__, virt); | 375 | printk(KERN_ERR PREFIX "%s: bad address %p\n", __func__, virt); |
378 | dump_stack(); | 376 | dump_stack(); |
379 | return; | 377 | return; |
380 | } | 378 | } |
381 | 379 | ||
382 | del = kref_put(&map->ref, acpi_kref_del_iomap); | 380 | del = kref_put(&map->ref, acpi_kref_del_iomap); |
383 | spin_unlock_irqrestore(&acpi_ioremap_lock, flags); | 381 | mutex_unlock(&acpi_ioremap_lock); |
384 | 382 | ||
385 | if (!del) | 383 | if (!del) |
386 | return; | 384 | return; |
@@ -417,7 +415,6 @@ static int acpi_os_map_generic_address(struct acpi_generic_address *addr) | |||
417 | static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr) | 415 | static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr) |
418 | { | 416 | { |
419 | void __iomem *virt; | 417 | void __iomem *virt; |
420 | unsigned long flags; | ||
421 | acpi_size size = addr->bit_width / 8; | 418 | acpi_size size = addr->bit_width / 8; |
422 | 419 | ||
423 | if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) | 420 | if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) |
@@ -426,9 +423,9 @@ static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr) | |||
426 | if (!addr->address || !addr->bit_width) | 423 | if (!addr->address || !addr->bit_width) |
427 | return; | 424 | return; |
428 | 425 | ||
429 | spin_lock_irqsave(&acpi_ioremap_lock, flags); | 426 | mutex_lock(&acpi_ioremap_lock); |
430 | virt = acpi_map_vaddr_lookup(addr->address, size); | 427 | virt = acpi_map_vaddr_lookup(addr->address, size); |
431 | spin_unlock_irqrestore(&acpi_ioremap_lock, flags); | 428 | mutex_unlock(&acpi_ioremap_lock); |
432 | 429 | ||
433 | acpi_os_unmap_memory(virt, size); | 430 | acpi_os_unmap_memory(virt, size); |
434 | } | 431 | } |