author      Christoph Hellwig <hch@lst.de>              2007-09-10 20:02:27 -0400
committer   David Woodhouse <dwmw2@hera.kernel.org>     2007-09-10 20:02:27 -0400
commit      90f1c19a9fd2a943adc69d2b9b8c83bcc4bba6f9 (patch)
tree        4928c818b63181775d7800158882024390b7103a /fs/btrfs/extent_map.c
parent      9433063bcb4b3c0c9e5ef3c24a811b63084dedb4 (diff)
Btrfs: [PATCH] extent_map: fix locking for bio completion
The bio completion handlers can run in any context; for example, with the
old IDE driver they run in hardirq context with interrupts disabled, so
lockdep rightfully warns about the write_lock_irq usage in these
handlers.
This patch switches clear_extent_bit and set_extent_bit to
write_lock_irqsave to fix this problem.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
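
For context, here is a minimal sketch of the locking pattern the patch switches to. It is not taken from the btrfs sources; example_lock and example_update_tree are hypothetical names standing in for tree->lock and the extent-state helpers. The point it illustrates: write_unlock_irq() unconditionally re-enables interrupts, which is wrong if the code was entered with interrupts already disabled (as in a hardirq-context bio end_io handler), whereas write_lock_irqsave()/write_unlock_irqrestore() save and restore the caller's interrupt state.

#include <linux/spinlock.h>

/* hypothetical lock, standing in for tree->lock */
static DEFINE_RWLOCK(example_lock);

/*
 * May be reached from process context or from a bio completion handler
 * running in hardirq context with interrupts already disabled.
 */
static void example_update_tree(void)
{
	unsigned long flags;

	/* save the current interrupt state before disabling interrupts */
	write_lock_irqsave(&example_lock, flags);

	/* ... modify the shared state protected by the lock ... */

	/* restore the saved state rather than unconditionally enabling irqs */
	write_unlock_irqrestore(&example_lock, flags);
}

Because the saved flags live on the caller's stack, the same function is safe regardless of whether it is invoked from process, softirq, or hardirq context.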
Diffstat (limited to 'fs/btrfs/extent_map.c')
-rw-r--r--  fs/btrfs/extent_map.c  14
1 file changed, 8 insertions, 6 deletions
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 370ed97e52f5..3021564ac62c 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -456,6 +456,7 @@ int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
 	struct extent_state *state;
 	struct extent_state *prealloc = NULL;
 	struct rb_node *node;
+	unsigned long flags;
 	int err;
 	int set = 0;
 
@@ -466,7 +467,7 @@ again:
 		return -ENOMEM;
 	}
 
-	write_lock_irq(&tree->lock);
+	write_lock_irqsave(&tree->lock, flags);
 	/*
 	 * this search will find the extents that end after
 	 * our range starts
@@ -533,7 +534,7 @@ again:
 	goto search_again;
 
 out:
-	write_unlock_irq(&tree->lock);
+	write_unlock_irqrestore(&tree->lock, flags);
 	if (prealloc)
 		free_extent_state(prealloc);
 
@@ -542,7 +543,7 @@ out:
 search_again:
 	if (start >= end)
 		goto out;
-	write_unlock_irq(&tree->lock);
+	write_unlock_irqrestore(&tree->lock, flags);
 	if (mask & __GFP_WAIT)
 		cond_resched();
 	goto again;
@@ -628,6 +629,7 @@ int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
 	struct extent_state *state;
 	struct extent_state *prealloc = NULL;
 	struct rb_node *node;
+	unsigned long flags;
 	int err = 0;
 	int set;
 	u64 last_start;
@@ -639,7 +641,7 @@ again:
 		return -ENOMEM;
 	}
 
-	write_lock_irq(&tree->lock);
+	write_lock_irqsave(&tree->lock, flags);
 	/*
 	 * this search will find all the extents that end after
 	 * our range starts.
@@ -759,7 +761,7 @@ again:
 	goto search_again;
 
 out:
-	write_unlock_irq(&tree->lock);
+	write_unlock_irqrestore(&tree->lock, flags);
 	if (prealloc)
 		free_extent_state(prealloc);
 
@@ -768,7 +770,7 @@ out:
 search_again:
 	if (start > end)
 		goto out;
-	write_unlock_irq(&tree->lock);
+	write_unlock_irqrestore(&tree->lock, flags);
 	if (mask & __GFP_WAIT)
 		cond_resched();
 	goto again;