diff options
-rw-r--r-- | drivers/block/xen-blkfront.c | 32 |
1 files changed, 16 insertions, 16 deletions
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 537cb722a21..5d9c559f187 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -82,6 +82,7 @@ static const struct block_device_operations xlvbd_block_fops;
82 | */ | 82 | */ |
83 | struct blkfront_info | 83 | struct blkfront_info |
84 | { | 84 | { |
85 | spinlock_t io_lock; | ||
85 | struct mutex mutex; | 86 | struct mutex mutex; |
86 | struct xenbus_device *xbdev; | 87 | struct xenbus_device *xbdev; |
87 | struct gendisk *gd; | 88 | struct gendisk *gd; |
@@ -106,8 +107,6 @@ struct blkfront_info
106 | int is_ready; | 107 | int is_ready; |
107 | }; | 108 | }; |
108 | 109 | ||
109 | static DEFINE_SPINLOCK(blkif_io_lock); | ||
110 | |||
111 | static unsigned int nr_minors; | 110 | static unsigned int nr_minors; |
112 | static unsigned long *minors; | 111 | static unsigned long *minors; |
113 | static DEFINE_SPINLOCK(minor_lock); | 112 | static DEFINE_SPINLOCK(minor_lock); |
@@ -418,7 +417,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
418 | struct request_queue *rq; | 417 | struct request_queue *rq; |
419 | struct blkfront_info *info = gd->private_data; | 418 | struct blkfront_info *info = gd->private_data; |
420 | 419 | ||
421 | rq = blk_init_queue(do_blkif_request, &blkif_io_lock); | 420 | rq = blk_init_queue(do_blkif_request, &info->io_lock); |
422 | if (rq == NULL) | 421 | if (rq == NULL) |
423 | return -1; | 422 | return -1; |
424 | 423 | ||
@@ -635,14 +634,14 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
635 | if (info->rq == NULL) | 634 | if (info->rq == NULL) |
636 | return; | 635 | return; |
637 | 636 | ||
638 | spin_lock_irqsave(&blkif_io_lock, flags); | 637 | spin_lock_irqsave(&info->io_lock, flags); |
639 | 638 | ||
640 | /* No more blkif_request(). */ | 639 | /* No more blkif_request(). */ |
641 | blk_stop_queue(info->rq); | 640 | blk_stop_queue(info->rq); |
642 | 641 | ||
643 | /* No more gnttab callback work. */ | 642 | /* No more gnttab callback work. */ |
644 | gnttab_cancel_free_callback(&info->callback); | 643 | gnttab_cancel_free_callback(&info->callback); |
645 | spin_unlock_irqrestore(&blkif_io_lock, flags); | 644 | spin_unlock_irqrestore(&info->io_lock, flags); |
646 | 645 | ||
647 | /* Flush gnttab callback work. Must be done with no locks held. */ | 646 | /* Flush gnttab callback work. Must be done with no locks held. */ |
648 | flush_work_sync(&info->work); | 647 | flush_work_sync(&info->work); |
@@ -674,16 +673,16 @@ static void blkif_restart_queue(struct work_struct *work)
674 | { | 673 | { |
675 | struct blkfront_info *info = container_of(work, struct blkfront_info, work); | 674 | struct blkfront_info *info = container_of(work, struct blkfront_info, work); |
676 | 675 | ||
677 | spin_lock_irq(&blkif_io_lock); | 676 | spin_lock_irq(&info->io_lock); |
678 | if (info->connected == BLKIF_STATE_CONNECTED) | 677 | if (info->connected == BLKIF_STATE_CONNECTED) |
679 | kick_pending_request_queues(info); | 678 | kick_pending_request_queues(info); |
680 | spin_unlock_irq(&blkif_io_lock); | 679 | spin_unlock_irq(&info->io_lock); |
681 | } | 680 | } |
682 | 681 | ||
683 | static void blkif_free(struct blkfront_info *info, int suspend) | 682 | static void blkif_free(struct blkfront_info *info, int suspend) |
684 | { | 683 | { |
685 | /* Prevent new requests being issued until we fix things up. */ | 684 | /* Prevent new requests being issued until we fix things up. */ |
686 | spin_lock_irq(&blkif_io_lock); | 685 | spin_lock_irq(&info->io_lock); |
687 | info->connected = suspend ? | 686 | info->connected = suspend ? |
688 | BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; | 687 | BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; |
689 | /* No more blkif_request(). */ | 688 | /* No more blkif_request(). */ |
@@ -691,7 +690,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
691 | blk_stop_queue(info->rq); | 690 | blk_stop_queue(info->rq); |
692 | /* No more gnttab callback work. */ | 691 | /* No more gnttab callback work. */ |
693 | gnttab_cancel_free_callback(&info->callback); | 692 | gnttab_cancel_free_callback(&info->callback); |
694 | spin_unlock_irq(&blkif_io_lock); | 693 | spin_unlock_irq(&info->io_lock); |
695 | 694 | ||
696 | /* Flush gnttab callback work. Must be done with no locks held. */ | 695 | /* Flush gnttab callback work. Must be done with no locks held. */ |
697 | flush_work_sync(&info->work); | 696 | flush_work_sync(&info->work); |
@@ -727,10 +726,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
727 | struct blkfront_info *info = (struct blkfront_info *)dev_id; | 726 | struct blkfront_info *info = (struct blkfront_info *)dev_id; |
728 | int error; | 727 | int error; |
729 | 728 | ||
730 | spin_lock_irqsave(&blkif_io_lock, flags); | 729 | spin_lock_irqsave(&info->io_lock, flags); |
731 | 730 | ||
732 | if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { | 731 | if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { |
733 | spin_unlock_irqrestore(&blkif_io_lock, flags); | 732 | spin_unlock_irqrestore(&info->io_lock, flags); |
734 | return IRQ_HANDLED; | 733 | return IRQ_HANDLED; |
735 | } | 734 | } |
736 | 735 | ||
@@ -815,7 +814,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
815 | 814 | ||
816 | kick_pending_request_queues(info); | 815 | kick_pending_request_queues(info); |
817 | 816 | ||
818 | spin_unlock_irqrestore(&blkif_io_lock, flags); | 817 | spin_unlock_irqrestore(&info->io_lock, flags); |
819 | 818 | ||
820 | return IRQ_HANDLED; | 819 | return IRQ_HANDLED; |
821 | } | 820 | } |
@@ -990,6 +989,7 @@ static int blkfront_probe(struct xenbus_device *dev,
990 | } | 989 | } |
991 | 990 | ||
992 | mutex_init(&info->mutex); | 991 | mutex_init(&info->mutex); |
992 | spin_lock_init(&info->io_lock); | ||
993 | info->xbdev = dev; | 993 | info->xbdev = dev; |
994 | info->vdevice = vdevice; | 994 | info->vdevice = vdevice; |
995 | info->connected = BLKIF_STATE_DISCONNECTED; | 995 | info->connected = BLKIF_STATE_DISCONNECTED; |
@@ -1067,7 +1067,7 @@ static int blkif_recover(struct blkfront_info *info)
1067 | 1067 | ||
1068 | xenbus_switch_state(info->xbdev, XenbusStateConnected); | 1068 | xenbus_switch_state(info->xbdev, XenbusStateConnected); |
1069 | 1069 | ||
1070 | spin_lock_irq(&blkif_io_lock); | 1070 | spin_lock_irq(&info->io_lock); |
1071 | 1071 | ||
1072 | /* Now safe for us to use the shared ring */ | 1072 | /* Now safe for us to use the shared ring */ |
1073 | info->connected = BLKIF_STATE_CONNECTED; | 1073 | info->connected = BLKIF_STATE_CONNECTED; |
@@ -1078,7 +1078,7 @@ static int blkif_recover(struct blkfront_info *info)
1078 | /* Kick any other new requests queued since we resumed */ | 1078 | /* Kick any other new requests queued since we resumed */ |
1079 | kick_pending_request_queues(info); | 1079 | kick_pending_request_queues(info); |
1080 | 1080 | ||
1081 | spin_unlock_irq(&blkif_io_lock); | 1081 | spin_unlock_irq(&info->io_lock); |
1082 | 1082 | ||
1083 | return 0; | 1083 | return 0; |
1084 | } | 1084 | } |
@@ -1276,10 +1276,10 @@ static void blkfront_connect(struct blkfront_info *info)
1276 | xenbus_switch_state(info->xbdev, XenbusStateConnected); | 1276 | xenbus_switch_state(info->xbdev, XenbusStateConnected); |
1277 | 1277 | ||
1278 | /* Kick pending requests. */ | 1278 | /* Kick pending requests. */ |
1279 | spin_lock_irq(&blkif_io_lock); | 1279 | spin_lock_irq(&info->io_lock); |
1280 | info->connected = BLKIF_STATE_CONNECTED; | 1280 | info->connected = BLKIF_STATE_CONNECTED; |
1281 | kick_pending_request_queues(info); | 1281 | kick_pending_request_queues(info); |
1282 | spin_unlock_irq(&blkif_io_lock); | 1282 | spin_unlock_irq(&info->io_lock); |
1283 | 1283 | ||
1284 | add_disk(info->gd); | 1284 | add_disk(info->gd); |
1285 | 1285 | ||