Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c      18
-rw-r--r--  block/blk-flush.c      8
-rw-r--r--  block/blk-lib.c       21
-rw-r--r--  block/blk-throttle.c  39
-rw-r--r--  block/cfq-iosched.c   16
-rw-r--r--  block/elevator.c       4
-rw-r--r--  block/genhd.c          2
-rw-r--r--  block/ioctl.c          8
8 files changed, 64 insertions, 52 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index b3cf121f1de9..a63336d49f30 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
  *
  */
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
 	blk_remove_plug(q);
 
@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
 	drive_stat_acct(rq, 1);
 	__elv_add_request(q, rq, where, 0);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -2627,13 +2628,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-				  struct delayed_work *dwork, unsigned long delay)
-{
-	return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
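
The blk-core.c hunks above add a force_kblockd argument to __blk_run_queue() and drop the now-unused kblockd_schedule_delayed_work(). A minimal sketch of how a caller would pick the flag, based only on the calling conventions visible in this diff (the example_* helpers are hypothetical):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical helper: restart a queue from ordinary process context. */
static void example_restart_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/*
	 * Not on a request completion path, so running q->request_fn()
	 * directly is fine: pass force_kblockd = false.
	 */
	__blk_run_queue(q, false);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/* Hypothetical helper: kick the queue from a completion path. */
static void example_kick_from_completion(struct request_queue *q)
{
	/*
	 * Called with the queue lock already held from an end_io path;
	 * recursing into request_fn here may confuse the driver, so ask
	 * for kblockd instead: pass force_kblockd = true.
	 */
	__blk_run_queue(q, true);
}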
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 54b123d6563e..b27d0208611b 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
 
 	/*
 	 * Moving a request silently to empty queue_head may stall the
-	 * queue. Kick the queue in those cases.
+	 * queue. Kick the queue in those cases. This function is called
+	 * from request completion path and calling directly into
+	 * request_fn may confuse the driver. Always use kblockd.
 	 */
 	if (was_empty && next_rq)
-		__blk_run_queue(q);
+		__blk_run_queue(q, true);
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q)
 		BUG();
 	}
 
-	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 	return rq;
 }
 
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 1a320d2406b0..bd3e8df4d5e2 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -109,7 +109,6 @@ struct bio_batch
 	atomic_t		done;
 	unsigned long		flags;
 	struct completion	*wait;
-	bio_end_io_t		*end_io;
 };
 
 static void bio_batch_end_io(struct bio *bio, int err)
@@ -122,17 +121,14 @@ static void bio_batch_end_io(struct bio *bio, int err)
 		else
 			clear_bit(BIO_UPTODATE, &bb->flags);
 	}
-	if (bb) {
-		if (bb->end_io)
-			bb->end_io(bio, err);
-		atomic_inc(&bb->done);
-		complete(bb->wait);
-	}
+	if (bb)
+		if (atomic_dec_and_test(&bb->done))
+			complete(bb->wait);
 	bio_put(bio);
 }
 
 /**
- * blkdev_issue_zeroout generate number of zero filed write bios
+ * blkdev_issue_zeroout - generate number of zero filed write bios
  * @bdev:	blockdev to issue
  * @sector:	start sector
  * @nr_sects:	number of sectors to write
@@ -150,13 +146,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 	int ret;
 	struct bio *bio;
 	struct bio_batch bb;
-	unsigned int sz, issued = 0;
+	unsigned int sz;
 	DECLARE_COMPLETION_ONSTACK(wait);
 
-	atomic_set(&bb.done, 0);
+	atomic_set(&bb.done, 1);
 	bb.flags = 1 << BIO_UPTODATE;
 	bb.wait = &wait;
-	bb.end_io = NULL;
 
 submit:
 	ret = 0;
@@ -185,12 +180,12 @@ submit:
 			break;
 		}
 		ret = 0;
-		issued++;
+		atomic_inc(&bb.done);
 		submit_bio(WRITE, bio);
 	}
 
 	/* Wait for bios in-flight */
-	while (issued != atomic_read(&bb.done))
+	if (!atomic_dec_and_test(&bb.done))
 		wait_for_completion(&wait);
 
 	if (!test_bit(BIO_UPTODATE, &bb.flags))
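
The blk-lib.c hunks replace the issued counter and optional end_io callback with a single reference count: bb.done starts at 1 for the submitter, each submitted bio adds one, and the last decrement wakes the waiter. A minimal sketch of that counting pattern, using the same 2.6.38-era bio interfaces as the hunk (struct bio_batch_example and the example_* functions are illustrative, and error handling is omitted for brevity):

#include <linux/bio.h>
#include <linux/completion.h>

struct bio_batch_example {
	atomic_t		done;
	struct completion	*wait;
};

static void example_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch_example *bb = bio->bi_private;

	/* Drop one reference per completed bio; the last one wakes the waiter. */
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

static void example_submit_batch(struct bio **bios, int nr)
{
	struct bio_batch_example bb;
	DECLARE_COMPLETION_ONSTACK(wait);
	int i;

	atomic_set(&bb.done, 1);	/* the submitter's own reference */
	bb.wait = &wait;

	for (i = 0; i < nr; i++) {
		bios[i]->bi_end_io = example_batch_end_io;
		bios[i]->bi_private = &bb;
		atomic_inc(&bb.done);	/* one reference per in-flight bio */
		submit_bio(WRITE, bios[i]);
	}

	/* Drop the submitter's reference; sleep only if bios are still in flight. */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);
}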
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 381b09bb562b..e36cc10a346c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -20,6 +20,11 @@ static int throtl_quantum = 32;
 /* Throttling is performed over 100ms slice and after that slice is renewed */
 static unsigned long throtl_slice = HZ/10;	/* 100 ms */
 
+/* A workqueue to queue throttle related work */
+static struct workqueue_struct *kthrotld_workqueue;
+static void throtl_schedule_delayed_work(struct throtl_data *td,
+				unsigned long delay);
+
 struct throtl_rb_root {
 	struct rb_root rb;
 	struct rb_node *left;
@@ -168,7 +173,15 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
 	 * tree of blkg (instead of traversing through hash list all
 	 * the time.
 	 */
-	tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+
+	/*
+	 * This is the common case when there are no blkio cgroups.
+	 * Avoid lookup in this case
+	 */
+	if (blkcg == &blkio_root_cgroup)
+		tg = &td->root_tg;
+	else
+		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
 
 	/* Fill in device details for root group */
 	if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
@@ -337,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
 	update_min_dispatch_time(st);
 
 	if (time_before_eq(st->min_disptime, jiffies))
-		throtl_schedule_delayed_work(td->queue, 0);
+		throtl_schedule_delayed_work(td, 0);
 	else
-		throtl_schedule_delayed_work(td->queue,
-				(st->min_disptime - jiffies));
+		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
 }
 
 static inline void
@@ -807,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
 }
 
 /* Call with queue lock held */
-void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
+static void
+throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 {
 
-	struct throtl_data *td = q->td;
 	struct delayed_work *dwork = &td->throtl_work;
 
 	if (total_nr_queued(td) > 0) {
@@ -819,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
 		 * Cancel that and schedule a new one.
 		 */
 		__cancel_delayed_work(dwork);
-		kblockd_schedule_delayed_work(q, dwork, delay);
+		queue_delayed_work(kthrotld_workqueue, dwork, delay);
 		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 				delay, jiffies);
 	}
 }
-EXPORT_SYMBOL(throtl_schedule_delayed_work);
 
 static void
 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -912,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
 	smp_mb__after_atomic_inc();
 
 	/* Schedule a work now to process the limit change */
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_write_bps(void *key,
@@ -926,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
 	smp_mb__before_atomic_inc();
 	atomic_inc(&td->limits_changed);
 	smp_mb__after_atomic_inc();
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_read_iops(void *key,
@@ -940,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
 	smp_mb__before_atomic_inc();
 	atomic_inc(&td->limits_changed);
 	smp_mb__after_atomic_inc();
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_write_iops(void *key,
@@ -954,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
 	smp_mb__before_atomic_inc();
 	atomic_inc(&td->limits_changed);
 	smp_mb__after_atomic_inc();
-	throtl_schedule_delayed_work(td->queue, 0);
+	throtl_schedule_delayed_work(td, 0);
 }
 
 void throtl_shutdown_timer_wq(struct request_queue *q)
@@ -1127,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)
 
 static int __init throtl_init(void)
 {
+	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
+	if (!kthrotld_workqueue)
+		panic("Failed to create kthrotld\n");
+
 	blkio_policy_register(&blkio_policy_throtl);
 	return 0;
 }
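
The blk-throttle.c hunks stop borrowing kblockd and give throttling its own workqueue: kthrotld is created at init with WQ_MEM_RECLAIM, and throtl_schedule_delayed_work() now queues onto it directly. A minimal sketch of the same pattern with hypothetical names, assuming only the standard workqueue API:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct delayed_work example_dwork;

static void example_work_fn(struct work_struct *work)
{
	/* dispatch the deferred work here */
}

static int __init example_init(void)
{
	/* WQ_MEM_RECLAIM guarantees forward progress under memory pressure. */
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&example_dwork, example_work_fn);
	return 0;
}

static void example_reschedule(unsigned long delay)
{
	/* Replace any pending instance, then queue on the private workqueue. */
	__cancel_delayed_work(&example_dwork);
	queue_delayed_work(example_wq, &example_dwork, delay);
}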
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 501ffdf0399c..ea83a4f0c27d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -599,7 +599,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
 }
 
 static inline unsigned
-cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
 	if (cfqd->cfq_latency) {
@@ -631,7 +631,7 @@ cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static inline void
 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	unsigned slice = cfq_scaled_group_slice(cfqd, cfqq);
+	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
 
 	cfqq->slice_start = jiffies;
 	cfqq->slice_end = jiffies + slice;
@@ -1671,7 +1671,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	 */
 	if (timed_out) {
 		if (cfq_cfqq_slice_new(cfqq))
-			cfqq->slice_resid = cfq_scaled_group_slice(cfqd, cfqq);
+			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
 		else
 			cfqq->slice_resid = cfqq->slice_end - jiffies;
 		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		    cfqd->busy_queues > 1) {
 			cfq_del_timer(cfqd, cfqq);
 			cfq_clear_cfqq_wait_request(cfqq);
-			__blk_run_queue(cfqd->queue);
+			__blk_run_queue(cfqd->queue, false);
 		} else {
 			cfq_blkiocg_update_idle_time_stats(
 				&cfqq->cfqg->blkg);
@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		__blk_run_queue(cfqd->queue);
+		__blk_run_queue(cfqd->queue, false);
 	}
 }
 
@@ -3432,6 +3432,10 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	struct cfq_io_context *cic = cfqd->active_cic;
 
+	/* If the queue already has requests, don't wait */
+	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+		return false;
+
 	/* If there are other queues in the group, don't wait */
 	if (cfqq->cfqg->nr_cfqq > 1)
 		return false;
@@ -3727,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(cfqd->queue);
+	__blk_run_queue(cfqd->queue, false);
 	spin_unlock_irq(q->queue_lock);
 }
 
diff --git a/block/elevator.c b/block/elevator.c
index 2569512830d3..236e93c1f46c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		__blk_run_queue(q);
+		__blk_run_queue(q, false);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		 * with anything.  There's no point in delaying queue
 		 * processing.
 		 */
-		__blk_run_queue(q);
+		__blk_run_queue(q, false);
 		break;
 
 	case ELEVATOR_INSERT_SORT:
diff --git a/block/genhd.c b/block/genhd.c
index 6a5b772aa201..cbf1112a885c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
 	struct block_device *bdev = bdget_disk(disk, partno);
 	if (bdev) {
 		fsync_bdev(bdev);
-		res = __invalidate_device(bdev);
+		res = __invalidate_device(bdev, true);
 		bdput(bdev);
 	}
 	return res;
diff --git a/block/ioctl.c b/block/ioctl.c
index 9049d460fa89..1124cd297263 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 			return -EINVAL;
 		if (get_user(n, (int __user *) arg))
 			return -EFAULT;
-		if (!(mode & FMODE_EXCL) &&
-		    blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
-			return -EBUSY;
+		if (!(mode & FMODE_EXCL)) {
+			bdgrab(bdev);
+			if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+				return -EBUSY;
+		}
 		ret = set_blocksize(bdev, n);
 		if (!(mode & FMODE_EXCL))
 			blkdev_put(bdev, mode | FMODE_EXCL);
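
The ioctl.c hunk changes BLKBSZSET to take an extra reference with bdgrab() before calling blkdev_get(), since blkdev_get() consumes a reference to the bdev it is handed (dropping it on failure, or later via blkdev_put()); without the extra reference the caller's bdev could be released out from under it. A minimal sketch of the resulting pattern as a hypothetical helper, assuming bdgrab() is visible to block code as it is in the patched ioctl.c:

#include <linux/fs.h>
#include <linux/blkdev.h>

/*
 * Hypothetical helper mirroring the patched BLKBSZSET path: take exclusive
 * access on a bdev we do not otherwise own a spare reference to.
 */
static int example_set_blocksize_excl(struct block_device *bdev,
				      fmode_t mode, int n)
{
	int ret;

	if (!(mode & FMODE_EXCL)) {
		/* blkdev_get() consumes a reference, so take one first. */
		bdgrab(bdev);
		if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
			return -EBUSY;
	}

	ret = set_blocksize(bdev, n);

	if (!(mode & FMODE_EXCL))
		blkdev_put(bdev, mode | FMODE_EXCL);

	return ret;
}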