author     Nick Piggin <npiggin@suse.de>       2008-04-29 08:48:33 -0400
committer  Jens Axboe <jens.axboe@oracle.com>  2008-04-29 08:48:33 -0400
commit     75ad23bc0fcb4f992a5d06982bf0857ab1738e9e
tree       8668ef63b1f420252ae41aed9e13737d49fd8054
parent     68154e90c9d1492d570671ae181d9a8f8530da55
block: make queue flags non-atomic
We can save some atomic ops in the IO path if we clearly define
the rules for how the queue flags may be modified.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
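In short, the patch replaces atomic test_and_set_bit()/clear_bit() manipulation of q->queue_flags with plain __set_bit()/__clear_bit(), wrapped in queue_flag_set()/queue_flag_clear() helpers that must be called with q->queue_lock held; _unlocked variants exist for queues that are not visible to anyone else. A minimal caller-side sketch of the convention (example_stop_device() is illustrative, not part of the patch):

static void example_stop_device(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        /* non-atomic bitop; q->queue_lock provides the exclusion */
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}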
 block/blk-core.c                  | 39
 block/blk-merge.c                 |  6
 block/blk-settings.c              |  2
 block/blk-tag.c                   |  8
 block/elevator.c                  | 13
 drivers/block/loop.c              |  2
 drivers/block/ub.c                |  2
 drivers/md/dm-table.c             |  7
 drivers/md/md.c                   |  3
 drivers/scsi/scsi_debug.c         |  2
 drivers/scsi/scsi_lib.c           | 31
 drivers/scsi/scsi_transport_sas.c |  3
 include/linux/blkdev.h            | 33
 13 files changed, 102 insertions(+), 49 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index e447799256d6..d2f23ec5ebfa 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -198,7 +198,8 @@ void blk_plug_device(struct request_queue *q)
 	if (blk_queue_stopped(q))
 		return;
 
-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+		__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
@@ -213,9 +214,10 @@ int blk_remove_plug(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
 		return 0;
 
+	queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
 	del_timer(&q->unplug_timer);
 	return 1;
 }
@@ -311,15 +313,16 @@ void blk_start_queue(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 
 	/*
 	 * one level of recursion is ok and is much faster than kicking
 	 * the unplug handling
 	 */
-	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		queue_flag_set(QUEUE_FLAG_REENTER, q);
 		q->request_fn(q);
-		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
 		blk_plug_device(q);
 		kblockd_schedule_work(&q->unplug_work);
@@ -344,7 +347,7 @@ EXPORT_SYMBOL(blk_start_queue);
 void blk_stop_queue(struct request_queue *q)
 {
 	blk_remove_plug(q);
-	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
 
@@ -373,11 +376,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * blk_run_queue - run a single device queue
  * @q: The queue to run
  */
-void blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
 	blk_remove_plug(q);
 
 	/*
@@ -385,15 +385,28 @@ void blk_run_queue(struct request_queue *q)
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
 	if (!elv_queue_empty(q)) {
-		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+			queue_flag_set(QUEUE_FLAG_REENTER, q);
 			q->request_fn(q);
-			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+			queue_flag_clear(QUEUE_FLAG_REENTER, q);
 		} else {
 			blk_plug_device(q);
 			kblockd_schedule_work(&q->unplug_work);
 		}
 	}
+}
+EXPORT_SYMBOL(__blk_run_queue);
 
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -406,7 +419,7 @@ void blk_put_queue(struct request_queue *q)
 void blk_cleanup_queue(struct request_queue *q)
 {
 	mutex_lock(&q->sysfs_lock);
-	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 	mutex_unlock(&q->sysfs_lock);
 
 	if (q->elevator)
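The blk_run_queue() change above is the usual locked/unlocked split: __blk_run_queue() expects q->queue_lock to already be held (so it may use the non-atomic flag helpers), while blk_run_queue() becomes a thin lock-taking wrapper around it. A hedged sketch of a caller that already holds the lock (the scsi_lib.c hunk further down is the real in-tree example; example_kick_queue() is illustrative):

static void example_kick_queue(struct request_queue *q)
{
        /* same assertion style the new queue_flag_* helpers use */
        WARN_ON_ONCE(!spin_is_locked(q->queue_lock));
        __blk_run_queue(q);     /* queue is run; lock stays held throughout */
}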
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b5c5c4a9e3f0..73b23562af20 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -55,7 +55,7 @@ void blk_recalc_rq_segments(struct request *rq)
 	if (!rq->bio)
 		return;
 
-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	hw_seg_size = seg_size = 0;
 	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
 	rq_for_each_segment(bv, rq, iter) {
@@ -128,7 +128,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
-	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
 		return 0;
 
 	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
@@ -175,7 +175,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	int nsegs, cluster;
 
 	nsegs = 0;
-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 
 	/*
 	 * for each bio in rq
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 77b51dc37a3c..6089384ab064 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -287,7 +287,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 4780a46ce234..e176ddbe599e 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -70,7 +70,7 @@ void __blk_queue_free_tags(struct request_queue *q)
 	__blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
-	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(blk_free_tags);
 **/
 void blk_queue_free_tags(struct request_queue *q)
 {
-	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
 }
 EXPORT_SYMBOL(blk_queue_free_tags);
 
@@ -188,7 +188,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 		rc = blk_queue_resize_tags(q, depth);
 		if (rc)
 			return rc;
-		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_QUEUED, q);
 		return 0;
 	} else
 		atomic_inc(&tags->refcnt);
@@ -197,7 +197,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 * assign it, all done
 	 */
 	q->queue_tags = tags;
-	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+	queue_flag_set(QUEUE_FLAG_QUEUED, q);
 	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 fail:
diff --git a/block/elevator.c b/block/elevator.c
index 88318c383608..e8a90fe23424 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1070,7 +1070,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	 */
 	spin_lock_irq(q->queue_lock);
 
-	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
 
 	elv_drain_elevator(q);
 
@@ -1104,7 +1104,10 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	 * finally exit old elevator and turn off BYPASS.
 	 */
 	elevator_exit(old_elevator);
-	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+	spin_lock_irq(q->queue_lock);
+	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
+
 	return 1;
 
 fail_register:
@@ -1115,7 +1118,11 @@ fail_register:
 	elevator_exit(e);
 	q->elevator = old_elevator;
 	elv_register_queue(q);
-	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+
+	spin_lock_irq(q->queue_lock);
+	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
+
 	return 0;
 }
 
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f7f163557aa0..d3a25b027ff9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -546,7 +546,7 @@ static void loop_unplug(struct request_queue *q)
 {
 	struct loop_device *lo = q->queuedata;
 
-	clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+	queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
 	blk_run_address_space(lo->lo_backing_file->f_mapping);
 }
 
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 27bfe72aab59..e322cce8c12d 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -2399,7 +2399,7 @@ static void ub_disconnect(struct usb_interface *intf)
 	del_gendisk(lun->disk);
 	/*
 	 * I wish I could do:
-	 *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	 *    queue_flag_set(QUEUE_FLAG_DEAD, q);
 	 * As it is, we rely on our internal poisoning and let
 	 * the upper levels to spin furiously failing all the I/O.
 	 */
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 51be53344214..73326e7c54bf 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -873,10 +873,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	q->max_hw_sectors = t->limits.max_hw_sectors;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
 	q->bounce_pfn = t->limits.bounce_pfn;
+	/* XXX: the below will probably go bug. must ensure there can be no
+	 * concurrency on queue_flags, and use the unlocked versions...
+	 */
 	if (t->limits.no_cluster)
-		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
+		queue_flag_clear(QUEUE_FLAG_CLUSTER, q);
 	else
-		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
+		queue_flag_set(QUEUE_FLAG_CLUSTER, q);
 
 }
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 87620b705bee..acd716b657b8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -282,7 +282,8 @@ static mddev_t * mddev_find(dev_t unit)
 		kfree(new);
 		return NULL;
 	}
-	set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
+	/* Can be unlocked because the queue is new: no concurrency */
+	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);
 
 	blk_queue_make_request(new->queue, md_fail_request);
 
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 07103c399fe0..f6600bfb5bde 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1773,7 +1773,7 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp)
 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
-	set_bit(QUEUE_FLAG_BIDI, &sdp->request_queue->queue_flags);
+	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
 	return 0;
 }
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 67f412bb4974..d545ad1cf47a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -536,6 +536,9 @@ static void scsi_run_queue(struct request_queue *q)
 	       !shost->host_blocked && !shost->host_self_blocked &&
 		!((shost->can_queue > 0) &&
 		  (shost->host_busy >= shost->can_queue))) {
+
+		int flagset;
+
 		/*
 		 * As long as shost is accepting commands and we have
 		 * starved queues, call blk_run_queue. scsi_request_fn
@@ -549,19 +552,20 @@ static void scsi_run_queue(struct request_queue *q)
 		sdev = list_entry(shost->starved_list.next,
 				  struct scsi_device, starved_entry);
 		list_del_init(&sdev->starved_entry);
-		spin_unlock_irqrestore(shost->host_lock, flags);
+		spin_unlock(shost->host_lock);
 
+		spin_lock(sdev->request_queue->queue_lock);
+		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
+				!test_bit(QUEUE_FLAG_REENTER,
+					&sdev->request_queue->queue_flags);
+		if (flagset)
+			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
+		__blk_run_queue(sdev->request_queue);
+		if (flagset)
+			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
+		spin_unlock(sdev->request_queue->queue_lock);
 
-		if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-		    !test_and_set_bit(QUEUE_FLAG_REENTER,
-				      &sdev->request_queue->queue_flags)) {
-			blk_run_queue(sdev->request_queue);
-			clear_bit(QUEUE_FLAG_REENTER,
-				  &sdev->request_queue->queue_flags);
-		} else
-			blk_run_queue(sdev->request_queue);
-
-		spin_lock_irqsave(shost->host_lock, flags);
+		spin_lock(shost->host_lock);
 		if (unlikely(!list_empty(&sdev->starved_entry)))
 			/*
 			 * sdev lost a race, and was put back on the
@@ -1585,8 +1589,9 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 
 	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
 
+	/* New queue, no concurrency on queue_flags */
 	if (!shost->use_clustering)
-		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
 
 	/*
 	 * set a reasonable default alignment on word boundaries: the
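Note the lock juggling in scsi_run_queue() above: the outer spin_lock_irqsave() already disabled interrupts, so the loop can hand off from shost->host_lock to the per-device queue_lock with the plain spin_lock()/spin_unlock() variants, flip QUEUE_FLAG_REENTER non-atomically, and call __blk_run_queue() with the queue lock held. Reduced to its locking skeleton (a sketch with illustrative names, not the full function):

static void example_run_starved_queue(struct Scsi_Host *shost,
                                      struct scsi_device *sdev)
{
        struct request_queue *q = sdev->request_queue;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        /* ... pick sdev off shost->starved_list ... */
        spin_unlock(shost->host_lock);          /* irqs stay disabled */

        spin_lock(q->queue_lock);               /* queue_flag_* now legal */
        __blk_run_queue(q);
        spin_unlock(q->queue_lock);

        spin_lock(shost->host_lock);            /* retake for the list check */
        spin_unlock_irqrestore(shost->host_lock, flags);
}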
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 7899e3dda9bf..f4461d35ffb9 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -248,8 +248,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 	else
 		q->queuedata = shost;
 
-	set_bit(QUEUE_FLAG_BIDI, &q->queue_flags);
-
+	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
 	return 0;
 }
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c5065e3d2ca9..8ca481cd7d73 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -408,6 +408,30 @@ struct request_queue
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
 
+static inline void queue_flag_set_unlocked(unsigned int flag,
+					   struct request_queue *q)
+{
+	__set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+	WARN_ON_ONCE(!spin_is_locked(q->queue_lock));
+	__set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear_unlocked(unsigned int flag,
+					     struct request_queue *q)
+{
+	__clear_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+	WARN_ON_ONCE(!spin_is_locked(q->queue_lock));
+	__clear_bit(flag, &q->queue_flags);
+}
+
 enum {
 	/*
 	 * Hardbarrier is supported with one of the following methods.
@@ -496,17 +520,17 @@ static inline int blk_queue_full(struct request_queue *q, int rw)
 static inline void blk_set_queue_full(struct request_queue *q, int rw)
 {
 	if (rw == READ)
-		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_READFULL, q);
 	else
-		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
 }
 
 static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 {
 	if (rw == READ)
-		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_READFULL, q);
 	else
-		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
 }
 
 
@@ -626,6 +650,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
+extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
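Taken together, the four helpers in blkdev.h give flag updates two legal modes: a holder of q->queue_lock uses queue_flag_set()/queue_flag_clear() (and gets a one-time WARN via spin_is_locked() if it lied), while code with exclusive ownership of the queue, typically driver init before the queue is exposed, uses the _unlocked variants. A hedged usage sketch (example_request_fn and the surrounding init code are hypothetical, not from the patch):

static void example_request_fn(struct request_queue *q);

static struct request_queue *example_alloc_queue(spinlock_t *lock)
{
        struct request_queue *q = blk_init_queue(example_request_fn, lock);

        if (!q)
                return NULL;

        /* queue not visible to anyone else yet: no lock required */
        queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
        return q;
}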