-rw-r--r--   block/blk-barrier.c            29
-rw-r--r--   block/blk-core.c                6
-rw-r--r--   block/blk-settings.c           20
-rw-r--r--   drivers/block/brd.c             1
-rw-r--r--   drivers/block/loop.c            2
-rw-r--r--   drivers/block/osdblk.c          2
-rw-r--r--   drivers/block/ps3disk.c         2
-rw-r--r--   drivers/block/virtio_blk.c     25
-rw-r--r--   drivers/block/xen-blkfront.c   43
-rw-r--r--   drivers/ide/ide-disk.c         13
-rw-r--r--   drivers/md/dm.c                 2
-rw-r--r--   drivers/mmc/card/queue.c        1
-rw-r--r--   drivers/s390/block/dasd.c       1
-rw-r--r--   drivers/scsi/sd.c              16
-rw-r--r--   include/linux/blkdev.h          6
15 files changed, 67 insertions(+), 102 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index c807e9ca3a68..ed0aba5463ab 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -9,35 +9,6 @@
 
 #include "blk.h"
 
-/**
- * blk_queue_ordered - does this queue support ordered writes
- * @q:        the request queue
- * @ordered:  one of QUEUE_ORDERED_*
- *
- * Description:
- *   For journalled file systems, doing ordered writes on a commit
- *   block instead of explicitly doing wait_on_buffer (which is bad
- *   for performance) can be a big win. Block drivers supporting this
- *   feature should call this function and indicate so.
- *
- **/
-int blk_queue_ordered(struct request_queue *q, unsigned ordered)
-{
-        if (ordered != QUEUE_ORDERED_NONE &&
-            ordered != QUEUE_ORDERED_DRAIN &&
-            ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
-            ordered != QUEUE_ORDERED_DRAIN_FUA) {
-                printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
-                return -EINVAL;
-        }
-
-        q->ordered = ordered;
-        q->next_ordered = ordered;
-
-        return 0;
-}
-EXPORT_SYMBOL(blk_queue_ordered);
-
 /*
  * Cache flushing for ordered writes handling
  */
diff --git a/block/blk-core.c b/block/blk-core.c
index ee1a1e7e63cc..f06354183b29 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1203,11 +1203,13 @@ static int __make_request(struct request_queue *q, struct bio *bio)
         const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
         int rw_flags;
 
-        if ((bio->bi_rw & REQ_HARDBARRIER) &&
-            (q->next_ordered == QUEUE_ORDERED_NONE)) {
+        /* REQ_HARDBARRIER is no more */
+        if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
+                      "block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
                 bio_endio(bio, -EOPNOTSUPP);
                 return 0;
         }
+
         /*
          * low level driver can indicate that it wants pages above a
          * certain limit bounced to low memory (ie for highmem, or even
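
Any leftover REQ_HARDBARRIER submitter now sees its bio fail immediately with -EOPNOTSUPP instead of getting drain/ordering semantics. A minimal sketch of what such a caller would observe; the callback below is hypothetical and not part of the patch:

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Hypothetical completion callback: with the hunk above in place, a
 * bio tagged REQ_HARDBARRIER is ended right away with -EOPNOTSUPP.
 */
static void stale_barrier_end_io(struct bio *bio, int error)
{
        if (error == -EOPNOTSUPP)
                printk(KERN_WARNING
                       "barrier bio rejected; convert to REQ_FLUSH/REQ_FUA\n");
        bio_put(bio);
}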
diff --git a/block/blk-settings.c b/block/blk-settings.c
index a234f4bf1d6f..9b18afcfe925 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -794,6 +794,26 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
+/**
+ * blk_queue_flush - configure queue's cache flush capability
+ * @q:          the request queue for the device
+ * @flush:      0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
+ *
+ * Tell block layer cache flush capability of @q.  If it supports
+ * flushing, REQ_FLUSH should be set.  If it supports bypassing
+ * write cache for individual writes, REQ_FUA should be set.
+ */
+void blk_queue_flush(struct request_queue *q, unsigned int flush)
+{
+        WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
+
+        if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
+                flush &= ~REQ_FUA;
+
+        q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
+}
+EXPORT_SYMBOL_GPL(blk_queue_flush);
+
 static int __init blk_settings_init(void)
 {
         blk_max_low_pfn = max_low_pfn - 1;
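
blk_queue_flush() is the driver-side replacement for blk_queue_ordered(). A minimal usage sketch, not part of the patch — mydev_setup_queue() and its parameters are hypothetical; the flag composition mirrors the sd.c conversion below:

#include <linux/blkdev.h>

/*
 * Hypothetical probe-time helper: advertise a volatile write cache
 * (REQ_FLUSH) and, if the device honors forced-unit-access writes,
 * REQ_FUA as well.  Passing 0 means completed writes are durable.
 */
static void mydev_setup_queue(struct request_queue *q, bool wce, bool fua)
{
        unsigned int flush = 0;

        if (wce) {
                flush |= REQ_FLUSH;
                if (fua)
                        flush |= REQ_FUA;
        }
        blk_queue_flush(q, flush);
}

Note that REQ_FUA without REQ_FLUSH trips the WARN_ON_ONCE above and the FUA bit is silently dropped.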
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 47a41272d26b..fa33f97722ba 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -482,7 +482,6 @@ static struct brd_device *brd_alloc(int i)
         if (!brd->brd_queue)
                 goto out_free_dev;
         blk_queue_make_request(brd->brd_queue, brd_make_request);
-        blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_DRAIN);
         blk_queue_max_hw_sectors(brd->brd_queue, 1024);
         blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c3a4a2e176da..953d1e12f4d4 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -832,7 +832,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
         lo->lo_queue->unplug_fn = loop_unplug;
 
         if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
-                blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN_FLUSH);
+                blk_queue_flush(lo->lo_queue, REQ_FLUSH);
 
         set_capacity(lo->lo_disk, size);
         bd_set_size(bdev, size << 9);
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 2284b4f05c62..72d62462433d 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -439,7 +439,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
         blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
 
         blk_queue_prep_rq(q, blk_queue_start_tag);
-        blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
+        blk_queue_flush(q, REQ_FLUSH);
 
         disk->queue = q;
 
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index e9da874d0419..4911f9e57bc7 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -468,7 +468,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
         blk_queue_dma_alignment(queue, dev->blk_size-1);
         blk_queue_logical_block_size(queue, dev->blk_size);
 
-        blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH);
+        blk_queue_flush(queue, REQ_FLUSH);
 
         blk_queue_max_segments(queue, -1);
         blk_queue_max_segment_size(queue, dev->bounce_size);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 79652809eee8..d10b635b3946 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -388,22 +388,15 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
         vblk->disk->driverfs_dev = &vdev->dev;
         index++;
 
-        if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) {
-                /*
-                 * If the FLUSH feature is supported we do have support for
-                 * flushing a volatile write cache on the host.  Use that
-                 * to implement write barrier support.
-                 */
-                blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
-        } else {
-                /*
-                 * If the FLUSH feature is not supported we must assume that
-                 * the host does not perform any kind of volatile write
-                 * caching. We still need to drain the queue to provider
-                 * proper barrier semantics.
-                 */
-                blk_queue_ordered(q, QUEUE_ORDERED_DRAIN);
-        }
+        /*
+         * If the FLUSH feature is supported we do have support for
+         * flushing a volatile write cache on the host.  Use that to
+         * implement write barrier support; otherwise, we must assume
+         * that the host does not perform any kind of volatile write
+         * caching.
+         */
+        if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
+                blk_queue_flush(q, REQ_FLUSH);
 
         /* If disk is read-only in the host, the guest should obey */
         if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 50ec6f834996..0b1eea643262 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -95,7 +95,7 @@ struct blkfront_info
         struct gnttab_free_callback callback;
         struct blk_shadow shadow[BLK_RING_SIZE];
         unsigned long shadow_free;
-        int feature_barrier;
+        unsigned int feature_flush;
         int is_ready;
 };
 
@@ -418,25 +418,12 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 }
 
 
-static int xlvbd_barrier(struct blkfront_info *info)
+static void xlvbd_flush(struct blkfront_info *info)
 {
-        int err;
-        const char *barrier;
-
-        switch (info->feature_barrier) {
-        case QUEUE_ORDERED_DRAIN:       barrier = "enabled"; break;
-        case QUEUE_ORDERED_NONE:        barrier = "disabled"; break;
-        default:                        return -EINVAL;
-        }
-
-        err = blk_queue_ordered(info->rq, info->feature_barrier);
-
-        if (err)
-                return err;
-
+        blk_queue_flush(info->rq, info->feature_flush);
         printk(KERN_INFO "blkfront: %s: barriers %s\n",
-               info->gd->disk_name, barrier);
-        return 0;
+               info->gd->disk_name,
+               info->feature_flush ? "enabled" : "disabled");
 }
 
 
@@ -515,7 +502,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
         info->rq = gd->queue;
         info->gd = gd;
 
-        xlvbd_barrier(info);
+        xlvbd_flush(info);
 
         if (vdisk_info & VDISK_READONLY)
                 set_disk_ro(gd, 1);
@@ -661,8 +648,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                         printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
                                info->gd->disk_name);
                         error = -EOPNOTSUPP;
-                        info->feature_barrier = QUEUE_ORDERED_NONE;
-                        xlvbd_barrier(info);
+                        info->feature_flush = 0;
+                        xlvbd_flush(info);
                 }
                 /* fall through */
         case BLKIF_OP_READ:
@@ -1075,19 +1062,13 @@ static void blkfront_connect(struct blkfront_info *info)
         /*
          * If there's no "feature-barrier" defined, then it means
          * we're dealing with a very old backend which writes
-         * synchronously; draining will do what needs to get done.
+         * synchronously; nothing to do.
          *
          * If there are barriers, then we use flush.
-         *
-         * If barriers are not supported, then there's no much we can
-         * do, so just set ordering to NONE.
          */
-        if (err)
-                info->feature_barrier = QUEUE_ORDERED_DRAIN;
-        else if (barrier)
-                info->feature_barrier = QUEUE_ORDERED_DRAIN_FLUSH;
-        else
-                info->feature_barrier = QUEUE_ORDERED_NONE;
+        info->feature_flush = 0;
+        if (!err && barrier)
+                info->feature_flush = REQ_FLUSH;
 
         err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
         if (err) {
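
For context, the err/barrier pair consumed in the last hunk comes from a xenstore read earlier in blkfront_connect(). A sketch of that derivation under the surrounding blkfront definitions; the helper name is hypothetical:

#include <xen/xenbus.h>

/* Hypothetical refactoring of the existing xenstore probe: map the
 * backend's "feature-barrier" node onto the new feature_flush word. */
static unsigned int probe_feature_flush(struct blkfront_info *info)
{
        unsigned long barrier;
        int err;

        err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                            "feature-barrier", "%lu", &barrier, NULL);
        /*
         * err != 0: no such node, i.e. a very old synchronous backend,
         * so there is nothing to flush.  A nonzero value means the
         * backend can order writes, expressed here as REQ_FLUSH.
         */
        if (err || !barrier)
                return 0;
        return REQ_FLUSH;
}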
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 7433e07de30e..7c5b01ce51d2 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -516,10 +516,10 @@ static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
         return ide_no_data_taskfile(drive, &cmd);
 }
 
-static void update_ordered(ide_drive_t *drive)
+static void update_flush(ide_drive_t *drive)
 {
         u16 *id = drive->id;
-        unsigned ordered = QUEUE_ORDERED_NONE;
+        unsigned flush = 0;
 
         if (drive->dev_flags & IDE_DFLAG_WCACHE) {
                 unsigned long long capacity;
@@ -543,13 +543,12 @@ static void update_ordered(ide_drive_t *drive)
                        drive->name, barrier ? "" : "not ");
 
                 if (barrier) {
-                        ordered = QUEUE_ORDERED_DRAIN_FLUSH;
+                        flush = REQ_FLUSH;
                         blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
                 }
-        } else
-                ordered = QUEUE_ORDERED_DRAIN;
+        }
 
-        blk_queue_ordered(drive->queue, ordered);
+        blk_queue_flush(drive->queue, flush);
 }
 
 ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
@@ -572,7 +571,7 @@ static int set_wcache(ide_drive_t *drive, int arg)
                 }
         }
 
-        update_ordered(drive);
+        update_flush(drive);
 
         return err;
 }
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ac384b2a6a33..b1d92be8f990 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2245,7 +2245,7 @@ static int dm_init_request_based_queue(struct mapped_device *md)
         blk_queue_softirq_done(md->queue, dm_softirq_done);
         blk_queue_prep_rq(md->queue, dm_prep_fn);
         blk_queue_lld_busy(md->queue, dm_lld_busy);
-        blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+        blk_queue_flush(md->queue, REQ_FLUSH);
 
         elv_register_queue(md->queue);
 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index e876678176be..9c0b42bfe089 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -128,7 +128,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
         mq->req = NULL;
 
         blk_queue_prep_rq(mq->queue, mmc_prep_request);
-        blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
         if (mmc_can_erase(card)) {
                 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 8373ca0de8e0..9b106d83b0cd 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2197,7 +2197,6 @@ static void dasd_setup_queue(struct dasd_block *block)
          */
         blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
         blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
-        blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN);
 }
 
 /*
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index cdfc51ab9cf2..63bd01ae534f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2109,7 +2109,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
         struct scsi_disk *sdkp = scsi_disk(disk);
         struct scsi_device *sdp = sdkp->device;
         unsigned char *buffer;
-        unsigned ordered;
+        unsigned flush = 0;
 
         SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
                                       "sd_revalidate_disk\n"));
@@ -2151,15 +2151,15 @@ static int sd_revalidate_disk(struct gendisk *disk)
 
         /*
          * We now have all cache related info, determine how we deal
-         * with ordered requests.
+         * with flush requests.
          */
-        if (sdkp->WCE)
-                ordered = sdkp->DPOFUA
-                        ? QUEUE_ORDERED_DRAIN_FUA : QUEUE_ORDERED_DRAIN_FLUSH;
-        else
-                ordered = QUEUE_ORDERED_DRAIN;
+        if (sdkp->WCE) {
+                flush |= REQ_FLUSH;
+                if (sdkp->DPOFUA)
+                        flush |= REQ_FUA;
+        }
 
-        blk_queue_ordered(sdkp->disk->queue, ordered);
+        blk_queue_flush(sdkp->disk->queue, flush);
 
         set_capacity(disk, sdkp->capacity);
         kfree(buffer);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7077bc0d6138..e97911d4dec3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -355,8 +355,10 @@ struct request_queue
         struct blk_trace *blk_trace;
 #endif
         /*
-         * reserved for flush operations
+         * for flush operations
          */
+        unsigned int flush_flags;
+
         unsigned int ordered, next_ordered, ordseq;
         int orderr, ordcolor;
         struct request pre_flush_rq, bar_rq, post_flush_rq;
@@ -865,8 +867,8 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
+extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(struct request_queue *, unsigned);
 extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
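
With ordered modes gone, submitters that need durability issue an explicit empty flush (plus REQ_FUA writes where the queue advertises them). A sketch modeled on blkdev_issue_flush() of this era, using only the REQ_FLUSH semantics introduced by this series; the function names here are illustrative:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>

static void flush_end_io(struct bio *bio, int error)
{
        complete(bio->bi_private);
}

/* Issue an empty preflush to @bdev and wait for it to finish. */
static int issue_cache_flush(struct block_device *bdev)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct bio *bio;
        int err = 0;

        bio = bio_alloc(GFP_KERNEL, 0);
        if (!bio)
                return -ENOMEM;
        bio->bi_bdev = bdev;
        bio->bi_end_io = flush_end_io;
        bio->bi_private = &done;

        bio_get(bio);                           /* keep the bio across completion */
        submit_bio(WRITE | REQ_FLUSH, bio);     /* no payload: pure flush */
        wait_for_completion(&done);

        if (!bio_flagged(bio, BIO_UPTODATE))
                err = -EIO;
        bio_put(bio);
        return err;
}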