author     Linus Torvalds <torvalds@g5.osdl.org>    2006-01-06 12:01:25 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-01-06 12:01:25 -0500
commit     d99cf9d679a520d67f81d805b7cb91c68e1847f0
tree       415aefe6d168df27c006fcc53b1ea5242eabaaea
parent     7ed40918a386afc2e14a6d3da563ea6d13686c25
parent     e650c305ec3178818b317dad37a6d9c7fa8ba28d
Merge branch 'post-2.6.15' of git://brick.kernel.dk/data/git/linux-2.6-block
Manual fixup for merge with Jens' "Suspend support for libata", commit ID
9b847548663ef1039dd49f0eb4463d001e596bc3.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  Documentation/block/biodoc.txt | 10
-rw-r--r--  block/as-iosched.c | 144
-rw-r--r--  block/cfq-iosched.c | 16
-rw-r--r--  block/deadline-iosched.c | 8
-rw-r--r--  block/elevator.c | 86
-rw-r--r--  block/ll_rw_blk.c | 536
-rw-r--r--  block/scsi_ioctl.c | 2
-rw-r--r--  drivers/block/DAC960.c | 2
-rw-r--r--  drivers/block/cciss.c | 2
-rw-r--r--  drivers/block/cpqarray.c | 2
-rw-r--r--  drivers/block/floppy.c | 2
-rw-r--r--  drivers/block/nbd.c | 2
-rw-r--r--  drivers/block/sx8.c | 2
-rw-r--r--  drivers/block/ub.c | 2
-rw-r--r--  drivers/block/viodasd.c | 2
-rw-r--r--  drivers/cdrom/cdu31a.c | 2
-rw-r--r--  drivers/ide/ide-cd.c | 4
-rw-r--r--  drivers/ide/ide-disk.c | 137
-rw-r--r--  drivers/ide/ide-io.c | 11
-rw-r--r--  drivers/message/i2o/i2o_block.c | 2
-rw-r--r--  drivers/mmc/mmc_block.c | 4
-rw-r--r--  drivers/s390/block/dasd.c | 2
-rw-r--r--  drivers/s390/char/tape_block.c | 2
-rw-r--r--  drivers/scsi/ahci.c | 1
-rw-r--r--  drivers/scsi/ata_piix.c | 1
-rw-r--r--  drivers/scsi/hosts.c | 9
-rw-r--r--  drivers/scsi/ide-scsi.c | 4
-rw-r--r--  drivers/scsi/libata-core.c | 31
-rw-r--r--  drivers/scsi/libata-scsi.c | 32
-rw-r--r--  drivers/scsi/libata.h | 4
-rw-r--r--  drivers/scsi/sata_mv.c | 1
-rw-r--r--  drivers/scsi/sata_nv.c | 1
-rw-r--r--  drivers/scsi/sata_promise.c | 1
-rw-r--r--  drivers/scsi/sata_sil.c | 1
-rw-r--r--  drivers/scsi/sata_sil24.c | 1
-rw-r--r--  drivers/scsi/sata_sis.c | 1
-rw-r--r--  drivers/scsi/sata_svw.c | 1
-rw-r--r--  drivers/scsi/sata_sx4.c | 1
-rw-r--r--  drivers/scsi/sata_uli.c | 1
-rw-r--r--  drivers/scsi/sata_via.c | 1
-rw-r--r--  drivers/scsi/sata_vsc.c | 1
-rw-r--r--  drivers/scsi/scsi_lib.c | 50
-rw-r--r--  drivers/scsi/sd.c | 85
-rw-r--r--  fs/bio.c | 26
-rw-r--r--  include/linux/ata.h | 6
-rw-r--r--  include/linux/blkdev.h | 91
-rw-r--r--  include/linux/elevator.h | 1
-rw-r--r--  include/linux/libata.h | 3
-rw-r--r--  include/scsi/scsi_driver.h | 1
-rw-r--r--  include/scsi/scsi_host.h | 1
50 files changed, 695 insertions, 644 deletions
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 303c57a7fad9..8e63831971d5 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -263,14 +263,8 @@ A flag in the bio structure, BIO_BARRIER is used to identify a barrier i/o.
 The generic i/o scheduler would make sure that it places the barrier request and
 all other requests coming after it after all the previous requests in the
 queue. Barriers may be implemented in different ways depending on the
-driver. A SCSI driver for example could make use of ordered tags to
-preserve the necessary ordering with a lower impact on throughput. For IDE
-this might be two sync cache flush: a pre and post flush when encountering
-a barrier write.
-
-There is a provision for queues to indicate what kind of barriers they
-can provide. This is as of yet unmerged, details will be added here once it
-is in the kernel.
+driver. For more details regarding I/O barriers, please read barrier.txt
+in this directory.
 
 1.2.2 Request Priority/Latency
 
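For orientation before the code hunks below: with this series a low-level driver advertises its barrier capability through the reworked blk_queue_ordered() interface (see the block/ll_rw_blk.c and drivers/ide/ide-disk.c hunks further down). The sketch that follows is purely illustrative and not part of the patch; the driver name, setup function and flush opcode are made up, while the call signatures mirror what the diff itself introduces.

#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/string.h>

/*
 * Hypothetical flush preparation, modelled on idedisk_prepare_flush()
 * below: rewrite the proxy request into a cache-flush command for the
 * hardware.
 */
static void mydrv_prepare_flush(request_queue_t *q, struct request *rq)
{
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->cmd[0] = MYDRV_FLUSH_CACHE;		/* made-up hardware opcode */
	rq->buffer = rq->cmd;
}

static void mydrv_init_queue(request_queue_t *q)
{
	/*
	 * Drain the queue around barriers and bracket them with cache
	 * flushes.  blk_queue_ordered() now returns -EINVAL if a flushing
	 * mode is requested without a prepare_flush_fn.
	 */
	if (blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
			      mydrv_prepare_flush))
		printk(KERN_WARNING "mydrv: ordered mode not accepted\n");
}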
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 43fa20495688..8da3cf66894c 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -182,6 +182,9 @@ struct as_rq {
182 182
183static kmem_cache_t *arq_pool; 183static kmem_cache_t *arq_pool;
184 184
185static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
186static void as_antic_stop(struct as_data *ad);
187
185/* 188/*
186 * IO Context helper functions 189 * IO Context helper functions
187 */ 190 */
@@ -370,7 +373,7 @@ static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
370 * existing request against the same sector), which can happen when using 373 * existing request against the same sector), which can happen when using
371 * direct IO, then return the alias. 374 * direct IO, then return the alias.
372 */ 375 */
373static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq) 376static struct as_rq *__as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
374{ 377{
375 struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node; 378 struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
376 struct rb_node *parent = NULL; 379 struct rb_node *parent = NULL;
@@ -397,6 +400,16 @@ static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
397 return NULL; 400 return NULL;
398} 401}
399 402
403static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
404{
405 struct as_rq *alias;
406
407 while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) {
408 as_move_to_dispatch(ad, alias);
409 as_antic_stop(ad);
410 }
411}
412
400static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq) 413static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
401{ 414{
402 if (!ON_RB(&arq->rb_node)) { 415 if (!ON_RB(&arq->rb_node)) {
@@ -1133,23 +1146,6 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
1133 /* 1146 /*
1134 * take it off the sort and fifo list, add to dispatch queue 1147 * take it off the sort and fifo list, add to dispatch queue
1135 */ 1148 */
1136 while (!list_empty(&rq->queuelist)) {
1137 struct request *__rq = list_entry_rq(rq->queuelist.next);
1138 struct as_rq *__arq = RQ_DATA(__rq);
1139
1140 list_del(&__rq->queuelist);
1141
1142 elv_dispatch_add_tail(ad->q, __rq);
1143
1144 if (__arq->io_context && __arq->io_context->aic)
1145 atomic_inc(&__arq->io_context->aic->nr_dispatched);
1146
1147 WARN_ON(__arq->state != AS_RQ_QUEUED);
1148 __arq->state = AS_RQ_DISPATCHED;
1149
1150 ad->nr_dispatched++;
1151 }
1152
1153 as_remove_queued_request(ad->q, rq); 1149 as_remove_queued_request(ad->q, rq);
1154 WARN_ON(arq->state != AS_RQ_QUEUED); 1150 WARN_ON(arq->state != AS_RQ_QUEUED);
1155 1151
@@ -1326,49 +1322,12 @@ fifo_expired:
1326} 1322}
1327 1323
1328/* 1324/*
1329 * Add arq to a list behind alias
1330 */
1331static inline void
1332as_add_aliased_request(struct as_data *ad, struct as_rq *arq,
1333 struct as_rq *alias)
1334{
1335 struct request *req = arq->request;
1336 struct list_head *insert = alias->request->queuelist.prev;
1337
1338 /*
1339 * Transfer list of aliases
1340 */
1341 while (!list_empty(&req->queuelist)) {
1342 struct request *__rq = list_entry_rq(req->queuelist.next);
1343 struct as_rq *__arq = RQ_DATA(__rq);
1344
1345 list_move_tail(&__rq->queuelist, &alias->request->queuelist);
1346
1347 WARN_ON(__arq->state != AS_RQ_QUEUED);
1348 }
1349
1350 /*
1351 * Another request with the same start sector on the rbtree.
1352 * Link this request to that sector. They are untangled in
1353 * as_move_to_dispatch
1354 */
1355 list_add(&arq->request->queuelist, insert);
1356
1357 /*
1358 * Don't want to have to handle merges.
1359 */
1360 as_del_arq_hash(arq);
1361 arq->request->flags |= REQ_NOMERGE;
1362}
1363
1364/*
1365 * add arq to rbtree and fifo 1325 * add arq to rbtree and fifo
1366 */ 1326 */
1367static void as_add_request(request_queue_t *q, struct request *rq) 1327static void as_add_request(request_queue_t *q, struct request *rq)
1368{ 1328{
1369 struct as_data *ad = q->elevator->elevator_data; 1329 struct as_data *ad = q->elevator->elevator_data;
1370 struct as_rq *arq = RQ_DATA(rq); 1330 struct as_rq *arq = RQ_DATA(rq);
1371 struct as_rq *alias;
1372 int data_dir; 1331 int data_dir;
1373 1332
1374 arq->state = AS_RQ_NEW; 1333 arq->state = AS_RQ_NEW;
@@ -1387,33 +1346,17 @@ static void as_add_request(request_queue_t *q, struct request *rq)
1387 atomic_inc(&arq->io_context->aic->nr_queued); 1346 atomic_inc(&arq->io_context->aic->nr_queued);
1388 } 1347 }
1389 1348
1390 alias = as_add_arq_rb(ad, arq); 1349 as_add_arq_rb(ad, arq);
1391 if (!alias) { 1350 if (rq_mergeable(arq->request))
1392 /* 1351 as_add_arq_hash(ad, arq);
1393 * set expire time (only used for reads) and add to fifo list
1394 */
1395 arq->expires = jiffies + ad->fifo_expire[data_dir];
1396 list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
1397 1352
1398 if (rq_mergeable(arq->request)) 1353 /*
1399 as_add_arq_hash(ad, arq); 1354 * set expire time (only used for reads) and add to fifo list
1400 as_update_arq(ad, arq); /* keep state machine up to date */ 1355 */
1401 1356 arq->expires = jiffies + ad->fifo_expire[data_dir];
1402 } else { 1357 list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
1403 as_add_aliased_request(ad, arq, alias);
1404
1405 /*
1406 * have we been anticipating this request?
1407 * or does it come from the same process as the one we are
1408 * anticipating for?
1409 */
1410 if (ad->antic_status == ANTIC_WAIT_REQ
1411 || ad->antic_status == ANTIC_WAIT_NEXT) {
1412 if (as_can_break_anticipation(ad, arq))
1413 as_antic_stop(ad);
1414 }
1415 }
1416 1358
1359 as_update_arq(ad, arq); /* keep state machine up to date */
1417 arq->state = AS_RQ_QUEUED; 1360 arq->state = AS_RQ_QUEUED;
1418} 1361}
1419 1362
@@ -1536,23 +1479,8 @@ static void as_merged_request(request_queue_t *q, struct request *req)
1536 * if the merge was a front merge, we need to reposition request 1479 * if the merge was a front merge, we need to reposition request
1537 */ 1480 */
1538 if (rq_rb_key(req) != arq->rb_key) { 1481 if (rq_rb_key(req) != arq->rb_key) {
1539 struct as_rq *alias, *next_arq = NULL;
1540
1541 if (ad->next_arq[arq->is_sync] == arq)
1542 next_arq = as_find_next_arq(ad, arq);
1543
1544 /*
1545 * Note! We should really be moving any old aliased requests
1546 * off this request and try to insert them into the rbtree. We
1547 * currently don't bother. Ditto the next function.
1548 */
1549 as_del_arq_rb(ad, arq); 1482 as_del_arq_rb(ad, arq);
1550 if ((alias = as_add_arq_rb(ad, arq))) { 1483 as_add_arq_rb(ad, arq);
1551 list_del_init(&arq->fifo);
1552 as_add_aliased_request(ad, arq, alias);
1553 if (next_arq)
1554 ad->next_arq[arq->is_sync] = next_arq;
1555 }
1556 /* 1484 /*
1557 * Note! At this stage of this and the next function, our next 1485 * Note! At this stage of this and the next function, our next
1558 * request may not be optimal - eg the request may have "grown" 1486 * request may not be optimal - eg the request may have "grown"
@@ -1579,18 +1507,8 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
1579 as_add_arq_hash(ad, arq); 1507 as_add_arq_hash(ad, arq);
1580 1508
1581 if (rq_rb_key(req) != arq->rb_key) { 1509 if (rq_rb_key(req) != arq->rb_key) {
1582 struct as_rq *alias, *next_arq = NULL;
1583
1584 if (ad->next_arq[arq->is_sync] == arq)
1585 next_arq = as_find_next_arq(ad, arq);
1586
1587 as_del_arq_rb(ad, arq); 1510 as_del_arq_rb(ad, arq);
1588 if ((alias = as_add_arq_rb(ad, arq))) { 1511 as_add_arq_rb(ad, arq);
1589 list_del_init(&arq->fifo);
1590 as_add_aliased_request(ad, arq, alias);
1591 if (next_arq)
1592 ad->next_arq[arq->is_sync] = next_arq;
1593 }
1594 } 1512 }
1595 1513
1596 /* 1514 /*
@@ -1610,18 +1528,6 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
1610 } 1528 }
1611 1529
1612 /* 1530 /*
1613 * Transfer list of aliases
1614 */
1615 while (!list_empty(&next->queuelist)) {
1616 struct request *__rq = list_entry_rq(next->queuelist.next);
1617 struct as_rq *__arq = RQ_DATA(__rq);
1618
1619 list_move_tail(&__rq->queuelist, &req->queuelist);
1620
1621 WARN_ON(__arq->state != AS_RQ_QUEUED);
1622 }
1623
1624 /*
1625 * kill knowledge of next, this one is a goner 1531 * kill knowledge of next, this one is a goner
1626 */ 1532 */
1627 as_remove_queued_request(q, next); 1533 as_remove_queued_request(q, next);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ee0bb41694b0..74fae2daf87e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -25,15 +25,15 @@
 /*
  * tunables
  */
-static int cfq_quantum = 4;		/* max queue in one round of service */
-static int cfq_queued = 8;		/* minimum rq allocate limit per-queue*/
-static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
-static int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
-static int cfq_back_penalty = 2;	/* penalty of a backwards seek */
+static const int cfq_quantum = 4;	/* max queue in one round of service */
+static const int cfq_queued = 8;	/* minimum rq allocate limit per-queue*/
+static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
+static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
+static const int cfq_back_penalty = 2;	/* penalty of a backwards seek */
 
-static int cfq_slice_sync = HZ / 10;
+static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
-static int cfq_slice_async_rq = 2;
+static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 100;
 
 #define CFQ_IDLE_GRACE	(HZ / 10)
@@ -45,7 +45,7 @@ static int cfq_slice_idle = HZ / 100;
 /*
  * disable queueing at the driver/hardware level
  */
-static int cfq_max_depth = 2;
+static const int cfq_max_depth = 2;
 
 /*
  * for the hash of cfqq inside the cfqd
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 9cbec09e8415..27e494b1bf97 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -19,10 +19,10 @@
 /*
  * See Documentation/block/deadline-iosched.txt
  */
-static int read_expire = HZ / 2;	/* max time before a read is submitted. */
-static int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
-static int writes_starved = 2;		/* max times reads can starve a write */
-static int fifo_batch = 16;		/* # of sequential requests treated as one
+static const int read_expire = HZ / 2;	/* max time before a read is submitted. */
+static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
+static const int writes_starved = 2;	/* max times reads can starve a write */
+static const int fifo_batch = 16;	/* # of sequential requests treated as one
 					   by the above parameters. For throughput. */
 
 static const int deadline_hash_shift = 5;
diff --git a/block/elevator.c b/block/elevator.c
index 6c3fc8a10bf2..39dcccc82ada 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -304,15 +304,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
304 304
305 rq->flags &= ~REQ_STARTED; 305 rq->flags &= ~REQ_STARTED;
306 306
307 /* 307 __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
308 * if this is the flush, requeue the original instead and drop the flush
309 */
310 if (rq->flags & REQ_BAR_FLUSH) {
311 clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
312 rq = rq->end_io_data;
313 }
314
315 __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
316} 308}
317 309
318static void elv_drain_elevator(request_queue_t *q) 310static void elv_drain_elevator(request_queue_t *q)
@@ -332,8 +324,19 @@ static void elv_drain_elevator(request_queue_t *q)
332void __elv_add_request(request_queue_t *q, struct request *rq, int where, 324void __elv_add_request(request_queue_t *q, struct request *rq, int where,
333 int plug) 325 int plug)
334{ 326{
327 struct list_head *pos;
328 unsigned ordseq;
329
330 if (q->ordcolor)
331 rq->flags |= REQ_ORDERED_COLOR;
332
335 if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) { 333 if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
336 /* 334 /*
335 * toggle ordered color
336 */
337 q->ordcolor ^= 1;
338
339 /*
337 * barriers implicitly indicate back insertion 340 * barriers implicitly indicate back insertion
338 */ 341 */
339 if (where == ELEVATOR_INSERT_SORT) 342 if (where == ELEVATOR_INSERT_SORT)
@@ -393,6 +396,30 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
393 q->elevator->ops->elevator_add_req_fn(q, rq); 396 q->elevator->ops->elevator_add_req_fn(q, rq);
394 break; 397 break;
395 398
399 case ELEVATOR_INSERT_REQUEUE:
400 /*
401 * If ordered flush isn't in progress, we do front
402 * insertion; otherwise, requests should be requeued
403 * in ordseq order.
404 */
405 rq->flags |= REQ_SOFTBARRIER;
406
407 if (q->ordseq == 0) {
408 list_add(&rq->queuelist, &q->queue_head);
409 break;
410 }
411
412 ordseq = blk_ordered_req_seq(rq);
413
414 list_for_each(pos, &q->queue_head) {
415 struct request *pos_rq = list_entry_rq(pos);
416 if (ordseq <= blk_ordered_req_seq(pos_rq))
417 break;
418 }
419
420 list_add_tail(&rq->queuelist, pos);
421 break;
422
396 default: 423 default:
397 printk(KERN_ERR "%s: bad insertion point %d\n", 424 printk(KERN_ERR "%s: bad insertion point %d\n",
398 __FUNCTION__, where); 425 __FUNCTION__, where);
@@ -422,25 +449,16 @@ static inline struct request *__elv_next_request(request_queue_t *q)
422{ 449{
423 struct request *rq; 450 struct request *rq;
424 451
425 if (unlikely(list_empty(&q->queue_head) && 452 while (1) {
426 !q->elevator->ops->elevator_dispatch_fn(q, 0))) 453 while (!list_empty(&q->queue_head)) {
427 return NULL; 454 rq = list_entry_rq(q->queue_head.next);
428 455 if (blk_do_ordered(q, &rq))
429 rq = list_entry_rq(q->queue_head.next); 456 return rq;
430 457 }
431 /*
432 * if this is a barrier write and the device has to issue a
433 * flush sequence to support it, check how far we are
434 */
435 if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
436 BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
437 458
438 if (q->ordered == QUEUE_ORDERED_FLUSH && 459 if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
439 !blk_barrier_preflush(rq)) 460 return NULL;
440 rq = blk_start_pre_flush(q, rq);
441 } 461 }
442
443 return rq;
444} 462}
445 463
446struct request *elv_next_request(request_queue_t *q) 464struct request *elv_next_request(request_queue_t *q)
@@ -498,7 +516,7 @@ struct request *elv_next_request(request_queue_t *q)
498 blkdev_dequeue_request(rq); 516 blkdev_dequeue_request(rq);
499 rq->flags |= REQ_QUIET; 517 rq->flags |= REQ_QUIET;
500 end_that_request_chunk(rq, 0, nr_bytes); 518 end_that_request_chunk(rq, 0, nr_bytes);
501 end_that_request_last(rq); 519 end_that_request_last(rq, 0);
502 } else { 520 } else {
503 printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__, 521 printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
504 ret); 522 ret);
@@ -593,7 +611,21 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
593 * request is released from the driver, io must be done 611 * request is released from the driver, io must be done
594 */ 612 */
595 if (blk_account_rq(rq)) { 613 if (blk_account_rq(rq)) {
614 struct request *first_rq = list_entry_rq(q->queue_head.next);
615
596 q->in_flight--; 616 q->in_flight--;
617
618 /*
619 * Check if the queue is waiting for fs requests to be
620 * drained for flush sequence.
621 */
622 if (q->ordseq && q->in_flight == 0 &&
623 blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
624 blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
625 blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
626 q->request_fn(q);
627 }
628
597 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn) 629 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
598 e->ops->elevator_completed_req_fn(q, rq); 630 e->ops->elevator_completed_req_fn(q, rq);
599 } 631 }
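A side note on the REQ_ORDERED_COLOR bit introduced above: every queued request copies the queue's current color and each barrier flips it, so blk_ordered_req_seq() (in the ll_rw_blk.c hunk below) can tell requests issued before the barrier (same color, must drain) from those issued after it (opposite color, sorted behind the whole ordered sequence). The stand-alone user-space sketch below is only an analogy for that single-bit bookkeeping, not kernel code.

#include <stdio.h>

struct demo_rq {
	int color;			/* stands in for REQ_ORDERED_COLOR */
	int is_barrier;
};

static int queue_color;			/* stands in for q->ordcolor */

static void demo_add_request(struct demo_rq *rq)
{
	rq->color = queue_color;	/* request inherits the current color */
	if (rq->is_barrier)
		queue_color ^= 1;	/* each barrier toggles the color */
}

int main(void)
{
	struct demo_rq before = { 0, 0 }, bar = { 0, 1 }, after = { 0, 0 };

	demo_add_request(&before);	/* queued ahead of the barrier */
	demo_add_request(&bar);		/* the barrier itself */
	demo_add_request(&after);	/* queued behind the barrier */

	/* mirrors the color comparison in blk_ordered_req_seq() */
	printf("before: %s\n", before.color == bar.color ? "DRAIN" : "DONE");
	printf("after:  %s\n", after.color == bar.color ? "DRAIN" : "DONE");
	return 0;
}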
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index d4beb9a89ee0..91d3b4828c49 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -36,6 +36,8 @@
36static void blk_unplug_work(void *data); 36static void blk_unplug_work(void *data);
37static void blk_unplug_timeout(unsigned long data); 37static void blk_unplug_timeout(unsigned long data);
38static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); 38static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
39static void init_request_from_bio(struct request *req, struct bio *bio);
40static int __make_request(request_queue_t *q, struct bio *bio);
39 41
40/* 42/*
41 * For the allocated request tables 43 * For the allocated request tables
@@ -288,8 +290,8 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
288 290
289/** 291/**
290 * blk_queue_ordered - does this queue support ordered writes 292 * blk_queue_ordered - does this queue support ordered writes
291 * @q: the request queue 293 * @q: the request queue
292 * @flag: see below 294 * @ordered: one of QUEUE_ORDERED_*
293 * 295 *
294 * Description: 296 * Description:
295 * For journalled file systems, doing ordered writes on a commit 297 * For journalled file systems, doing ordered writes on a commit
@@ -298,28 +300,30 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
298 * feature should call this function and indicate so. 300 * feature should call this function and indicate so.
299 * 301 *
300 **/ 302 **/
301void blk_queue_ordered(request_queue_t *q, int flag) 303int blk_queue_ordered(request_queue_t *q, unsigned ordered,
302{ 304 prepare_flush_fn *prepare_flush_fn)
303 switch (flag) { 305{
304 case QUEUE_ORDERED_NONE: 306 if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
305 if (q->flush_rq) 307 prepare_flush_fn == NULL) {
306 kmem_cache_free(request_cachep, q->flush_rq); 308 printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
307 q->flush_rq = NULL; 309 return -EINVAL;
308 q->ordered = flag; 310 }
309 break; 311
310 case QUEUE_ORDERED_TAG: 312 if (ordered != QUEUE_ORDERED_NONE &&
311 q->ordered = flag; 313 ordered != QUEUE_ORDERED_DRAIN &&
312 break; 314 ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
313 case QUEUE_ORDERED_FLUSH: 315 ordered != QUEUE_ORDERED_DRAIN_FUA &&
314 q->ordered = flag; 316 ordered != QUEUE_ORDERED_TAG &&
315 if (!q->flush_rq) 317 ordered != QUEUE_ORDERED_TAG_FLUSH &&
316 q->flush_rq = kmem_cache_alloc(request_cachep, 318 ordered != QUEUE_ORDERED_TAG_FUA) {
317 GFP_KERNEL); 319 printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
318 break; 320 return -EINVAL;
319 default:
320 printk("blk_queue_ordered: bad value %d\n", flag);
321 break;
322 } 321 }
322
323 q->next_ordered = ordered;
324 q->prepare_flush_fn = prepare_flush_fn;
325
326 return 0;
323} 327}
324 328
325EXPORT_SYMBOL(blk_queue_ordered); 329EXPORT_SYMBOL(blk_queue_ordered);
@@ -344,167 +348,265 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn);
344/* 348/*
345 * Cache flushing for ordered writes handling 349 * Cache flushing for ordered writes handling
346 */ 350 */
347static void blk_pre_flush_end_io(struct request *flush_rq) 351inline unsigned blk_ordered_cur_seq(request_queue_t *q)
348{ 352{
349 struct request *rq = flush_rq->end_io_data; 353 if (!q->ordseq)
350 request_queue_t *q = rq->q; 354 return 0;
351 355 return 1 << ffz(q->ordseq);
352 elv_completed_request(q, flush_rq);
353
354 rq->flags |= REQ_BAR_PREFLUSH;
355
356 if (!flush_rq->errors)
357 elv_requeue_request(q, rq);
358 else {
359 q->end_flush_fn(q, flush_rq);
360 clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
361 q->request_fn(q);
362 }
363} 356}
364 357
365static void blk_post_flush_end_io(struct request *flush_rq) 358unsigned blk_ordered_req_seq(struct request *rq)
366{ 359{
367 struct request *rq = flush_rq->end_io_data;
368 request_queue_t *q = rq->q; 360 request_queue_t *q = rq->q;
369 361
370 elv_completed_request(q, flush_rq); 362 BUG_ON(q->ordseq == 0);
371 363
372 rq->flags |= REQ_BAR_POSTFLUSH; 364 if (rq == &q->pre_flush_rq)
365 return QUEUE_ORDSEQ_PREFLUSH;
366 if (rq == &q->bar_rq)
367 return QUEUE_ORDSEQ_BAR;
368 if (rq == &q->post_flush_rq)
369 return QUEUE_ORDSEQ_POSTFLUSH;
373 370
374 q->end_flush_fn(q, flush_rq); 371 if ((rq->flags & REQ_ORDERED_COLOR) ==
375 clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags); 372 (q->orig_bar_rq->flags & REQ_ORDERED_COLOR))
376 q->request_fn(q); 373 return QUEUE_ORDSEQ_DRAIN;
374 else
375 return QUEUE_ORDSEQ_DONE;
377} 376}
378 377
379struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq) 378void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error)
380{ 379{
381 struct request *flush_rq = q->flush_rq; 380 struct request *rq;
382 381 int uptodate;
383 BUG_ON(!blk_barrier_rq(rq));
384 382
385 if (test_and_set_bit(QUEUE_FLAG_FLUSH, &q->queue_flags)) 383 if (error && !q->orderr)
386 return NULL; 384 q->orderr = error;
387 385
388 rq_init(q, flush_rq); 386 BUG_ON(q->ordseq & seq);
389 flush_rq->elevator_private = NULL; 387 q->ordseq |= seq;
390 flush_rq->flags = REQ_BAR_FLUSH;
391 flush_rq->rq_disk = rq->rq_disk;
392 flush_rq->rl = NULL;
393 388
394 /* 389 if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
395 * prepare_flush returns 0 if no flush is needed, just mark both 390 return;
396 * pre and post flush as done in that case
397 */
398 if (!q->prepare_flush_fn(q, flush_rq)) {
399 rq->flags |= REQ_BAR_PREFLUSH | REQ_BAR_POSTFLUSH;
400 clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
401 return rq;
402 }
403 391
404 /* 392 /*
405 * some drivers dequeue requests right away, some only after io 393 * Okay, sequence complete.
406 * completion. make sure the request is dequeued.
407 */ 394 */
408 if (!list_empty(&rq->queuelist)) 395 rq = q->orig_bar_rq;
409 blkdev_dequeue_request(rq); 396 uptodate = q->orderr ? q->orderr : 1;
410 397
411 flush_rq->end_io_data = rq; 398 q->ordseq = 0;
412 flush_rq->end_io = blk_pre_flush_end_io;
413 399
414 __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0); 400 end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
415 return flush_rq; 401 end_that_request_last(rq, uptodate);
416} 402}
417 403
418static void blk_start_post_flush(request_queue_t *q, struct request *rq) 404static void pre_flush_end_io(struct request *rq, int error)
419{ 405{
420 struct request *flush_rq = q->flush_rq; 406 elv_completed_request(rq->q, rq);
407 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
408}
421 409
422 BUG_ON(!blk_barrier_rq(rq)); 410static void bar_end_io(struct request *rq, int error)
411{
412 elv_completed_request(rq->q, rq);
413 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
414}
423 415
424 rq_init(q, flush_rq); 416static void post_flush_end_io(struct request *rq, int error)
425 flush_rq->elevator_private = NULL; 417{
426 flush_rq->flags = REQ_BAR_FLUSH; 418 elv_completed_request(rq->q, rq);
427 flush_rq->rq_disk = rq->rq_disk; 419 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
428 flush_rq->rl = NULL; 420}
429 421
430 if (q->prepare_flush_fn(q, flush_rq)) { 422static void queue_flush(request_queue_t *q, unsigned which)
431 flush_rq->end_io_data = rq; 423{
432 flush_rq->end_io = blk_post_flush_end_io; 424 struct request *rq;
425 rq_end_io_fn *end_io;
433 426
434 __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0); 427 if (which == QUEUE_ORDERED_PREFLUSH) {
435 q->request_fn(q); 428 rq = &q->pre_flush_rq;
429 end_io = pre_flush_end_io;
430 } else {
431 rq = &q->post_flush_rq;
432 end_io = post_flush_end_io;
436 } 433 }
434
435 rq_init(q, rq);
436 rq->flags = REQ_HARDBARRIER;
437 rq->elevator_private = NULL;
438 rq->rq_disk = q->bar_rq.rq_disk;
439 rq->rl = NULL;
440 rq->end_io = end_io;
441 q->prepare_flush_fn(q, rq);
442
443 __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
437} 444}
438 445
439static inline int blk_check_end_barrier(request_queue_t *q, struct request *rq, 446static inline struct request *start_ordered(request_queue_t *q,
440 int sectors) 447 struct request *rq)
441{ 448{
442 if (sectors > rq->nr_sectors) 449 q->bi_size = 0;
443 sectors = rq->nr_sectors; 450 q->orderr = 0;
451 q->ordered = q->next_ordered;
452 q->ordseq |= QUEUE_ORDSEQ_STARTED;
453
454 /*
455 * Prep proxy barrier request.
456 */
457 blkdev_dequeue_request(rq);
458 q->orig_bar_rq = rq;
459 rq = &q->bar_rq;
460 rq_init(q, rq);
461 rq->flags = bio_data_dir(q->orig_bar_rq->bio);
462 rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
463 rq->elevator_private = NULL;
464 rq->rl = NULL;
465 init_request_from_bio(rq, q->orig_bar_rq->bio);
466 rq->end_io = bar_end_io;
467
468 /*
469 * Queue ordered sequence. As we stack them at the head, we
470 * need to queue in reverse order. Note that we rely on that
471 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
472 * request gets inbetween ordered sequence.
473 */
474 if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
475 queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
476 else
477 q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
478
479 __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
480
481 if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
482 queue_flush(q, QUEUE_ORDERED_PREFLUSH);
483 rq = &q->pre_flush_rq;
484 } else
485 q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
444 486
445 rq->nr_sectors -= sectors; 487 if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
446 return rq->nr_sectors; 488 q->ordseq |= QUEUE_ORDSEQ_DRAIN;
489 else
490 rq = NULL;
491
492 return rq;
447} 493}
448 494
449static int __blk_complete_barrier_rq(request_queue_t *q, struct request *rq, 495int blk_do_ordered(request_queue_t *q, struct request **rqp)
450 int sectors, int queue_locked)
451{ 496{
452 if (q->ordered != QUEUE_ORDERED_FLUSH) 497 struct request *rq = *rqp, *allowed_rq;
453 return 0; 498 int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
454 if (!blk_fs_request(rq) || !blk_barrier_rq(rq))
455 return 0;
456 if (blk_barrier_postflush(rq))
457 return 0;
458 499
459 if (!blk_check_end_barrier(q, rq, sectors)) { 500 if (!q->ordseq) {
460 unsigned long flags = 0; 501 if (!is_barrier)
502 return 1;
461 503
462 if (!queue_locked) 504 if (q->next_ordered != QUEUE_ORDERED_NONE) {
463 spin_lock_irqsave(q->queue_lock, flags); 505 *rqp = start_ordered(q, rq);
506 return 1;
507 } else {
508 /*
509 * This can happen when the queue switches to
510 * ORDERED_NONE while this request is on it.
511 */
512 blkdev_dequeue_request(rq);
513 end_that_request_first(rq, -EOPNOTSUPP,
514 rq->hard_nr_sectors);
515 end_that_request_last(rq, -EOPNOTSUPP);
516 *rqp = NULL;
517 return 0;
518 }
519 }
464 520
465 blk_start_post_flush(q, rq); 521 if (q->ordered & QUEUE_ORDERED_TAG) {
522 if (is_barrier && rq != &q->bar_rq)
523 *rqp = NULL;
524 return 1;
525 }
466 526
467 if (!queue_locked) 527 switch (blk_ordered_cur_seq(q)) {
468 spin_unlock_irqrestore(q->queue_lock, flags); 528 case QUEUE_ORDSEQ_PREFLUSH:
529 allowed_rq = &q->pre_flush_rq;
530 break;
531 case QUEUE_ORDSEQ_BAR:
532 allowed_rq = &q->bar_rq;
533 break;
534 case QUEUE_ORDSEQ_POSTFLUSH:
535 allowed_rq = &q->post_flush_rq;
536 break;
537 default:
538 allowed_rq = NULL;
539 break;
469 } 540 }
470 541
542 if (rq != allowed_rq &&
543 (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
544 rq == &q->post_flush_rq))
545 *rqp = NULL;
546
471 return 1; 547 return 1;
472} 548}
473 549
474/** 550static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
475 * blk_complete_barrier_rq - complete possible barrier request
476 * @q: the request queue for the device
477 * @rq: the request
478 * @sectors: number of sectors to complete
479 *
480 * Description:
481 * Used in driver end_io handling to determine whether to postpone
482 * completion of a barrier request until a post flush has been done. This
483 * is the unlocked variant, used if the caller doesn't already hold the
484 * queue lock.
485 **/
486int blk_complete_barrier_rq(request_queue_t *q, struct request *rq, int sectors)
487{ 551{
488 return __blk_complete_barrier_rq(q, rq, sectors, 0); 552 request_queue_t *q = bio->bi_private;
553 struct bio_vec *bvec;
554 int i;
555
556 /*
557 * This is dry run, restore bio_sector and size. We'll finish
558 * this request again with the original bi_end_io after an
559 * error occurs or post flush is complete.
560 */
561 q->bi_size += bytes;
562
563 if (bio->bi_size)
564 return 1;
565
566 /* Rewind bvec's */
567 bio->bi_idx = 0;
568 bio_for_each_segment(bvec, bio, i) {
569 bvec->bv_len += bvec->bv_offset;
570 bvec->bv_offset = 0;
571 }
572
573 /* Reset bio */
574 set_bit(BIO_UPTODATE, &bio->bi_flags);
575 bio->bi_size = q->bi_size;
576 bio->bi_sector -= (q->bi_size >> 9);
577 q->bi_size = 0;
578
579 return 0;
489} 580}
490EXPORT_SYMBOL(blk_complete_barrier_rq);
491 581
492/** 582static inline int ordered_bio_endio(struct request *rq, struct bio *bio,
493 * blk_complete_barrier_rq_locked - complete possible barrier request 583 unsigned int nbytes, int error)
494 * @q: the request queue for the device
495 * @rq: the request
496 * @sectors: number of sectors to complete
497 *
498 * Description:
499 * See blk_complete_barrier_rq(). This variant must be used if the caller
500 * holds the queue lock.
501 **/
502int blk_complete_barrier_rq_locked(request_queue_t *q, struct request *rq,
503 int sectors)
504{ 584{
505 return __blk_complete_barrier_rq(q, rq, sectors, 1); 585 request_queue_t *q = rq->q;
586 bio_end_io_t *endio;
587 void *private;
588
589 if (&q->bar_rq != rq)
590 return 0;
591
592 /*
593 * Okay, this is the barrier request in progress, dry finish it.
594 */
595 if (error && !q->orderr)
596 q->orderr = error;
597
598 endio = bio->bi_end_io;
599 private = bio->bi_private;
600 bio->bi_end_io = flush_dry_bio_endio;
601 bio->bi_private = q;
602
603 bio_endio(bio, nbytes, error);
604
605 bio->bi_end_io = endio;
606 bio->bi_private = private;
607
608 return 1;
506} 609}
507EXPORT_SYMBOL(blk_complete_barrier_rq_locked);
508 610
509/** 611/**
510 * blk_queue_bounce_limit - set bounce buffer limit for queue 612 * blk_queue_bounce_limit - set bounce buffer limit for queue
@@ -1039,12 +1141,13 @@ void blk_queue_invalidate_tags(request_queue_t *q)
1039 1141
1040EXPORT_SYMBOL(blk_queue_invalidate_tags); 1142EXPORT_SYMBOL(blk_queue_invalidate_tags);
1041 1143
1042static char *rq_flags[] = { 1144static const char * const rq_flags[] = {
1043 "REQ_RW", 1145 "REQ_RW",
1044 "REQ_FAILFAST", 1146 "REQ_FAILFAST",
1045 "REQ_SORTED", 1147 "REQ_SORTED",
1046 "REQ_SOFTBARRIER", 1148 "REQ_SOFTBARRIER",
1047 "REQ_HARDBARRIER", 1149 "REQ_HARDBARRIER",
1150 "REQ_FUA",
1048 "REQ_CMD", 1151 "REQ_CMD",
1049 "REQ_NOMERGE", 1152 "REQ_NOMERGE",
1050 "REQ_STARTED", 1153 "REQ_STARTED",
@@ -1064,6 +1167,7 @@ static char *rq_flags[] = {
1064 "REQ_PM_SUSPEND", 1167 "REQ_PM_SUSPEND",
1065 "REQ_PM_RESUME", 1168 "REQ_PM_RESUME",
1066 "REQ_PM_SHUTDOWN", 1169 "REQ_PM_SHUTDOWN",
1170 "REQ_ORDERED_COLOR",
1067}; 1171};
1068 1172
1069void blk_dump_rq_flags(struct request *rq, char *msg) 1173void blk_dump_rq_flags(struct request *rq, char *msg)
@@ -1641,8 +1745,6 @@ void blk_cleanup_queue(request_queue_t * q)
1641 if (q->queue_tags) 1745 if (q->queue_tags)
1642 __blk_queue_free_tags(q); 1746 __blk_queue_free_tags(q);
1643 1747
1644 blk_queue_ordered(q, QUEUE_ORDERED_NONE);
1645
1646 kmem_cache_free(requestq_cachep, q); 1748 kmem_cache_free(requestq_cachep, q);
1647} 1749}
1648 1750
@@ -1667,8 +1769,6 @@ static int blk_init_free_list(request_queue_t *q)
1667 return 0; 1769 return 0;
1668} 1770}
1669 1771
1670static int __make_request(request_queue_t *, struct bio *);
1671
1672request_queue_t *blk_alloc_queue(gfp_t gfp_mask) 1772request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
1673{ 1773{
1674 return blk_alloc_queue_node(gfp_mask, -1); 1774 return blk_alloc_queue_node(gfp_mask, -1);
@@ -1908,40 +2008,40 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
1908{ 2008{
1909 struct request *rq = NULL; 2009 struct request *rq = NULL;
1910 struct request_list *rl = &q->rq; 2010 struct request_list *rl = &q->rq;
1911 struct io_context *ioc = current_io_context(GFP_ATOMIC); 2011 struct io_context *ioc = NULL;
1912 int priv; 2012 int may_queue, priv;
1913 2013
1914 if (rl->count[rw]+1 >= q->nr_requests) { 2014 may_queue = elv_may_queue(q, rw, bio);
1915 /* 2015 if (may_queue == ELV_MQUEUE_NO)
1916 * The queue will fill after this allocation, so set it as 2016 goto rq_starved;
1917 * full, and mark this process as "batching". This process
1918 * will be allowed to complete a batch of requests, others
1919 * will be blocked.
1920 */
1921 if (!blk_queue_full(q, rw)) {
1922 ioc_set_batching(q, ioc);
1923 blk_set_queue_full(q, rw);
1924 }
1925 }
1926 2017
1927 switch (elv_may_queue(q, rw, bio)) { 2018 if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
1928 case ELV_MQUEUE_NO: 2019 if (rl->count[rw]+1 >= q->nr_requests) {
1929 goto rq_starved; 2020 ioc = current_io_context(GFP_ATOMIC);
1930 case ELV_MQUEUE_MAY: 2021 /*
1931 break; 2022 * The queue will fill after this allocation, so set
1932 case ELV_MQUEUE_MUST: 2023 * it as full, and mark this process as "batching".
1933 goto get_rq; 2024 * This process will be allowed to complete a batch of
1934 } 2025 * requests, others will be blocked.
1935 2026 */
1936 if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) { 2027 if (!blk_queue_full(q, rw)) {
1937 /* 2028 ioc_set_batching(q, ioc);
1938 * The queue is full and the allocating process is not a 2029 blk_set_queue_full(q, rw);
1939 * "batcher", and not exempted by the IO scheduler 2030 } else {
1940 */ 2031 if (may_queue != ELV_MQUEUE_MUST
1941 goto out; 2032 && !ioc_batching(q, ioc)) {
2033 /*
2034 * The queue is full and the allocating
2035 * process is not a "batcher", and not
2036 * exempted by the IO scheduler
2037 */
2038 goto out;
2039 }
2040 }
2041 }
2042 set_queue_congested(q, rw);
1942 } 2043 }
1943 2044
1944get_rq:
1945 /* 2045 /*
1946 * Only allow batching queuers to allocate up to 50% over the defined 2046 * Only allow batching queuers to allocate up to 50% over the defined
1947 * limit of requests, otherwise we could have thousands of requests 2047 * limit of requests, otherwise we could have thousands of requests
@@ -1952,8 +2052,6 @@ get_rq:
1952 2052
1953 rl->count[rw]++; 2053 rl->count[rw]++;
1954 rl->starved[rw] = 0; 2054 rl->starved[rw] = 0;
1955 if (rl->count[rw] >= queue_congestion_on_threshold(q))
1956 set_queue_congested(q, rw);
1957 2055
1958 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 2056 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
1959 if (priv) 2057 if (priv)
@@ -1962,7 +2060,7 @@ get_rq:
1962 spin_unlock_irq(q->queue_lock); 2060 spin_unlock_irq(q->queue_lock);
1963 2061
1964 rq = blk_alloc_request(q, rw, bio, priv, gfp_mask); 2062 rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
1965 if (!rq) { 2063 if (unlikely(!rq)) {
1966 /* 2064 /*
1967 * Allocation failed presumably due to memory. Undo anything 2065 * Allocation failed presumably due to memory. Undo anything
1968 * we might have messed up. 2066 * we might have messed up.
@@ -1987,6 +2085,12 @@ rq_starved:
1987 goto out; 2085 goto out;
1988 } 2086 }
1989 2087
2088 /*
2089 * ioc may be NULL here, and ioc_batching will be false. That's
2090 * OK, if the queue is under the request limit then requests need
2091 * not count toward the nr_batch_requests limit. There will always
2092 * be some limit enforced by BLK_BATCH_TIME.
2093 */
1990 if (ioc_batching(q, ioc)) 2094 if (ioc_batching(q, ioc))
1991 ioc->nr_batch_requests--; 2095 ioc->nr_batch_requests--;
1992 2096
@@ -2313,7 +2417,7 @@ EXPORT_SYMBOL(blk_rq_map_kern);
2313 */ 2417 */
2314void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk, 2418void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
2315 struct request *rq, int at_head, 2419 struct request *rq, int at_head,
2316 void (*done)(struct request *)) 2420 rq_end_io_fn *done)
2317{ 2421{
2318 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; 2422 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
2319 2423
@@ -2517,7 +2621,7 @@ EXPORT_SYMBOL(blk_put_request);
2517 * blk_end_sync_rq - executes a completion event on a request 2621 * blk_end_sync_rq - executes a completion event on a request
2518 * @rq: request to complete 2622 * @rq: request to complete
2519 */ 2623 */
2520void blk_end_sync_rq(struct request *rq) 2624void blk_end_sync_rq(struct request *rq, int error)
2521{ 2625{
2522 struct completion *waiting = rq->waiting; 2626 struct completion *waiting = rq->waiting;
2523 2627
@@ -2655,6 +2759,36 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
2655 2759
2656EXPORT_SYMBOL(blk_attempt_remerge); 2760EXPORT_SYMBOL(blk_attempt_remerge);
2657 2761
2762static void init_request_from_bio(struct request *req, struct bio *bio)
2763{
2764 req->flags |= REQ_CMD;
2765
2766 /*
2767 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
2768 */
2769 if (bio_rw_ahead(bio) || bio_failfast(bio))
2770 req->flags |= REQ_FAILFAST;
2771
2772 /*
2773 * REQ_BARRIER implies no merging, but lets make it explicit
2774 */
2775 if (unlikely(bio_barrier(bio)))
2776 req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
2777
2778 req->errors = 0;
2779 req->hard_sector = req->sector = bio->bi_sector;
2780 req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
2781 req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
2782 req->nr_phys_segments = bio_phys_segments(req->q, bio);
2783 req->nr_hw_segments = bio_hw_segments(req->q, bio);
2784 req->buffer = bio_data(bio); /* see ->buffer comment above */
2785 req->waiting = NULL;
2786 req->bio = req->biotail = bio;
2787 req->ioprio = bio_prio(bio);
2788 req->rq_disk = bio->bi_bdev->bd_disk;
2789 req->start_time = jiffies;
2790}
2791
2658static int __make_request(request_queue_t *q, struct bio *bio) 2792static int __make_request(request_queue_t *q, struct bio *bio)
2659{ 2793{
2660 struct request *req; 2794 struct request *req;
@@ -2680,7 +2814,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
2680 spin_lock_prefetch(q->queue_lock); 2814 spin_lock_prefetch(q->queue_lock);
2681 2815
2682 barrier = bio_barrier(bio); 2816 barrier = bio_barrier(bio);
2683 if (unlikely(barrier) && (q->ordered == QUEUE_ORDERED_NONE)) { 2817 if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
2684 err = -EOPNOTSUPP; 2818 err = -EOPNOTSUPP;
2685 goto end_io; 2819 goto end_io;
2686 } 2820 }
@@ -2750,33 +2884,7 @@ get_rq:
2750 * We don't worry about that case for efficiency. It won't happen 2884 * We don't worry about that case for efficiency. It won't happen
2751 * often, and the elevators are able to handle it. 2885 * often, and the elevators are able to handle it.
2752 */ 2886 */
2753 2887 init_request_from_bio(req, bio);
2754 req->flags |= REQ_CMD;
2755
2756 /*
2757 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
2758 */
2759 if (bio_rw_ahead(bio) || bio_failfast(bio))
2760 req->flags |= REQ_FAILFAST;
2761
2762 /*
2763 * REQ_BARRIER implies no merging, but lets make it explicit
2764 */
2765 if (unlikely(barrier))
2766 req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
2767
2768 req->errors = 0;
2769 req->hard_sector = req->sector = sector;
2770 req->hard_nr_sectors = req->nr_sectors = nr_sectors;
2771 req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
2772 req->nr_phys_segments = bio_phys_segments(q, bio);
2773 req->nr_hw_segments = bio_hw_segments(q, bio);
2774 req->buffer = bio_data(bio); /* see ->buffer comment above */
2775 req->waiting = NULL;
2776 req->bio = req->biotail = bio;
2777 req->ioprio = prio;
2778 req->rq_disk = bio->bi_bdev->bd_disk;
2779 req->start_time = jiffies;
2780 2888
2781 spin_lock_irq(q->queue_lock); 2889 spin_lock_irq(q->queue_lock);
2782 if (elv_queue_empty(q)) 2890 if (elv_queue_empty(q))
@@ -3067,7 +3175,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
3067 if (nr_bytes >= bio->bi_size) { 3175 if (nr_bytes >= bio->bi_size) {
3068 req->bio = bio->bi_next; 3176 req->bio = bio->bi_next;
3069 nbytes = bio->bi_size; 3177 nbytes = bio->bi_size;
3070 bio_endio(bio, nbytes, error); 3178 if (!ordered_bio_endio(req, bio, nbytes, error))
3179 bio_endio(bio, nbytes, error);
3071 next_idx = 0; 3180 next_idx = 0;
3072 bio_nbytes = 0; 3181 bio_nbytes = 0;
3073 } else { 3182 } else {
@@ -3122,7 +3231,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
3122 * if the request wasn't completed, update state 3231 * if the request wasn't completed, update state
3123 */ 3232 */
3124 if (bio_nbytes) { 3233 if (bio_nbytes) {
3125 bio_endio(bio, bio_nbytes, error); 3234 if (!ordered_bio_endio(req, bio, bio_nbytes, error))
3235 bio_endio(bio, bio_nbytes, error);
3126 bio->bi_idx += next_idx; 3236 bio->bi_idx += next_idx;
3127 bio_iovec(bio)->bv_offset += nr_bytes; 3237 bio_iovec(bio)->bv_offset += nr_bytes;
3128 bio_iovec(bio)->bv_len -= nr_bytes; 3238 bio_iovec(bio)->bv_len -= nr_bytes;
@@ -3179,9 +3289,17 @@ EXPORT_SYMBOL(end_that_request_chunk);
3179/* 3289/*
3180 * queue lock must be held 3290 * queue lock must be held
3181 */ 3291 */
3182void end_that_request_last(struct request *req) 3292void end_that_request_last(struct request *req, int uptodate)
3183{ 3293{
3184 struct gendisk *disk = req->rq_disk; 3294 struct gendisk *disk = req->rq_disk;
3295 int error;
3296
3297 /*
3298 * extend uptodate bool to allow < 0 value to be direct io error
3299 */
3300 error = 0;
3301 if (end_io_error(uptodate))
3302 error = !uptodate ? -EIO : uptodate;
3185 3303
3186 if (unlikely(laptop_mode) && blk_fs_request(req)) 3304 if (unlikely(laptop_mode) && blk_fs_request(req))
3187 laptop_io_completion(); 3305 laptop_io_completion();
@@ -3196,7 +3314,7 @@ void end_that_request_last(struct request *req)
3196 disk->in_flight--; 3314 disk->in_flight--;
3197 } 3315 }
3198 if (req->end_io) 3316 if (req->end_io)
3199 req->end_io(req); 3317 req->end_io(req, error);
3200 else 3318 else
3201 __blk_put_request(req->q, req); 3319 __blk_put_request(req->q, req);
3202} 3320}
@@ -3208,7 +3326,7 @@ void end_request(struct request *req, int uptodate)
3208 if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) { 3326 if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
3209 add_disk_randomness(req->rq_disk); 3327 add_disk_randomness(req->rq_disk);
3210 blkdev_dequeue_request(req); 3328 blkdev_dequeue_request(req);
3211 end_that_request_last(req); 3329 end_that_request_last(req, uptodate);
3212 } 3330 }
3213} 3331}
3214 3332
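The ordered-sequence bookkeeping above is a small bitmask state machine: start_ordered() sets QUEUE_ORDSEQ_STARTED, each completed stage ORs its bit into q->ordseq, and blk_ordered_cur_seq() reports the lowest bit still clear (1 << ffz(q->ordseq)) as the stage currently in progress, which lets stages complete out of order (for example POSTFLUSH is marked done up front when no post flush is needed). The user-space sketch below illustrates only that arithmetic; the numeric QUEUE_ORDSEQ_* values are assumed here to be consecutive power-of-two bits in issue order, since their definitions live in include/linux/blkdev.h, which is not shown in this hunk.

#include <stdio.h>

/* Assumed stand-ins for the QUEUE_ORDSEQ_* bits; only the
 * "increasing power-of-two" property matters for this demo. */
#define ORDSEQ_STARTED		0x01
#define ORDSEQ_DRAIN		0x02
#define ORDSEQ_PREFLUSH		0x04
#define ORDSEQ_BAR		0x08
#define ORDSEQ_POSTFLUSH	0x10
#define ORDSEQ_DONE		0x20

/* index of the lowest clear bit, like the kernel's ffz() */
static unsigned ffz_demo(unsigned x)
{
	unsigned i = 0;

	while (x & 1) {
		x >>= 1;
		i++;
	}
	return i;
}

/* mirrors blk_ordered_cur_seq(): the stage currently in progress */
static unsigned cur_seq(unsigned ordseq)
{
	return ordseq ? 1u << ffz_demo(ordseq) : 0;
}

int main(void)
{
	unsigned ordseq = ORDSEQ_STARTED;	/* set by start_ordered() */

	ordseq |= ORDSEQ_DRAIN;			/* queue drained */
	printf("in progress: %#x (PREFLUSH)\n", cur_seq(ordseq));

	ordseq |= ORDSEQ_PREFLUSH;		/* pre flush finished */
	printf("in progress: %#x (BAR)\n", cur_seq(ordseq));
	return 0;
}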
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 1d8852f7bbff..c2ac36dfe4f3 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(scsi_command_size);
 
 static int sg_get_version(int __user *p)
 {
-	static int sg_version_num = 30527;
+	static const int sg_version_num = 30527;
 	return put_user(sg_version_num, p);
 }
 
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 70eaa5c7ac08..21097a39a057 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3471,7 +3471,7 @@ static inline boolean DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
3471 3471
3472 if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) { 3472 if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
3473 3473
3474 end_that_request_last(Request); 3474 end_that_request_last(Request, UpToDate);
3475 3475
3476 if (Command->Completion) { 3476 if (Command->Completion) {
3477 complete(Command->Completion); 3477 complete(Command->Completion);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index c3441b3f086e..d2815b7a9150 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2310,7 +2310,7 @@ static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
2310 printk("Done with %p\n", cmd->rq); 2310 printk("Done with %p\n", cmd->rq);
2311#endif /* CCISS_DEBUG */ 2311#endif /* CCISS_DEBUG */
2312 2312
2313 end_that_request_last(cmd->rq); 2313 end_that_request_last(cmd->rq, status ? 1 : -EIO);
2314 cmd_free(h,cmd,1); 2314 cmd_free(h,cmd,1);
2315} 2315}
2316 2316
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index cf1822a6361c..9bddb6874873 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1036,7 +1036,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
1036 complete_buffers(cmd->rq->bio, ok); 1036 complete_buffers(cmd->rq->bio, ok);
1037 1037
1038 DBGPX(printk("Done with %p\n", cmd->rq);); 1038 DBGPX(printk("Done with %p\n", cmd->rq););
1039 end_that_request_last(cmd->rq); 1039 end_that_request_last(cmd->rq, ok ? 1 : -EIO);
1040} 1040}
1041 1041
1042/* 1042/*
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index f7e765a1d313..a5b857c5c4b8 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2301,7 +2301,7 @@ static void floppy_end_request(struct request *req, int uptodate)
2301 add_disk_randomness(req->rq_disk); 2301 add_disk_randomness(req->rq_disk);
2302 floppy_off((long)req->rq_disk->private_data); 2302 floppy_off((long)req->rq_disk->private_data);
2303 blkdev_dequeue_request(req); 2303 blkdev_dequeue_request(req);
2304 end_that_request_last(req); 2304 end_that_request_last(req, uptodate);
2305 2305
2306 /* We're done with the request */ 2306 /* We're done with the request */
2307 current_req = NULL; 2307 current_req = NULL;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index d5c8ee7d9815..33d6f237b2ed 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -140,7 +140,7 @@ static void nbd_end_request(struct request *req)
140 140
141 spin_lock_irqsave(q->queue_lock, flags); 141 spin_lock_irqsave(q->queue_lock, flags);
142 if (!end_that_request_first(req, uptodate, req->nr_sectors)) { 142 if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
143 end_that_request_last(req); 143 end_that_request_last(req, uptodate);
144 } 144 }
145 spin_unlock_irqrestore(q->queue_lock, flags); 145 spin_unlock_irqrestore(q->queue_lock, flags);
146} 146}
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 1ded3b433459..9251f4131b53 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -770,7 +770,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
770 rc = end_that_request_first(req, uptodate, req->hard_nr_sectors); 770 rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
771 assert(rc == 0); 771 assert(rc == 0);
772 772
773 end_that_request_last(req); 773 end_that_request_last(req, uptodate);
774 774
775 rc = carm_put_request(host, crq); 775 rc = carm_put_request(host, crq);
776 assert(rc == 0); 776 assert(rc == 0);
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 10740a065088..a05fe5843e6c 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -951,7 +951,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
951static void ub_end_rq(struct request *rq, int uptodate) 951static void ub_end_rq(struct request *rq, int uptodate)
952{ 952{
953 end_that_request_first(rq, uptodate, rq->hard_nr_sectors); 953 end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
954 end_that_request_last(rq); 954 end_that_request_last(rq, uptodate);
955} 955}
956 956
957static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, 957static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 2d518aa2720a..063f0304a163 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -305,7 +305,7 @@ static void viodasd_end_request(struct request *req, int uptodate,
305 if (end_that_request_first(req, uptodate, num_sectors)) 305 if (end_that_request_first(req, uptodate, num_sectors))
306 return; 306 return;
307 add_disk_randomness(req->rq_disk); 307 add_disk_randomness(req->rq_disk);
308 end_that_request_last(req); 308 end_that_request_last(req, uptodate);
309} 309}
310 310
311/* 311/*
diff --git a/drivers/cdrom/cdu31a.c b/drivers/cdrom/cdu31a.c
index ac96de15d833..378e88d20757 100644
--- a/drivers/cdrom/cdu31a.c
+++ b/drivers/cdrom/cdu31a.c
@@ -1402,7 +1402,7 @@ static void do_cdu31a_request(request_queue_t * q)
1402 if (!end_that_request_first(req, 1, nblock)) { 1402 if (!end_that_request_first(req, 1, nblock)) {
1403 spin_lock_irq(q->queue_lock); 1403 spin_lock_irq(q->queue_lock);
1404 blkdev_dequeue_request(req); 1404 blkdev_dequeue_request(req);
1405 end_that_request_last(req); 1405 end_that_request_last(req, 1);
1406 spin_unlock_irq(q->queue_lock); 1406 spin_unlock_irq(q->queue_lock);
1407 } 1407 }
1408 continue; 1408 continue;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 70aeb3a60120..d31117eb95aa 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -614,7 +614,7 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
614 */ 614 */
615 spin_lock_irqsave(&ide_lock, flags); 615 spin_lock_irqsave(&ide_lock, flags);
616 end_that_request_chunk(failed, 0, failed->data_len); 616 end_that_request_chunk(failed, 0, failed->data_len);
617 end_that_request_last(failed); 617 end_that_request_last(failed, 0);
618 spin_unlock_irqrestore(&ide_lock, flags); 618 spin_unlock_irqrestore(&ide_lock, flags);
619 } 619 }
620 620
@@ -1735,7 +1735,7 @@ end_request:
1735 1735
1736 spin_lock_irqsave(&ide_lock, flags); 1736 spin_lock_irqsave(&ide_lock, flags);
1737 blkdev_dequeue_request(rq); 1737 blkdev_dequeue_request(rq);
1738 end_that_request_last(rq); 1738 end_that_request_last(rq, 1);
1739 HWGROUP(drive)->rq = NULL; 1739 HWGROUP(drive)->rq = NULL;
1740 spin_unlock_irqrestore(&ide_lock, flags); 1740 spin_unlock_irqrestore(&ide_lock, flags);
1741 return ide_stopped; 1741 return ide_stopped;
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 4e5767968d7f..4b441720b6ba 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -681,50 +681,9 @@ static ide_proc_entry_t idedisk_proc[] = {
681 681
682#endif /* CONFIG_PROC_FS */ 682#endif /* CONFIG_PROC_FS */
683 683
684static void idedisk_end_flush(request_queue_t *q, struct request *flush_rq) 684static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
685{ 685{
686 ide_drive_t *drive = q->queuedata; 686 ide_drive_t *drive = q->queuedata;
687 struct request *rq = flush_rq->end_io_data;
688 int good_sectors = rq->hard_nr_sectors;
689 int bad_sectors;
690 sector_t sector;
691
692 if (flush_rq->errors & ABRT_ERR) {
693 printk(KERN_ERR "%s: barrier support doesn't work\n", drive->name);
694 blk_queue_ordered(drive->queue, QUEUE_ORDERED_NONE);
695 blk_queue_issue_flush_fn(drive->queue, NULL);
696 good_sectors = 0;
697 } else if (flush_rq->errors) {
698 good_sectors = 0;
699 if (blk_barrier_preflush(rq)) {
700 sector = ide_get_error_location(drive,flush_rq->buffer);
701 if ((sector >= rq->hard_sector) &&
702 (sector < rq->hard_sector + rq->hard_nr_sectors))
703 good_sectors = sector - rq->hard_sector;
704 }
705 }
706
707 if (flush_rq->errors)
708 printk(KERN_ERR "%s: failed barrier write: "
709 "sector=%Lx(good=%d/bad=%d)\n",
710 drive->name, (unsigned long long)rq->sector,
711 good_sectors,
712 (int) (rq->hard_nr_sectors-good_sectors));
713
714 bad_sectors = rq->hard_nr_sectors - good_sectors;
715
716 if (good_sectors)
717 __ide_end_request(drive, rq, 1, good_sectors);
718 if (bad_sectors)
719 __ide_end_request(drive, rq, 0, bad_sectors);
720}
721
722static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
723{
724 ide_drive_t *drive = q->queuedata;
725
726 if (!drive->wcache)
727 return 0;
728 687
729 memset(rq->cmd, 0, sizeof(rq->cmd)); 688 memset(rq->cmd, 0, sizeof(rq->cmd));
730 689
@@ -735,9 +694,8 @@ static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
735 rq->cmd[0] = WIN_FLUSH_CACHE; 694 rq->cmd[0] = WIN_FLUSH_CACHE;
736 695
737 696
738 rq->flags |= REQ_DRIVE_TASK | REQ_SOFTBARRIER; 697 rq->flags |= REQ_DRIVE_TASK;
739 rq->buffer = rq->cmd; 698 rq->buffer = rq->cmd;
740 return 1;
741} 699}
742 700
743static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk, 701static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
@@ -794,27 +752,64 @@ static int set_nowerr(ide_drive_t *drive, int arg)
794 return 0; 752 return 0;
795} 753}
796 754
755static void update_ordered(ide_drive_t *drive)
756{
757 struct hd_driveid *id = drive->id;
758 unsigned ordered = QUEUE_ORDERED_NONE;
759 prepare_flush_fn *prep_fn = NULL;
760 issue_flush_fn *issue_fn = NULL;
761
762 if (drive->wcache) {
763 unsigned long long capacity;
764 int barrier;
765 /*
766 * We must avoid issuing commands a drive does not
767 * understand or we may crash it. We check flush cache
768 * is supported. We also check we have the LBA48 flush
769 * cache if the drive capacity is too large. By this
770 * time we have trimmed the drive capacity if LBA48 is
771 * not available so we don't need to recheck that.
772 */
773 capacity = idedisk_capacity(drive);
774 barrier = ide_id_has_flush_cache(id) &&
775 (drive->addressing == 0 || capacity <= (1ULL << 28) ||
776 ide_id_has_flush_cache_ext(id));
777
778 printk(KERN_INFO "%s: cache flushes %ssupported\n",
779 drive->name, barrier ? "" : "not");
780
781 if (barrier) {
782 ordered = QUEUE_ORDERED_DRAIN_FLUSH;
783 prep_fn = idedisk_prepare_flush;
784 issue_fn = idedisk_issue_flush;
785 }
786 } else
787 ordered = QUEUE_ORDERED_DRAIN;
788
789 blk_queue_ordered(drive->queue, ordered, prep_fn);
790 blk_queue_issue_flush_fn(drive->queue, issue_fn);
791}
792
797static int write_cache(ide_drive_t *drive, int arg) 793static int write_cache(ide_drive_t *drive, int arg)
798{ 794{
799 ide_task_t args; 795 ide_task_t args;
800 int err; 796 int err = 1;
801
802 if (!ide_id_has_flush_cache(drive->id))
803 return 1;
804 797
805 memset(&args, 0, sizeof(ide_task_t)); 798 if (ide_id_has_flush_cache(drive->id)) {
806 args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? 799 memset(&args, 0, sizeof(ide_task_t));
800 args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
807 SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE; 801 SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE;
808 args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES; 802 args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
809 args.command_type = IDE_DRIVE_TASK_NO_DATA; 803 args.command_type = IDE_DRIVE_TASK_NO_DATA;
810 args.handler = &task_no_data_intr; 804 args.handler = &task_no_data_intr;
805 err = ide_raw_taskfile(drive, &args, NULL);
806 if (err == 0)
807 drive->wcache = arg;
808 }
811 809
812 err = ide_raw_taskfile(drive, &args, NULL); 810 update_ordered(drive);
813 if (err)
814 return err;
815 811
816 drive->wcache = arg; 812 return err;
817 return 0;
818} 813}
819 814
820static int do_idedisk_flushcache (ide_drive_t *drive) 815static int do_idedisk_flushcache (ide_drive_t *drive)
@@ -888,7 +883,6 @@ static void idedisk_setup (ide_drive_t *drive)
888{ 883{
889 struct hd_driveid *id = drive->id; 884 struct hd_driveid *id = drive->id;
890 unsigned long long capacity; 885 unsigned long long capacity;
891 int barrier;
892 886
893 idedisk_add_settings(drive); 887 idedisk_add_settings(drive);
894 888
@@ -992,31 +986,6 @@ static void idedisk_setup (ide_drive_t *drive)
992 drive->wcache = 1; 986 drive->wcache = 1;
993 987
994 write_cache(drive, 1); 988 write_cache(drive, 1);
995
996 /*
997 * We must avoid issuing commands a drive does not understand
998 * or we may crash it. We check flush cache is supported. We also
999 * check we have the LBA48 flush cache if the drive capacity is
1000 * too large. By this time we have trimmed the drive capacity if
1001 * LBA48 is not available so we don't need to recheck that.
1002 */
1003 barrier = 0;
1004 if (ide_id_has_flush_cache(id))
1005 barrier = 1;
1006 if (drive->addressing == 1) {
1007 /* Can't issue the correct flush ? */
1008 if (capacity > (1ULL << 28) && !ide_id_has_flush_cache_ext(id))
1009 barrier = 0;
1010 }
1011
1012 printk(KERN_INFO "%s: cache flushes %ssupported\n",
1013 drive->name, barrier ? "" : "not ");
1014 if (barrier) {
1015 blk_queue_ordered(drive->queue, QUEUE_ORDERED_FLUSH);
1016 drive->queue->prepare_flush_fn = idedisk_prepare_flush;
1017 drive->queue->end_flush_fn = idedisk_end_flush;
1018 blk_queue_issue_flush_fn(drive->queue, idedisk_issue_flush);
1019 }
1020} 989}
1021 990
1022static void ide_cacheflush_p(ide_drive_t *drive) 991static void ide_cacheflush_p(ide_drive_t *drive)
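The update_ordered() helper added above centralizes the barrier decision that the idedisk_setup() hunk removes: drain-only ordering when the write cache is off, drain-plus-flush when the drive advertises a cache flush command that reaches its full capacity, and no barrier support otherwise. A minimal standalone sketch of that decision, illustrative only and not part of the patch; pick_ordered() and the ORD_* values are made-up placeholders, not the kernel's constants.

  /* Illustrative only: mirrors the decision made by update_ordered() above. */
  #include <stdio.h>

  enum ordered_mode { ORD_NONE, ORD_DRAIN, ORD_DRAIN_FLUSH };

  static enum ordered_mode pick_ordered(int wcache, int has_flush, int lba48,
                                        unsigned long long cap, int has_flush_ext)
  {
      if (!wcache)
          return ORD_DRAIN;               /* write-through: draining suffices */
      /* Flush-based barriers only if the drive understands a FLUSH CACHE
       * command that covers its whole capacity. */
      if (has_flush && (!lba48 || cap <= (1ULL << 28) || has_flush_ext))
          return ORD_DRAIN_FLUSH;
      return ORD_NONE;                    /* unsafe to issue flushes: no barriers */
  }

  int main(void)
  {
      printf("%d\n", pick_ordered(1, 1, 1, 1ULL << 30, 1)); /* ORD_DRAIN_FLUSH */
      printf("%d\n", pick_ordered(0, 0, 0, 0, 0));          /* ORD_DRAIN */
      return 0;
  }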
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index ecfafcdafea4..b5dc6df8e67d 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -89,7 +89,7 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
89 89
90 blkdev_dequeue_request(rq); 90 blkdev_dequeue_request(rq);
91 HWGROUP(drive)->rq = NULL; 91 HWGROUP(drive)->rq = NULL;
92 end_that_request_last(rq); 92 end_that_request_last(rq, uptodate);
93 ret = 0; 93 ret = 0;
94 } 94 }
95 return ret; 95 return ret;
@@ -119,10 +119,7 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
119 if (!nr_sectors) 119 if (!nr_sectors)
120 nr_sectors = rq->hard_cur_sectors; 120 nr_sectors = rq->hard_cur_sectors;
121 121
122 if (blk_complete_barrier_rq_locked(drive->queue, rq, nr_sectors)) 122 ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
123 ret = rq->nr_sectors != 0;
124 else
125 ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
126 123
127 spin_unlock_irqrestore(&ide_lock, flags); 124 spin_unlock_irqrestore(&ide_lock, flags);
128 return ret; 125 return ret;
@@ -247,7 +244,7 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
247 } 244 }
248 blkdev_dequeue_request(rq); 245 blkdev_dequeue_request(rq);
249 HWGROUP(drive)->rq = NULL; 246 HWGROUP(drive)->rq = NULL;
250 end_that_request_last(rq); 247 end_that_request_last(rq, 1);
251 spin_unlock_irqrestore(&ide_lock, flags); 248 spin_unlock_irqrestore(&ide_lock, flags);
252} 249}
253 250
@@ -379,7 +376,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
379 blkdev_dequeue_request(rq); 376 blkdev_dequeue_request(rq);
380 HWGROUP(drive)->rq = NULL; 377 HWGROUP(drive)->rq = NULL;
381 rq->errors = err; 378 rq->errors = err;
382 end_that_request_last(rq); 379 end_that_request_last(rq, !rq->errors);
383 spin_unlock_irqrestore(&ide_lock, flags); 380 spin_unlock_irqrestore(&ide_lock, flags);
384} 381}
385 382
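Throughout the patch, end_that_request_last() gains an uptodate argument so completion status reaches the per-request end_io callback (the rq_end_io_fn typedef change appears in the blkdev.h hunk further down). A toy model of that callback pattern, illustrative only; the struct and function names here are stand-ins, not kernel types.

  /* Illustrative only: completion status now flows to the end_io callback. */
  #include <stdio.h>

  struct req { const char *name; void (*end_io)(struct req *, int); };

  static void my_end_io(struct req *rq, int uptodate)
  {
      printf("%s completed, %s\n", rq->name, uptodate ? "ok" : "error");
  }

  static void end_last(struct req *rq, int uptodate)
  {
      if (rq->end_io)
          rq->end_io(rq, uptodate);       /* callback sees success/failure */
  }

  int main(void)
  {
      struct req rq = { "barrier", my_end_io };
      end_last(&rq, 1);
      end_last(&rq, 0);
      return 0;
  }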
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index c5807d6e85cc..5b1febed3133 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -468,7 +468,7 @@ static void i2o_block_end_request(struct request *req, int uptodate,
468 468
469 spin_lock_irqsave(q->queue_lock, flags); 469 spin_lock_irqsave(q->queue_lock, flags);
470 470
471 end_that_request_last(req); 471 end_that_request_last(req, uptodate);
472 472
473 if (likely(dev)) { 473 if (likely(dev)) {
474 dev->open_queue_depth--; 474 dev->open_queue_depth--;
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index abcf19116d70..8e380c14bf65 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -263,7 +263,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
263 */ 263 */
264 add_disk_randomness(req->rq_disk); 264 add_disk_randomness(req->rq_disk);
265 blkdev_dequeue_request(req); 265 blkdev_dequeue_request(req);
266 end_that_request_last(req); 266 end_that_request_last(req, 1);
267 } 267 }
268 spin_unlock_irq(&md->lock); 268 spin_unlock_irq(&md->lock);
269 } while (ret); 269 } while (ret);
@@ -289,7 +289,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
289 289
290 add_disk_randomness(req->rq_disk); 290 add_disk_randomness(req->rq_disk);
291 blkdev_dequeue_request(req); 291 blkdev_dequeue_request(req);
292 end_that_request_last(req); 292 end_that_request_last(req, 0);
293 spin_unlock_irq(&md->lock); 293 spin_unlock_irq(&md->lock);
294 294
295 return 0; 295 return 0;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 041e1a621885..f779f674dfa0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1035,7 +1035,7 @@ dasd_end_request(struct request *req, int uptodate)
1035 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 1035 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
1036 BUG(); 1036 BUG();
1037 add_disk_randomness(req->rq_disk); 1037 add_disk_randomness(req->rq_disk);
1038 end_that_request_last(req); 1038 end_that_request_last(req, uptodate);
1039} 1039}
1040 1040
1041/* 1041/*
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 482e07e388c8..5ced2725d6c7 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -78,7 +78,7 @@ tapeblock_end_request(struct request *req, int uptodate)
78{ 78{
79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
80 BUG(); 80 BUG();
81 end_that_request_last(req); 81 end_that_request_last(req, uptodate);
82} 82}
83 83
84static void 84static void
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 887eaa2a3ebf..d113290b5fc0 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -214,7 +214,6 @@ static struct scsi_host_template ahci_sht = {
214 .dma_boundary = AHCI_DMA_BOUNDARY, 214 .dma_boundary = AHCI_DMA_BOUNDARY,
215 .slave_configure = ata_scsi_slave_config, 215 .slave_configure = ata_scsi_slave_config,
216 .bios_param = ata_std_bios_param, 216 .bios_param = ata_std_bios_param,
217 .ordered_flush = 1,
218}; 217};
219 218
220static const struct ata_port_operations ahci_ops = { 219static const struct ata_port_operations ahci_ops = {
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index f79630340028..557788ec4eec 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -187,7 +187,6 @@ static struct scsi_host_template piix_sht = {
187 .dma_boundary = ATA_DMA_BOUNDARY, 187 .dma_boundary = ATA_DMA_BOUNDARY,
188 .slave_configure = ata_scsi_slave_config, 188 .slave_configure = ata_scsi_slave_config,
189 .bios_param = ata_std_bios_param, 189 .bios_param = ata_std_bios_param,
190 .ordered_flush = 1,
191 .resume = ata_scsi_device_resume, 190 .resume = ata_scsi_device_resume,
192 .suspend = ata_scsi_device_suspend, 191 .suspend = ata_scsi_device_suspend,
193}; 192};
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 5b9c2c5a7f0e..66783c860a19 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -347,17 +347,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
347 shost->cmd_per_lun = sht->cmd_per_lun; 347 shost->cmd_per_lun = sht->cmd_per_lun;
348 shost->unchecked_isa_dma = sht->unchecked_isa_dma; 348 shost->unchecked_isa_dma = sht->unchecked_isa_dma;
349 shost->use_clustering = sht->use_clustering; 349 shost->use_clustering = sht->use_clustering;
350 shost->ordered_flush = sht->ordered_flush;
351 shost->ordered_tag = sht->ordered_tag; 350 shost->ordered_tag = sht->ordered_tag;
352 351
353 /*
354 * hosts/devices that do queueing must support ordered tags
355 */
356 if (shost->can_queue > 1 && shost->ordered_flush) {
357 printk(KERN_ERR "scsi: ordered flushes don't support queueing\n");
358 shost->ordered_flush = 0;
359 }
360
361 if (sht->max_host_blocked) 352 if (sht->max_host_blocked)
362 shost->max_host_blocked = sht->max_host_blocked; 353 shost->max_host_blocked = sht->max_host_blocked;
363 else 354 else
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 4cb1f3ed9100..3c688ef54660 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -1046,7 +1046,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
1046 1046
1047 /* kill current request */ 1047 /* kill current request */
1048 blkdev_dequeue_request(req); 1048 blkdev_dequeue_request(req);
1049 end_that_request_last(req); 1049 end_that_request_last(req, 0);
1050 if (req->flags & REQ_SENSE) 1050 if (req->flags & REQ_SENSE)
1051 kfree(scsi->pc->buffer); 1051 kfree(scsi->pc->buffer);
1052 kfree(scsi->pc); 1052 kfree(scsi->pc);
@@ -1056,7 +1056,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
1056 /* now nuke the drive queue */ 1056 /* now nuke the drive queue */
1057 while ((req = elv_next_request(drive->queue))) { 1057 while ((req = elv_next_request(drive->queue))) {
1058 blkdev_dequeue_request(req); 1058 blkdev_dequeue_request(req);
1059 end_that_request_last(req); 1059 end_that_request_last(req, 0);
1060 } 1060 }
1061 1061
1062 HWGROUP(drive)->rq = NULL; 1062 HWGROUP(drive)->rq = NULL;
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 9c66d4059399..f55b9b3f7b37 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -562,16 +562,28 @@ static const u8 ata_rw_cmds[] = {
562 ATA_CMD_WRITE_MULTI, 562 ATA_CMD_WRITE_MULTI,
563 ATA_CMD_READ_MULTI_EXT, 563 ATA_CMD_READ_MULTI_EXT,
564 ATA_CMD_WRITE_MULTI_EXT, 564 ATA_CMD_WRITE_MULTI_EXT,
565 0,
566 0,
567 0,
568 ATA_CMD_WRITE_MULTI_FUA_EXT,
565 /* pio */ 569 /* pio */
566 ATA_CMD_PIO_READ, 570 ATA_CMD_PIO_READ,
567 ATA_CMD_PIO_WRITE, 571 ATA_CMD_PIO_WRITE,
568 ATA_CMD_PIO_READ_EXT, 572 ATA_CMD_PIO_READ_EXT,
569 ATA_CMD_PIO_WRITE_EXT, 573 ATA_CMD_PIO_WRITE_EXT,
574 0,
575 0,
576 0,
577 0,
570 /* dma */ 578 /* dma */
571 ATA_CMD_READ, 579 ATA_CMD_READ,
572 ATA_CMD_WRITE, 580 ATA_CMD_WRITE,
573 ATA_CMD_READ_EXT, 581 ATA_CMD_READ_EXT,
574 ATA_CMD_WRITE_EXT 582 ATA_CMD_WRITE_EXT,
583 0,
584 0,
585 0,
586 ATA_CMD_WRITE_FUA_EXT
575}; 587};
576 588
577/** 589/**
@@ -584,25 +596,32 @@ static const u8 ata_rw_cmds[] = {
584 * LOCKING: 596 * LOCKING:
585 * caller. 597 * caller.
586 */ 598 */
587void ata_rwcmd_protocol(struct ata_queued_cmd *qc) 599int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
588{ 600{
589 struct ata_taskfile *tf = &qc->tf; 601 struct ata_taskfile *tf = &qc->tf;
590 struct ata_device *dev = qc->dev; 602 struct ata_device *dev = qc->dev;
603 u8 cmd;
591 604
592 int index, lba48, write; 605 int index, fua, lba48, write;
593 606
607 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
594 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; 608 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
595 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; 609 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
596 610
597 if (dev->flags & ATA_DFLAG_PIO) { 611 if (dev->flags & ATA_DFLAG_PIO) {
598 tf->protocol = ATA_PROT_PIO; 612 tf->protocol = ATA_PROT_PIO;
599 index = dev->multi_count ? 0 : 4; 613 index = dev->multi_count ? 0 : 8;
600 } else { 614 } else {
601 tf->protocol = ATA_PROT_DMA; 615 tf->protocol = ATA_PROT_DMA;
602 index = 8; 616 index = 16;
603 } 617 }
604 618
605 tf->command = ata_rw_cmds[index + lba48 + write]; 619 cmd = ata_rw_cmds[index + fua + lba48 + write];
620 if (cmd) {
621 tf->command = cmd;
622 return 0;
623 }
624 return -1;
606} 625}
607 626
608static const char * const xfer_mode_str[] = { 627static const char * const xfer_mode_str[] = {
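The ata_rw_cmds[] table above grows from 12 to 24 entries so the opcode can be selected by a single index of the form base + 4*FUA + 2*LBA48 + write, with zero marking combinations that have no command (which is why ata_rwcmd_protocol() can now fail and return -1). A standalone sketch of that lookup, illustrative only; the opcode values follow the ATA command set (several appear in the include/linux/ata.h hunk below), but the table and helper here are not the kernel's.

  /* Illustrative only: models the index arithmetic in ata_rwcmd_protocol(). */
  #include <stdio.h>

  static const unsigned char rw_cmds[] = {
      /* multi (base 0):  READ/WRITE MULTI, their EXT forms, FUA slot */
      0xC4, 0xC5, 0x29, 0x39, 0, 0, 0, 0xCE,
      /* pio   (base 8):  no FUA variants exist */
      0x20, 0x30, 0x24, 0x34, 0, 0, 0, 0,
      /* dma   (base 16): WRITE DMA FUA EXT in the last slot */
      0xC8, 0xCA, 0x25, 0x35, 0, 0, 0, 0x3D,
  };

  /* Returns 0 and stores the opcode, or -1 if the combination is invalid. */
  static int pick_cmd(int base, int fua, int lba48, int write, unsigned char *out)
  {
      unsigned char cmd = rw_cmds[base + 4 * fua + 2 * lba48 + write];
      if (!cmd)
          return -1;
      *out = cmd;
      return 0;
  }

  int main(void)
  {
      unsigned char cmd;
      if (pick_cmd(16, 1, 1, 1, &cmd) == 0)      /* DMA + FUA + LBA48 + write */
          printf("0x%02X\n", cmd);               /* -> 0x3D, WRITE DMA FUA EXT */
      printf("%d\n", pick_cmd(16, 1, 0, 1, &cmd)); /* FUA without LBA48: rejected */
      return 0;
  }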
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index c1ebede14a48..cfbceb504718 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -1096,11 +1096,13 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1096 scsicmd[0] == WRITE_16) 1096 scsicmd[0] == WRITE_16)
1097 tf->flags |= ATA_TFLAG_WRITE; 1097 tf->flags |= ATA_TFLAG_WRITE;
1098 1098
1099 /* Calculate the SCSI LBA and transfer length. */ 1099 /* Calculate the SCSI LBA, transfer length and FUA. */
1100 switch (scsicmd[0]) { 1100 switch (scsicmd[0]) {
1101 case READ_10: 1101 case READ_10:
1102 case WRITE_10: 1102 case WRITE_10:
1103 scsi_10_lba_len(scsicmd, &block, &n_block); 1103 scsi_10_lba_len(scsicmd, &block, &n_block);
1104 if (unlikely(scsicmd[1] & (1 << 3)))
1105 tf->flags |= ATA_TFLAG_FUA;
1104 break; 1106 break;
1105 case READ_6: 1107 case READ_6:
1106 case WRITE_6: 1108 case WRITE_6:
@@ -1115,6 +1117,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1115 case READ_16: 1117 case READ_16:
1116 case WRITE_16: 1118 case WRITE_16:
1117 scsi_16_lba_len(scsicmd, &block, &n_block); 1119 scsi_16_lba_len(scsicmd, &block, &n_block);
1120 if (unlikely(scsicmd[1] & (1 << 3)))
1121 tf->flags |= ATA_TFLAG_FUA;
1118 break; 1122 break;
1119 default: 1123 default:
1120 DPRINTK("no-byte command\n"); 1124 DPRINTK("no-byte command\n");
@@ -1158,7 +1162,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1158 tf->device |= (block >> 24) & 0xf; 1162 tf->device |= (block >> 24) & 0xf;
1159 } 1163 }
1160 1164
1161 ata_rwcmd_protocol(qc); 1165 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1166 goto invalid_fld;
1162 1167
1163 qc->nsect = n_block; 1168 qc->nsect = n_block;
1164 tf->nsect = n_block & 0xff; 1169 tf->nsect = n_block & 0xff;
@@ -1176,7 +1181,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1176 if ((block >> 28) || (n_block > 256)) 1181 if ((block >> 28) || (n_block > 256))
1177 goto out_of_range; 1182 goto out_of_range;
1178 1183
1179 ata_rwcmd_protocol(qc); 1184 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1185 goto invalid_fld;
1180 1186
1181 /* Convert LBA to CHS */ 1187 /* Convert LBA to CHS */
1182 track = (u32)block / dev->sectors; 1188 track = (u32)block / dev->sectors;
@@ -1711,6 +1717,7 @@ static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
1711unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, 1717unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1712 unsigned int buflen) 1718 unsigned int buflen)
1713{ 1719{
1720 struct ata_device *dev = args->dev;
1714 u8 *scsicmd = args->cmd->cmnd, *p, *last; 1721 u8 *scsicmd = args->cmd->cmnd, *p, *last;
1715 const u8 sat_blk_desc[] = { 1722 const u8 sat_blk_desc[] = {
1716 0, 0, 0, 0, /* number of blocks: sat unspecified */ 1723 0, 0, 0, 0, /* number of blocks: sat unspecified */
@@ -1719,6 +1726,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1719 }; 1726 };
1720 u8 pg, spg; 1727 u8 pg, spg;
1721 unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen; 1728 unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen;
1729 u8 dpofua;
1722 1730
1723 VPRINTK("ENTER\n"); 1731 VPRINTK("ENTER\n");
1724 1732
@@ -1787,9 +1795,17 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1787 1795
1788 if (minlen < 1) 1796 if (minlen < 1)
1789 return 0; 1797 return 0;
1798
1799 dpofua = 0;
1800 if (ata_id_has_fua(args->id) && dev->flags & ATA_DFLAG_LBA48 &&
1801 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
1802 dpofua = 1 << 4;
1803
1790 if (six_byte) { 1804 if (six_byte) {
1791 output_len--; 1805 output_len--;
1792 rbuf[0] = output_len; 1806 rbuf[0] = output_len;
1807 if (minlen > 2)
1808 rbuf[2] |= dpofua;
1793 if (ebd) { 1809 if (ebd) {
1794 if (minlen > 3) 1810 if (minlen > 3)
1795 rbuf[3] = sizeof(sat_blk_desc); 1811 rbuf[3] = sizeof(sat_blk_desc);
@@ -1802,6 +1818,8 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1802 rbuf[0] = output_len >> 8; 1818 rbuf[0] = output_len >> 8;
1803 if (minlen > 1) 1819 if (minlen > 1)
1804 rbuf[1] = output_len; 1820 rbuf[1] = output_len;
1821 if (minlen > 3)
1822 rbuf[3] |= dpofua;
1805 if (ebd) { 1823 if (ebd) {
1806 if (minlen > 7) 1824 if (minlen > 7)
1807 rbuf[7] = sizeof(sat_blk_desc); 1825 rbuf[7] = sizeof(sat_blk_desc);
@@ -2462,7 +2480,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2462 if (xlat_func) 2480 if (xlat_func)
2463 ata_scsi_translate(ap, dev, cmd, done, xlat_func); 2481 ata_scsi_translate(ap, dev, cmd, done, xlat_func);
2464 else 2482 else
2465 ata_scsi_simulate(dev->id, cmd, done); 2483 ata_scsi_simulate(ap, dev, cmd, done);
2466 } else 2484 } else
2467 ata_scsi_translate(ap, dev, cmd, done, atapi_xlat); 2485 ata_scsi_translate(ap, dev, cmd, done, atapi_xlat);
2468 2486
@@ -2485,14 +2503,16 @@ out_unlock:
2485 * spin_lock_irqsave(host_set lock) 2503 * spin_lock_irqsave(host_set lock)
2486 */ 2504 */
2487 2505
2488void ata_scsi_simulate(u16 *id, 2506void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
2489 struct scsi_cmnd *cmd, 2507 struct scsi_cmnd *cmd,
2490 void (*done)(struct scsi_cmnd *)) 2508 void (*done)(struct scsi_cmnd *))
2491{ 2509{
2492 struct ata_scsi_args args; 2510 struct ata_scsi_args args;
2493 const u8 *scsicmd = cmd->cmnd; 2511 const u8 *scsicmd = cmd->cmnd;
2494 2512
2495 args.id = id; 2513 args.ap = ap;
2514 args.dev = dev;
2515 args.id = dev->id;
2496 args.cmd = cmd; 2516 args.cmd = cmd;
2497 args.done = done; 2517 args.done = done;
2498 2518
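ata_scsi_rw_xlat() above reads the FUA flag out of the CDB: in READ/WRITE(10) and READ/WRITE(16) it is bit 3 of byte 1, and when set the taskfile gets ATA_TFLAG_FUA. A minimal standalone check of that bit, illustrative only; the WRITE(10) opcode 0x2A is standard SCSI and not taken from this patch.

  /* Illustrative only: the FUA bit test added in ata_scsi_rw_xlat() above. */
  #include <stdio.h>

  static int cdb_has_fua(const unsigned char *cdb)
  {
      return (cdb[1] >> 3) & 1;               /* byte 1, bit 3 = FUA */
  }

  int main(void)
  {
      unsigned char write10_fua[10] = { 0x2A, 0x08 };  /* WRITE(10), FUA set */
      unsigned char write10[10]     = { 0x2A, 0x00 };  /* WRITE(10), FUA clear */
      printf("%d %d\n", cdb_has_fua(write10_fua), cdb_has_fua(write10));
      return 0;
  }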
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 251e53bdc6e0..e03ce48b7b4b 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -32,6 +32,8 @@
32#define DRV_VERSION "1.20" /* must be exactly four chars */ 32#define DRV_VERSION "1.20" /* must be exactly four chars */
33 33
34struct ata_scsi_args { 34struct ata_scsi_args {
35 struct ata_port *ap;
36 struct ata_device *dev;
35 u16 *id; 37 u16 *id;
36 struct scsi_cmnd *cmd; 38 struct scsi_cmnd *cmd;
37 void (*done)(struct scsi_cmnd *); 39 void (*done)(struct scsi_cmnd *);
@@ -41,7 +43,7 @@ struct ata_scsi_args {
41extern int atapi_enabled; 43extern int atapi_enabled;
42extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 44extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
43 struct ata_device *dev); 45 struct ata_device *dev);
44extern void ata_rwcmd_protocol(struct ata_queued_cmd *qc); 46extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
45extern void ata_qc_free(struct ata_queued_cmd *qc); 47extern void ata_qc_free(struct ata_queued_cmd *qc);
46extern int ata_qc_issue(struct ata_queued_cmd *qc); 48extern int ata_qc_issue(struct ata_queued_cmd *qc);
47extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 49extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index b2bf16a9bf4b..cd54244058b5 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -374,7 +374,6 @@ static struct scsi_host_template mv_sht = {
374 .dma_boundary = MV_DMA_BOUNDARY, 374 .dma_boundary = MV_DMA_BOUNDARY,
375 .slave_configure = ata_scsi_slave_config, 375 .slave_configure = ata_scsi_slave_config,
376 .bios_param = ata_std_bios_param, 376 .bios_param = ata_std_bios_param,
377 .ordered_flush = 1,
378}; 377};
379 378
380static const struct ata_port_operations mv5_ops = { 379static const struct ata_port_operations mv5_ops = {
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 4954896dfdb9..c0cf52cb975a 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -235,7 +235,6 @@ static struct scsi_host_template nv_sht = {
235 .dma_boundary = ATA_DMA_BOUNDARY, 235 .dma_boundary = ATA_DMA_BOUNDARY,
236 .slave_configure = ata_scsi_slave_config, 236 .slave_configure = ata_scsi_slave_config,
237 .bios_param = ata_std_bios_param, 237 .bios_param = ata_std_bios_param,
238 .ordered_flush = 1,
239}; 238};
240 239
241static const struct ata_port_operations nv_ops = { 240static const struct ata_port_operations nv_ops = {
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index da7fa04b8a73..3d1ea09a06a1 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -114,7 +114,6 @@ static struct scsi_host_template pdc_ata_sht = {
114 .dma_boundary = ATA_DMA_BOUNDARY, 114 .dma_boundary = ATA_DMA_BOUNDARY,
115 .slave_configure = ata_scsi_slave_config, 115 .slave_configure = ata_scsi_slave_config,
116 .bios_param = ata_std_bios_param, 116 .bios_param = ata_std_bios_param,
117 .ordered_flush = 1,
118}; 117};
119 118
120static const struct ata_port_operations pdc_sata_ops = { 119static const struct ata_port_operations pdc_sata_ops = {
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index d2053487c73b..b017f85e6d6a 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -147,7 +147,6 @@ static struct scsi_host_template sil_sht = {
147 .dma_boundary = ATA_DMA_BOUNDARY, 147 .dma_boundary = ATA_DMA_BOUNDARY,
148 .slave_configure = ata_scsi_slave_config, 148 .slave_configure = ata_scsi_slave_config,
149 .bios_param = ata_std_bios_param, 149 .bios_param = ata_std_bios_param,
150 .ordered_flush = 1,
151}; 150};
152 151
153static const struct ata_port_operations sil_ops = { 152static const struct ata_port_operations sil_ops = {
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index a0ad3ed2200a..923130185a9e 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -292,7 +292,6 @@ static struct scsi_host_template sil24_sht = {
292 .dma_boundary = ATA_DMA_BOUNDARY, 292 .dma_boundary = ATA_DMA_BOUNDARY,
293 .slave_configure = ata_scsi_slave_config, 293 .slave_configure = ata_scsi_slave_config,
294 .bios_param = ata_std_bios_param, 294 .bios_param = ata_std_bios_param,
295 .ordered_flush = 1, /* NCQ not supported yet */
296}; 295};
297 296
298static const struct ata_port_operations sil24_ops = { 297static const struct ata_port_operations sil24_ops = {
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 32e12620b162..2df8c5632ac3 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -99,7 +99,6 @@ static struct scsi_host_template sis_sht = {
99 .dma_boundary = ATA_DMA_BOUNDARY, 99 .dma_boundary = ATA_DMA_BOUNDARY,
100 .slave_configure = ata_scsi_slave_config, 100 .slave_configure = ata_scsi_slave_config,
101 .bios_param = ata_std_bios_param, 101 .bios_param = ata_std_bios_param,
102 .ordered_flush = 1,
103}; 102};
104 103
105static const struct ata_port_operations sis_ops = { 104static const struct ata_port_operations sis_ops = {
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 6e7f7c83a75a..668373590aa4 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -303,7 +303,6 @@ static struct scsi_host_template k2_sata_sht = {
303 .proc_info = k2_sata_proc_info, 303 .proc_info = k2_sata_proc_info,
304#endif 304#endif
305 .bios_param = ata_std_bios_param, 305 .bios_param = ata_std_bios_param,
306 .ordered_flush = 1,
307}; 306};
308 307
309 308
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 94b253b80da8..bc87c16c80d2 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -194,7 +194,6 @@ static struct scsi_host_template pdc_sata_sht = {
194 .dma_boundary = ATA_DMA_BOUNDARY, 194 .dma_boundary = ATA_DMA_BOUNDARY,
195 .slave_configure = ata_scsi_slave_config, 195 .slave_configure = ata_scsi_slave_config,
196 .bios_param = ata_std_bios_param, 196 .bios_param = ata_std_bios_param,
197 .ordered_flush = 1,
198}; 197};
199 198
200static const struct ata_port_operations pdc_20621_ops = { 199static const struct ata_port_operations pdc_20621_ops = {
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index b2422a0f25c8..9635ca700977 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -87,7 +87,6 @@ static struct scsi_host_template uli_sht = {
87 .dma_boundary = ATA_DMA_BOUNDARY, 87 .dma_boundary = ATA_DMA_BOUNDARY,
88 .slave_configure = ata_scsi_slave_config, 88 .slave_configure = ata_scsi_slave_config,
89 .bios_param = ata_std_bios_param, 89 .bios_param = ata_std_bios_param,
90 .ordered_flush = 1,
91}; 90};
92 91
93static const struct ata_port_operations uli_ops = { 92static const struct ata_port_operations uli_ops = {
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index c76215692da2..6d5b0a794cfd 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -106,7 +106,6 @@ static struct scsi_host_template svia_sht = {
106 .dma_boundary = ATA_DMA_BOUNDARY, 106 .dma_boundary = ATA_DMA_BOUNDARY,
107 .slave_configure = ata_scsi_slave_config, 107 .slave_configure = ata_scsi_slave_config,
108 .bios_param = ata_std_bios_param, 108 .bios_param = ata_std_bios_param,
109 .ordered_flush = 1,
110}; 109};
111 110
112static const struct ata_port_operations svia_sata_ops = { 111static const struct ata_port_operations svia_sata_ops = {
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index fcfa486965b4..2e2c3b7acb0c 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -235,7 +235,6 @@ static struct scsi_host_template vsc_sata_sht = {
235 .dma_boundary = ATA_DMA_BOUNDARY, 235 .dma_boundary = ATA_DMA_BOUNDARY,
236 .slave_configure = ata_scsi_slave_config, 236 .slave_configure = ata_scsi_slave_config,
237 .bios_param = ata_std_bios_param, 237 .bios_param = ata_std_bios_param,
238 .ordered_flush = 1,
239}; 238};
240 239
241 240
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a7f3f0c84db7..ba93d6e66d48 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -308,7 +308,7 @@ struct scsi_io_context {
308 308
309static kmem_cache_t *scsi_io_context_cache; 309static kmem_cache_t *scsi_io_context_cache;
310 310
311static void scsi_end_async(struct request *req) 311static void scsi_end_async(struct request *req, int uptodate)
312{ 312{
313 struct scsi_io_context *sioc = req->end_io_data; 313 struct scsi_io_context *sioc = req->end_io_data;
314 314
@@ -791,7 +791,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
791 spin_lock_irqsave(q->queue_lock, flags); 791 spin_lock_irqsave(q->queue_lock, flags);
792 if (blk_rq_tagged(req)) 792 if (blk_rq_tagged(req))
793 blk_queue_end_tag(q, req); 793 blk_queue_end_tag(q, req);
794 end_that_request_last(req); 794 end_that_request_last(req, uptodate);
795 spin_unlock_irqrestore(q->queue_lock, flags); 795 spin_unlock_irqrestore(q->queue_lock, flags);
796 796
797 /* 797 /*
@@ -932,9 +932,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
932 int sense_valid = 0; 932 int sense_valid = 0;
933 int sense_deferred = 0; 933 int sense_deferred = 0;
934 934
935 if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
936 return;
937
938 /* 935 /*
939 * Free up any indirection buffers we allocated for DMA purposes. 936 * Free up any indirection buffers we allocated for DMA purposes.
940 * For the case of a READ, we need to copy the data out of the 937 * For the case of a READ, we need to copy the data out of the
@@ -1199,38 +1196,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
1199 return BLKPREP_KILL; 1196 return BLKPREP_KILL;
1200} 1197}
1201 1198
1202static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
1203{
1204 struct scsi_device *sdev = q->queuedata;
1205 struct scsi_driver *drv;
1206
1207 if (sdev->sdev_state == SDEV_RUNNING) {
1208 drv = *(struct scsi_driver **) rq->rq_disk->private_data;
1209
1210 if (drv->prepare_flush)
1211 return drv->prepare_flush(q, rq);
1212 }
1213
1214 return 0;
1215}
1216
1217static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
1218{
1219 struct scsi_device *sdev = q->queuedata;
1220 struct request *flush_rq = rq->end_io_data;
1221 struct scsi_driver *drv;
1222
1223 if (flush_rq->errors) {
1224 printk("scsi: barrier error, disabling flush support\n");
1225 blk_queue_ordered(q, QUEUE_ORDERED_NONE);
1226 }
1227
1228 if (sdev->sdev_state == SDEV_RUNNING) {
1229 drv = *(struct scsi_driver **) rq->rq_disk->private_data;
1230 drv->end_flush(q, rq);
1231 }
1232}
1233
1234static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk, 1199static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
1235 sector_t *error_sector) 1200 sector_t *error_sector)
1236{ 1201{
@@ -1703,17 +1668,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1703 blk_queue_segment_boundary(q, shost->dma_boundary); 1668 blk_queue_segment_boundary(q, shost->dma_boundary);
1704 blk_queue_issue_flush_fn(q, scsi_issue_flush_fn); 1669 blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1705 1670
1706 /*
1707 * ordered tags are superior to flush ordering
1708 */
1709 if (shost->ordered_tag)
1710 blk_queue_ordered(q, QUEUE_ORDERED_TAG);
1711 else if (shost->ordered_flush) {
1712 blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
1713 q->prepare_flush_fn = scsi_prepare_flush_fn;
1714 q->end_flush_fn = scsi_end_flush_fn;
1715 }
1716
1717 if (!shost->use_clustering) 1671 if (!shost->use_clustering)
1718 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 1672 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1719 return q; 1673 return q;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3d3ad7d1b779..32d4d8d7b9f3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -102,6 +102,7 @@ struct scsi_disk {
102 u8 write_prot; 102 u8 write_prot;
103 unsigned WCE : 1; /* state of disk WCE bit */ 103 unsigned WCE : 1; /* state of disk WCE bit */
104 unsigned RCD : 1; /* state of disk RCD bit, unused */ 104 unsigned RCD : 1; /* state of disk RCD bit, unused */
105 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
105}; 106};
106 107
107static DEFINE_IDR(sd_index_idr); 108static DEFINE_IDR(sd_index_idr);
@@ -121,8 +122,7 @@ static void sd_shutdown(struct device *dev);
121static void sd_rescan(struct device *); 122static void sd_rescan(struct device *);
122static int sd_init_command(struct scsi_cmnd *); 123static int sd_init_command(struct scsi_cmnd *);
123static int sd_issue_flush(struct device *, sector_t *); 124static int sd_issue_flush(struct device *, sector_t *);
124static void sd_end_flush(request_queue_t *, struct request *); 125static void sd_prepare_flush(request_queue_t *, struct request *);
125static int sd_prepare_flush(request_queue_t *, struct request *);
126static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname, 126static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
127 unsigned char *buffer); 127 unsigned char *buffer);
128 128
@@ -137,8 +137,6 @@ static struct scsi_driver sd_template = {
137 .rescan = sd_rescan, 137 .rescan = sd_rescan,
138 .init_command = sd_init_command, 138 .init_command = sd_init_command,
139 .issue_flush = sd_issue_flush, 139 .issue_flush = sd_issue_flush,
140 .prepare_flush = sd_prepare_flush,
141 .end_flush = sd_end_flush,
142}; 140};
143 141
144/* 142/*
@@ -346,6 +344,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
346 344
347 if (block > 0xffffffff) { 345 if (block > 0xffffffff) {
348 SCpnt->cmnd[0] += READ_16 - READ_6; 346 SCpnt->cmnd[0] += READ_16 - READ_6;
347 SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0;
349 SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; 348 SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
350 SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0; 349 SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
351 SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0; 350 SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
@@ -365,6 +364,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
365 this_count = 0xffff; 364 this_count = 0xffff;
366 365
367 SCpnt->cmnd[0] += READ_10 - READ_6; 366 SCpnt->cmnd[0] += READ_10 - READ_6;
367 SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0;
368 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; 368 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
369 SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; 369 SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
370 SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; 370 SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
@@ -373,6 +373,17 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
373 SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; 373 SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
374 SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; 374 SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
375 } else { 375 } else {
376 if (unlikely(blk_fua_rq(rq))) {
377 /*
378 * This happens only if this drive failed
379 * 10byte rw command with ILLEGAL_REQUEST
380 * during operation and thus turned off
381 * use_10_for_rw.
382 */
383 printk(KERN_ERR "sd: FUA write on READ/WRITE(6) drive\n");
384 return 0;
385 }
386
376 SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f); 387 SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
377 SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff); 388 SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
378 SCpnt->cmnd[3] = (unsigned char) block & 0xff; 389 SCpnt->cmnd[3] = (unsigned char) block & 0xff;
@@ -729,42 +740,13 @@ static int sd_issue_flush(struct device *dev, sector_t *error_sector)
729 return ret; 740 return ret;
730} 741}
731 742
732static void sd_end_flush(request_queue_t *q, struct request *flush_rq) 743static void sd_prepare_flush(request_queue_t *q, struct request *rq)
733{
734 struct request *rq = flush_rq->end_io_data;
735 struct scsi_cmnd *cmd = rq->special;
736 unsigned int bytes = rq->hard_nr_sectors << 9;
737
738 if (!flush_rq->errors) {
739 spin_unlock(q->queue_lock);
740 scsi_io_completion(cmd, bytes, 0);
741 spin_lock(q->queue_lock);
742 } else if (blk_barrier_postflush(rq)) {
743 spin_unlock(q->queue_lock);
744 scsi_io_completion(cmd, 0, bytes);
745 spin_lock(q->queue_lock);
746 } else {
747 /*
748 * force journal abort of barriers
749 */
750 end_that_request_first(rq, -EOPNOTSUPP, rq->hard_nr_sectors);
751 end_that_request_last(rq);
752 }
753}
754
755static int sd_prepare_flush(request_queue_t *q, struct request *rq)
756{ 744{
757 struct scsi_device *sdev = q->queuedata;
758 struct scsi_disk *sdkp = dev_get_drvdata(&sdev->sdev_gendev);
759
760 if (!sdkp || !sdkp->WCE)
761 return 0;
762
763 memset(rq->cmd, 0, sizeof(rq->cmd)); 745 memset(rq->cmd, 0, sizeof(rq->cmd));
764 rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER; 746 rq->flags |= REQ_BLOCK_PC;
765 rq->timeout = SD_TIMEOUT; 747 rq->timeout = SD_TIMEOUT;
766 rq->cmd[0] = SYNCHRONIZE_CACHE; 748 rq->cmd[0] = SYNCHRONIZE_CACHE;
767 return 1; 749 rq->cmd_len = 10;
768} 750}
769 751
770static void sd_rescan(struct device *dev) 752static void sd_rescan(struct device *dev)
@@ -1427,10 +1409,18 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1427 sdkp->RCD = 0; 1409 sdkp->RCD = 0;
1428 } 1410 }
1429 1411
1412 sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
1413 if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
1414 printk(KERN_NOTICE "SCSI device %s: uses "
1415 "READ/WRITE(6), disabling FUA\n", diskname);
1416 sdkp->DPOFUA = 0;
1417 }
1418
1430 ct = sdkp->RCD + 2*sdkp->WCE; 1419 ct = sdkp->RCD + 2*sdkp->WCE;
1431 1420
1432 printk(KERN_NOTICE "SCSI device %s: drive cache: %s\n", 1421 printk(KERN_NOTICE "SCSI device %s: drive cache: %s%s\n",
1433 diskname, types[ct]); 1422 diskname, types[ct],
1423 sdkp->DPOFUA ? " w/ FUA" : "");
1434 1424
1435 return; 1425 return;
1436 } 1426 }
@@ -1462,6 +1452,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
1462 struct scsi_disk *sdkp = scsi_disk(disk); 1452 struct scsi_disk *sdkp = scsi_disk(disk);
1463 struct scsi_device *sdp = sdkp->device; 1453 struct scsi_device *sdp = sdkp->device;
1464 unsigned char *buffer; 1454 unsigned char *buffer;
1455 unsigned ordered;
1465 1456
1466 SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name)); 1457 SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name));
1467 1458
@@ -1498,7 +1489,21 @@ static int sd_revalidate_disk(struct gendisk *disk)
1498 sd_read_write_protect_flag(sdkp, disk->disk_name, buffer); 1489 sd_read_write_protect_flag(sdkp, disk->disk_name, buffer);
1499 sd_read_cache_type(sdkp, disk->disk_name, buffer); 1490 sd_read_cache_type(sdkp, disk->disk_name, buffer);
1500 } 1491 }
1501 1492
1493 /*
1494 * We now have all cache related info, determine how we deal
1495 * with ordered requests. Note that as the current SCSI
1496 * dispatch function can alter request order, we cannot use
1497 * QUEUE_ORDERED_TAG_* even when ordered tag is supported.
1498 */
1499 if (sdkp->WCE)
1500 ordered = sdkp->DPOFUA
1501 ? QUEUE_ORDERED_DRAIN_FUA : QUEUE_ORDERED_DRAIN_FLUSH;
1502 else
1503 ordered = QUEUE_ORDERED_DRAIN;
1504
1505 blk_queue_ordered(sdkp->disk->queue, ordered, sd_prepare_flush);
1506
1502 set_capacity(disk, sdkp->capacity); 1507 set_capacity(disk, sdkp->capacity);
1503 kfree(buffer); 1508 kfree(buffer);
1504 1509
@@ -1598,6 +1603,7 @@ static int sd_probe(struct device *dev)
1598 strcpy(gd->devfs_name, sdp->devfs_name); 1603 strcpy(gd->devfs_name, sdp->devfs_name);
1599 1604
1600 gd->private_data = &sdkp->driver; 1605 gd->private_data = &sdkp->driver;
1606 gd->queue = sdkp->device->request_queue;
1601 1607
1602 sd_revalidate_disk(gd); 1608 sd_revalidate_disk(gd);
1603 1609
@@ -1605,7 +1611,6 @@ static int sd_probe(struct device *dev)
1605 gd->flags = GENHD_FL_DRIVERFS; 1611 gd->flags = GENHD_FL_DRIVERFS;
1606 if (sdp->removable) 1612 if (sdp->removable)
1607 gd->flags |= GENHD_FL_REMOVABLE; 1613 gd->flags |= GENHD_FL_REMOVABLE;
1608 gd->queue = sdkp->device->request_queue;
1609 1614
1610 dev_set_drvdata(dev, sdkp); 1615 dev_set_drvdata(dev, sdkp);
1611 add_disk(gd); 1616 add_disk(gd);
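sd_revalidate_disk() above now derives the queue ordering mode from the cache mode page: write-through drives get plain draining, write-back drives get pre/post flushes, and write-back drives that also report DPOFUA (bit 0x10 of the device-specific parameter, read in sd_read_cache_type()) get pre-flush plus FUA writes. A standalone sketch of that mapping, illustrative only; sd_pick_ordered() and the ORD_* names are placeholders for the QUEUE_ORDERED_* modes defined in the blkdev.h hunk below.

  /* Illustrative only: the WCE/DPOFUA to ordering-mode mapping shown above. */
  #include <stdio.h>

  enum ordered_mode { ORD_DRAIN, ORD_DRAIN_FLUSH, ORD_DRAIN_FUA };

  static enum ordered_mode sd_pick_ordered(int wce, int dpofua)
  {
      if (!wce)
          return ORD_DRAIN;                   /* write-through cache */
      return dpofua ? ORD_DRAIN_FUA           /* barrier = pre-flush + FUA write */
                    : ORD_DRAIN_FLUSH;        /* barrier = pre-flush + post-flush */
  }

  int main(void)
  {
      printf("%d %d %d\n",
             sd_pick_ordered(0, 0), sd_pick_ordered(1, 0), sd_pick_ordered(1, 1));
      return 0;
  }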
diff --git a/fs/bio.c b/fs/bio.c
index 38d3e8023a07..dfe242a21eb4 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -325,10 +325,31 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
325 if (unlikely(bio_flagged(bio, BIO_CLONED))) 325 if (unlikely(bio_flagged(bio, BIO_CLONED)))
326 return 0; 326 return 0;
327 327
328 if (bio->bi_vcnt >= bio->bi_max_vecs) 328 if (((bio->bi_size + len) >> 9) > max_sectors)
329 return 0; 329 return 0;
330 330
331 if (((bio->bi_size + len) >> 9) > max_sectors) 331 /*
332 * For filesystems with a blocksize smaller than the pagesize
333 * we will often be called with the same page as last time and
334 * a consecutive offset. Optimize this special case.
335 */
336 if (bio->bi_vcnt > 0) {
337 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
338
339 if (page == prev->bv_page &&
340 offset == prev->bv_offset + prev->bv_len) {
341 prev->bv_len += len;
342 if (q->merge_bvec_fn &&
343 q->merge_bvec_fn(q, bio, prev) < len) {
344 prev->bv_len -= len;
345 return 0;
346 }
347
348 goto done;
349 }
350 }
351
352 if (bio->bi_vcnt >= bio->bi_max_vecs)
332 return 0; 353 return 0;
333 354
334 /* 355 /*
@@ -382,6 +403,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
382 bio->bi_vcnt++; 403 bio->bi_vcnt++;
383 bio->bi_phys_segments++; 404 bio->bi_phys_segments++;
384 bio->bi_hw_segments++; 405 bio->bi_hw_segments++;
406 done:
385 bio->bi_size += len; 407 bio->bi_size += len;
386 return len; 408 return len;
387} 409}
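The __bio_add_page() change above adds a fast path for filesystems whose block size is smaller than the page size: if the new chunk lands on the same page as the last bio_vec and at the immediately following offset, the existing vector is extended instead of consuming a new segment. A userspace model of that check, illustrative only; struct vec stands in for struct bio_vec and the merge_bvec_fn rollback is omitted.

  /* Illustrative only: the "same page, consecutive offset" merge above. */
  #include <stdio.h>

  struct vec { const void *page; unsigned int offset, len; };

  /* Returns 1 if the new chunk was folded into the previous vector. */
  static int try_merge(struct vec *prev, const void *page,
                       unsigned int offset, unsigned int len)
  {
      if (page == prev->page && offset == prev->offset + prev->len) {
          prev->len += len;
          return 1;
      }
      return 0;
  }

  int main(void)
  {
      static char page[4096];
      struct vec v = { page, 0, 1024 };
      printf("%d len=%u\n", try_merge(&v, page, 1024, 512), v.len); /* 1 len=1536 */
      printf("%d len=%u\n", try_merge(&v, page, 4096, 512), v.len); /* 0 len=1536 */
      return 0;
  }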
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 3eb80c391b39..94f77cce27fa 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -129,6 +129,7 @@ enum {
129 ATA_CMD_READ_EXT = 0x25, 129 ATA_CMD_READ_EXT = 0x25,
130 ATA_CMD_WRITE = 0xCA, 130 ATA_CMD_WRITE = 0xCA,
131 ATA_CMD_WRITE_EXT = 0x35, 131 ATA_CMD_WRITE_EXT = 0x35,
132 ATA_CMD_WRITE_FUA_EXT = 0x3D,
132 ATA_CMD_PIO_READ = 0x20, 133 ATA_CMD_PIO_READ = 0x20,
133 ATA_CMD_PIO_READ_EXT = 0x24, 134 ATA_CMD_PIO_READ_EXT = 0x24,
134 ATA_CMD_PIO_WRITE = 0x30, 135 ATA_CMD_PIO_WRITE = 0x30,
@@ -137,6 +138,7 @@ enum {
137 ATA_CMD_READ_MULTI_EXT = 0x29, 138 ATA_CMD_READ_MULTI_EXT = 0x29,
138 ATA_CMD_WRITE_MULTI = 0xC5, 139 ATA_CMD_WRITE_MULTI = 0xC5,
139 ATA_CMD_WRITE_MULTI_EXT = 0x39, 140 ATA_CMD_WRITE_MULTI_EXT = 0x39,
141 ATA_CMD_WRITE_MULTI_FUA_EXT = 0xCE,
140 ATA_CMD_SET_FEATURES = 0xEF, 142 ATA_CMD_SET_FEATURES = 0xEF,
141 ATA_CMD_PACKET = 0xA0, 143 ATA_CMD_PACKET = 0xA0,
142 ATA_CMD_VERIFY = 0x40, 144 ATA_CMD_VERIFY = 0x40,
@@ -194,6 +196,7 @@ enum {
194 ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */ 196 ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */
195 ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ 197 ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
196 ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ 198 ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
199 ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
197}; 200};
198 201
199enum ata_tf_protocols { 202enum ata_tf_protocols {
@@ -247,7 +250,8 @@ struct ata_taskfile {
247#define ata_id_is_sata(id) ((id)[93] == 0) 250#define ata_id_is_sata(id) ((id)[93] == 0)
248#define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6)) 251#define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
249#define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5)) 252#define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
250#define ata_id_has_flush(id) ((id)[83] & (1 << 12)) 253#define ata_id_has_fua(id) ((id)[84] & (1 << 6))
254#define ata_id_has_flush(id) ((id)[83] & (1 << 12))
251#define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13)) 255#define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13))
252#define ata_id_has_lba48(id) ((id)[83] & (1 << 10)) 256#define ata_id_has_lba48(id) ((id)[83] & (1 << 10))
253#define ata_id_has_wcache(id) ((id)[82] & (1 << 5)) 257#define ata_id_has_wcache(id) ((id)[82] & (1 << 5))
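The new ata_id_has_fua() macro above tests word 84, bit 6 of the IDENTIFY DEVICE data. A trivial standalone use of the same test on a fake identify buffer, illustrative only.

  /* Illustrative only: the bit ata_id_has_fua() tests, applied to fake data. */
  #include <stdio.h>

  int main(void)
  {
      unsigned short id[256] = { 0 };         /* 256 16-bit identify words */
      id[84] = 1 << 6;                        /* set the bit the macro checks */
      printf("fua=%d\n", (id[84] & (1 << 6)) != 0);
      return 0;
  }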
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a18500d196e1..fb0985377421 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -102,7 +102,7 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc);
102void swap_io_context(struct io_context **ioc1, struct io_context **ioc2); 102void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
103 103
104struct request; 104struct request;
105typedef void (rq_end_io_fn)(struct request *); 105typedef void (rq_end_io_fn)(struct request *, int);
106 106
107struct request_list { 107struct request_list {
108 int count[2]; 108 int count[2];
@@ -207,6 +207,7 @@ enum rq_flag_bits {
207 __REQ_SORTED, /* elevator knows about this request */ 207 __REQ_SORTED, /* elevator knows about this request */
208 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ 208 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
209 __REQ_HARDBARRIER, /* may not be passed by drive either */ 209 __REQ_HARDBARRIER, /* may not be passed by drive either */
210 __REQ_FUA, /* forced unit access */
210 __REQ_CMD, /* is a regular fs rw request */ 211 __REQ_CMD, /* is a regular fs rw request */
211 __REQ_NOMERGE, /* don't touch this for merging */ 212 __REQ_NOMERGE, /* don't touch this for merging */
212 __REQ_STARTED, /* drive already may have started this one */ 213 __REQ_STARTED, /* drive already may have started this one */
@@ -230,9 +231,7 @@ enum rq_flag_bits {
230 __REQ_PM_SUSPEND, /* suspend request */ 231 __REQ_PM_SUSPEND, /* suspend request */
231 __REQ_PM_RESUME, /* resume request */ 232 __REQ_PM_RESUME, /* resume request */
232 __REQ_PM_SHUTDOWN, /* shutdown request */ 233 __REQ_PM_SHUTDOWN, /* shutdown request */
233 __REQ_BAR_PREFLUSH, /* barrier pre-flush done */ 234 __REQ_ORDERED_COLOR, /* is before or after barrier */
234 __REQ_BAR_POSTFLUSH, /* barrier post-flush */
235 __REQ_BAR_FLUSH, /* rq is the flush request */
236 __REQ_NR_BITS, /* stops here */ 235 __REQ_NR_BITS, /* stops here */
237}; 236};
238 237
@@ -241,6 +240,7 @@ enum rq_flag_bits {
241#define REQ_SORTED (1 << __REQ_SORTED) 240#define REQ_SORTED (1 << __REQ_SORTED)
242#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) 241#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
243#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) 242#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
243#define REQ_FUA (1 << __REQ_FUA)
244#define REQ_CMD (1 << __REQ_CMD) 244#define REQ_CMD (1 << __REQ_CMD)
245#define REQ_NOMERGE (1 << __REQ_NOMERGE) 245#define REQ_NOMERGE (1 << __REQ_NOMERGE)
246#define REQ_STARTED (1 << __REQ_STARTED) 246#define REQ_STARTED (1 << __REQ_STARTED)
@@ -260,9 +260,7 @@ enum rq_flag_bits {
260#define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND) 260#define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND)
261#define REQ_PM_RESUME (1 << __REQ_PM_RESUME) 261#define REQ_PM_RESUME (1 << __REQ_PM_RESUME)
262#define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN) 262#define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN)
263#define REQ_BAR_PREFLUSH (1 << __REQ_BAR_PREFLUSH) 263#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
264#define REQ_BAR_POSTFLUSH (1 << __REQ_BAR_POSTFLUSH)
265#define REQ_BAR_FLUSH (1 << __REQ_BAR_FLUSH)
266 264
267/* 265/*
268 * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME 266 * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
@@ -292,8 +290,7 @@ struct bio_vec;
292typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *); 290typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
293typedef void (activity_fn) (void *data, int rw); 291typedef void (activity_fn) (void *data, int rw);
294typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *); 292typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
295typedef int (prepare_flush_fn) (request_queue_t *, struct request *); 293typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
296typedef void (end_flush_fn) (request_queue_t *, struct request *);
297 294
298enum blk_queue_state { 295enum blk_queue_state {
299 Queue_down, 296 Queue_down,
@@ -335,7 +332,6 @@ struct request_queue
335 activity_fn *activity_fn; 332 activity_fn *activity_fn;
336 issue_flush_fn *issue_flush_fn; 333 issue_flush_fn *issue_flush_fn;
337 prepare_flush_fn *prepare_flush_fn; 334 prepare_flush_fn *prepare_flush_fn;
338 end_flush_fn *end_flush_fn;
339 335
340 /* 336 /*
341 * Dispatch queue sorting 337 * Dispatch queue sorting
@@ -420,14 +416,11 @@ struct request_queue
420 /* 416 /*
421 * reserved for flush operations 417 * reserved for flush operations
422 */ 418 */
423 struct request *flush_rq; 419 unsigned int ordered, next_ordered, ordseq;
424 unsigned char ordered; 420 int orderr, ordcolor;
425}; 421 struct request pre_flush_rq, bar_rq, post_flush_rq;
426 422 struct request *orig_bar_rq;
427enum { 423 unsigned int bi_size;
428 QUEUE_ORDERED_NONE,
429 QUEUE_ORDERED_TAG,
430 QUEUE_ORDERED_FLUSH,
431}; 424};
432 425
433#define RQ_INACTIVE (-1) 426#define RQ_INACTIVE (-1)
@@ -445,12 +438,51 @@ enum {
445#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ 438#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
446#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ 439#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
447#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */ 440#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */
448#define QUEUE_FLAG_FLUSH 9 /* doing barrier flush sequence */ 441
442enum {
443 /*
444 * Hardbarrier is supported with one of the following methods.
445 *
446 * NONE : hardbarrier unsupported
447 * DRAIN : ordering by draining is enough
448 * DRAIN_FLUSH : ordering by draining w/ pre and post flushes
449 * DRAIN_FUA : ordering by draining w/ pre flush and FUA write
450 * TAG : ordering by tag is enough
451 * TAG_FLUSH : ordering by tag w/ pre and post flushes
452 * TAG_FUA : ordering by tag w/ pre flush and FUA write
453 */
454 QUEUE_ORDERED_NONE = 0x00,
455 QUEUE_ORDERED_DRAIN = 0x01,
456 QUEUE_ORDERED_TAG = 0x02,
457
458 QUEUE_ORDERED_PREFLUSH = 0x10,
459 QUEUE_ORDERED_POSTFLUSH = 0x20,
460 QUEUE_ORDERED_FUA = 0x40,
461
462 QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
463 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
464 QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
465 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
466 QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
467 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
468 QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG |
469 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
470
471 /*
472 * Ordered operation sequence
473 */
474 QUEUE_ORDSEQ_STARTED = 0x01, /* flushing in progress */
475 QUEUE_ORDSEQ_DRAIN = 0x02, /* waiting for the queue to be drained */
476 QUEUE_ORDSEQ_PREFLUSH = 0x04, /* pre-flushing in progress */
477 QUEUE_ORDSEQ_BAR = 0x08, /* original barrier req in progress */
478 QUEUE_ORDSEQ_POSTFLUSH = 0x10, /* post-flushing in progress */
479 QUEUE_ORDSEQ_DONE = 0x20,
480};
449 481
450#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) 482#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
451#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) 483#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
452#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 484#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
453#define blk_queue_flushing(q) test_bit(QUEUE_FLAG_FLUSH, &(q)->queue_flags) 485#define blk_queue_flushing(q) ((q)->ordseq)
454 486
455#define blk_fs_request(rq) ((rq)->flags & REQ_CMD) 487#define blk_fs_request(rq) ((rq)->flags & REQ_CMD)
456#define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC) 488#define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC)
@@ -466,8 +498,7 @@ enum {
466 498
467#define blk_sorted_rq(rq) ((rq)->flags & REQ_SORTED) 499#define blk_sorted_rq(rq) ((rq)->flags & REQ_SORTED)
468#define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER) 500#define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER)
469#define blk_barrier_preflush(rq) ((rq)->flags & REQ_BAR_PREFLUSH) 501#define blk_fua_rq(rq) ((rq)->flags & REQ_FUA)
470#define blk_barrier_postflush(rq) ((rq)->flags & REQ_BAR_POSTFLUSH)
471 502
472#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) 503#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
473 504
@@ -560,7 +591,7 @@ extern void register_disk(struct gendisk *dev);
560extern void generic_make_request(struct bio *bio); 591extern void generic_make_request(struct bio *bio);
561extern void blk_put_request(struct request *); 592extern void blk_put_request(struct request *);
562extern void __blk_put_request(request_queue_t *, struct request *); 593extern void __blk_put_request(request_queue_t *, struct request *);
563extern void blk_end_sync_rq(struct request *rq); 594extern void blk_end_sync_rq(struct request *rq, int error);
564extern void blk_attempt_remerge(request_queue_t *, struct request *); 595extern void blk_attempt_remerge(request_queue_t *, struct request *);
565extern struct request *blk_get_request(request_queue_t *, int, gfp_t); 596extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
566extern void blk_insert_request(request_queue_t *, struct request *, int, void *); 597extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
@@ -582,8 +613,7 @@ extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_io
582extern int blk_execute_rq(request_queue_t *, struct gendisk *, 613extern int blk_execute_rq(request_queue_t *, struct gendisk *,
583 struct request *, int); 614 struct request *, int);
584extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *, 615extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
585 struct request *, int, 616 struct request *, int, rq_end_io_fn *);
586 void (*done)(struct request *));
587 617
588static inline request_queue_t *bdev_get_queue(struct block_device *bdev) 618static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
589{ 619{
@@ -614,7 +644,7 @@ static inline void blk_run_address_space(struct address_space *mapping)
614 */ 644 */
615extern int end_that_request_first(struct request *, int, int); 645extern int end_that_request_first(struct request *, int, int);
616extern int end_that_request_chunk(struct request *, int, int); 646extern int end_that_request_chunk(struct request *, int, int);
617extern void end_that_request_last(struct request *); 647extern void end_that_request_last(struct request *, int);
618extern void end_request(struct request *req, int uptodate); 648extern void end_request(struct request *req, int uptodate);
619 649
620/* 650/*
@@ -665,11 +695,12 @@ extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
665extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *); 695extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
666extern void blk_queue_dma_alignment(request_queue_t *, int); 696extern void blk_queue_dma_alignment(request_queue_t *, int);
667extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 697extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
668extern void blk_queue_ordered(request_queue_t *, int); 698extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
669extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *); 699extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
670extern struct request *blk_start_pre_flush(request_queue_t *,struct request *); 700extern int blk_do_ordered(request_queue_t *, struct request **);
671extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int); 701extern unsigned blk_ordered_cur_seq(request_queue_t *);
672extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int); 702extern unsigned blk_ordered_req_seq(struct request *);
703extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
673 704
674extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *); 705extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
675extern void blk_dump_rq_flags(struct request *, char *); 706extern void blk_dump_rq_flags(struct request *, char *);
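The QUEUE_ORDERED_* values above are deliberately bit flags: DRAIN/TAG select the ordering mechanism while PREFLUSH/POSTFLUSH/FUA describe the flush steps, so composite modes such as QUEUE_ORDERED_DRAIN_FUA can be decomposed with simple mask tests. A standalone sketch, illustrative only; the constants mirror the hunk above but describe() is made up for the example.

  /* Illustrative only: decomposing a composite ordering mode into its flags. */
  #include <stdio.h>

  enum {
      ORDERED_NONE = 0x00, ORDERED_DRAIN = 0x01, ORDERED_TAG = 0x02,
      ORDERED_PREFLUSH = 0x10, ORDERED_POSTFLUSH = 0x20, ORDERED_FUA = 0x40,
      ORDERED_DRAIN_FUA = ORDERED_DRAIN | ORDERED_PREFLUSH | ORDERED_FUA,
  };

  static void describe(unsigned ordered)
  {
      printf("drain=%d tag=%d preflush=%d postflush=%d fua=%d\n",
             !!(ordered & ORDERED_DRAIN), !!(ordered & ORDERED_TAG),
             !!(ordered & ORDERED_PREFLUSH), !!(ordered & ORDERED_POSTFLUSH),
             !!(ordered & ORDERED_FUA));
  }

  int main(void)
  {
      describe(ORDERED_DRAIN_FUA);  /* drain=1 tag=0 preflush=1 postflush=0 fua=1 */
      return 0;
  }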
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index a74c27e460ba..fb80fa44c4dd 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -130,6 +130,7 @@ extern int elv_try_last_merge(request_queue_t *, struct bio *);
130#define ELEVATOR_INSERT_FRONT 1 130#define ELEVATOR_INSERT_FRONT 1
131#define ELEVATOR_INSERT_BACK 2 131#define ELEVATOR_INSERT_BACK 2
132#define ELEVATOR_INSERT_SORT 3 132#define ELEVATOR_INSERT_SORT 3
133#define ELEVATOR_INSERT_REQUEUE 4
133 134
134/* 135/*
135 * return values from elevator_may_queue_fn 136 * return values from elevator_may_queue_fn
diff --git a/include/linux/libata.h b/include/linux/libata.h
index cdab75c209a0..a43c95f8f968 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -488,7 +488,8 @@ extern u8 ata_bmdma_status(struct ata_port *ap);
488extern void ata_bmdma_irq_clear(struct ata_port *ap); 488extern void ata_bmdma_irq_clear(struct ata_port *ap);
489extern void ata_qc_complete(struct ata_queued_cmd *qc); 489extern void ata_qc_complete(struct ata_queued_cmd *qc);
490extern void ata_eng_timeout(struct ata_port *ap); 490extern void ata_eng_timeout(struct ata_port *ap);
491extern void ata_scsi_simulate(u16 *id, struct scsi_cmnd *cmd, 491extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
492 struct scsi_cmnd *cmd,
492 void (*done)(struct scsi_cmnd *)); 493 void (*done)(struct scsi_cmnd *));
493extern int ata_std_bios_param(struct scsi_device *sdev, 494extern int ata_std_bios_param(struct scsi_device *sdev,
494 struct block_device *bdev, 495 struct block_device *bdev,
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index 850dfa877fda..02e26c1672bf 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -15,7 +15,6 @@ struct scsi_driver {
15 void (*rescan)(struct device *); 15 void (*rescan)(struct device *);
16 int (*issue_flush)(struct device *, sector_t *); 16 int (*issue_flush)(struct device *, sector_t *);
17 int (*prepare_flush)(struct request_queue *, struct request *); 17 int (*prepare_flush)(struct request_queue *, struct request *);
18 void (*end_flush)(struct request_queue *, struct request *);
19}; 18};
20#define to_scsi_driver(drv) \ 19#define to_scsi_driver(drv) \
21 container_of((drv), struct scsi_driver, gendrv) 20 container_of((drv), struct scsi_driver, gendrv)
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 6297885a35e7..230bc55c0bfa 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -398,7 +398,6 @@ struct scsi_host_template {
398 /* 398 /*
399 * ordered write support 399 * ordered write support
400 */ 400 */
401 unsigned ordered_flush:1;
402 unsigned ordered_tag:1; 401 unsigned ordered_tag:1;
403 402
404 /* 403 /*