author		Linus Torvalds <torvalds@g5.osdl.org>	2006-01-06 12:01:25 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-06 12:01:25 -0500
commit		d99cf9d679a520d67f81d805b7cb91c68e1847f0 (patch)
tree		415aefe6d168df27c006fcc53b1ea5242eabaaea /include
parent		7ed40918a386afc2e14a6d3da563ea6d13686c25 (diff)
parent		e650c305ec3178818b317dad37a6d9c7fa8ba28d (diff)

Merge branch 'post-2.6.15' of git://brick.kernel.dk/data/git/linux-2.6-block

Manual fixup for merge with Jens' "Suspend support for libata",
commit ID 9b847548663ef1039dd49f0eb4463d001e596bc3.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Diffstat (limited to 'include')
-rw-r--r--	include/linux/ata.h		 6
-rw-r--r--	include/linux/blkdev.h		91
-rw-r--r--	include/linux/elevator.h	 1
-rw-r--r--	include/linux/libata.h		 3
-rw-r--r--	include/scsi/scsi_driver.h	 1
-rw-r--r--	include/scsi/scsi_host.h	 1
6 files changed, 69 insertions(+), 34 deletions(-)
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 3eb80c391b39..94f77cce27fa 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -129,6 +129,7 @@ enum {
 	ATA_CMD_READ_EXT	= 0x25,
 	ATA_CMD_WRITE		= 0xCA,
 	ATA_CMD_WRITE_EXT	= 0x35,
+	ATA_CMD_WRITE_FUA_EXT	= 0x3D,
 	ATA_CMD_PIO_READ	= 0x20,
 	ATA_CMD_PIO_READ_EXT	= 0x24,
 	ATA_CMD_PIO_WRITE	= 0x30,
@@ -137,6 +138,7 @@ enum {
 	ATA_CMD_READ_MULTI_EXT	= 0x29,
 	ATA_CMD_WRITE_MULTI	= 0xC5,
 	ATA_CMD_WRITE_MULTI_EXT	= 0x39,
+	ATA_CMD_WRITE_MULTI_FUA_EXT = 0xCE,
 	ATA_CMD_SET_FEATURES	= 0xEF,
 	ATA_CMD_PACKET		= 0xA0,
 	ATA_CMD_VERIFY		= 0x40,
@@ -194,6 +196,7 @@ enum {
 	ATA_TFLAG_DEVICE	= (1 << 2), /* enable r/w to device reg */
 	ATA_TFLAG_WRITE		= (1 << 3), /* data dir: host->dev==1 (write) */
 	ATA_TFLAG_LBA		= (1 << 4), /* enable LBA */
+	ATA_TFLAG_FUA		= (1 << 5), /* enable FUA */
 };
 
 enum ata_tf_protocols {
@@ -247,7 +250,8 @@ struct ata_taskfile {
 #define ata_id_is_sata(id)	((id)[93] == 0)
 #define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
 #define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
-#define ata_id_has_flush(id)	((id)[83] & (1 << 12))
+#define ata_id_has_fua(id)	((id)[84] & (1 << 6))
+#define ata_id_has_flush(id)	((id)[83] & (1 << 12))
 #define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13))
 #define ata_id_has_lba48(id)	((id)[83] & (1 << 10))
 #define ata_id_has_wcache(id)	((id)[82] & (1 << 5))
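
The FUA additions above plug into the usual IDENTIFY-based feature probing. A minimal sketch of how a driver might pick a write opcode from these macros — the helper name and the want_fua policy flag are illustrative, not part of this patch:

/* Hypothetical helper: choose a DMA write opcode for a disk whose
 * IDENTIFY data is in 'id'. The macros and opcodes are the ones added
 * in the hunks above; the policy decision is an assumption. */
static u8 pick_write_cmd(const u16 *id, int want_fua)
{
	if (want_fua && ata_id_has_fua(id) && ata_id_has_lba48(id))
		return ATA_CMD_WRITE_FUA_EXT;	/* 48-bit DMA write, forced unit access */
	if (ata_id_has_lba48(id))
		return ATA_CMD_WRITE_EXT;	/* plain 48-bit DMA write */
	return ATA_CMD_WRITE;			/* 28-bit DMA write */
}

A taskfile built for such a FUA write would also be marked with ATA_TFLAG_FUA, the taskfile flag added above.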
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a18500d196e1..fb0985377421 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -102,7 +102,7 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 
 struct request;
-typedef void (rq_end_io_fn)(struct request *);
+typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
 	int count[2];
@@ -207,6 +207,7 @@ enum rq_flag_bits {
 	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
 	__REQ_HARDBARRIER,	/* may not be passed by drive either */
+	__REQ_FUA,		/* forced unit access */
 	__REQ_CMD,		/* is a regular fs rw request */
 	__REQ_NOMERGE,		/* don't touch this for merging */
 	__REQ_STARTED,		/* drive already may have started this one */
@@ -230,9 +231,7 @@ enum rq_flag_bits {
 	__REQ_PM_SUSPEND,	/* suspend request */
 	__REQ_PM_RESUME,	/* resume request */
 	__REQ_PM_SHUTDOWN,	/* shutdown request */
-	__REQ_BAR_PREFLUSH,	/* barrier pre-flush done */
-	__REQ_BAR_POSTFLUSH,	/* barrier post-flush */
-	__REQ_BAR_FLUSH,	/* rq is the flush request */
+	__REQ_ORDERED_COLOR,	/* is before or after barrier */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -241,6 +240,7 @@ enum rq_flag_bits {
 #define REQ_SORTED	(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
 #define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
+#define REQ_FUA		(1 << __REQ_FUA)
 #define REQ_CMD		(1 << __REQ_CMD)
 #define REQ_NOMERGE	(1 << __REQ_NOMERGE)
 #define REQ_STARTED	(1 << __REQ_STARTED)
@@ -260,9 +260,7 @@ enum rq_flag_bits {
 #define REQ_PM_SUSPEND	(1 << __REQ_PM_SUSPEND)
 #define REQ_PM_RESUME	(1 << __REQ_PM_RESUME)
 #define REQ_PM_SHUTDOWN	(1 << __REQ_PM_SHUTDOWN)
-#define REQ_BAR_PREFLUSH	(1 << __REQ_BAR_PREFLUSH)
-#define REQ_BAR_POSTFLUSH	(1 << __REQ_BAR_POSTFLUSH)
-#define REQ_BAR_FLUSH	(1 << __REQ_BAR_FLUSH)
+#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
 
 /*
  * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
@@ -292,8 +290,7 @@ struct bio_vec;
 typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
 typedef void (activity_fn) (void *data, int rw);
 typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef int (prepare_flush_fn) (request_queue_t *, struct request *);
-typedef void (end_flush_fn) (request_queue_t *, struct request *);
+typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
 
 enum blk_queue_state {
 	Queue_down,
@@ -335,7 +332,6 @@ struct request_queue
 	activity_fn		*activity_fn;
 	issue_flush_fn		*issue_flush_fn;
 	prepare_flush_fn	*prepare_flush_fn;
-	end_flush_fn		*end_flush_fn;
 
 	/*
 	 * Dispatch queue sorting
@@ -420,14 +416,11 @@ struct request_queue
 	/*
 	 * reserved for flush operations
 	 */
-	struct request		*flush_rq;
-	unsigned char		ordered;
-};
-
-enum {
-	QUEUE_ORDERED_NONE,
-	QUEUE_ORDERED_TAG,
-	QUEUE_ORDERED_FLUSH,
+	unsigned int		ordered, next_ordered, ordseq;
+	int			orderr, ordcolor;
+	struct request		pre_flush_rq, bar_rq, post_flush_rq;
+	struct request		*orig_bar_rq;
+	unsigned int		bi_size;
 };
 
 #define RQ_INACTIVE		(-1)
@@ -445,12 +438,51 @@ enum {
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_FLUSH	9	/* doing barrier flush sequence */
+
+enum {
+	/*
+	 * Hardbarrier is supported with one of the following methods.
+	 *
+	 * NONE		: hardbarrier unsupported
+	 * DRAIN	: ordering by draining is enough
+	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
+	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
+	 * TAG		: ordering by tag is enough
+	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
+	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
+	 */
+	QUEUE_ORDERED_NONE	= 0x00,
+	QUEUE_ORDERED_DRAIN	= 0x01,
+	QUEUE_ORDERED_TAG	= 0x02,
+
+	QUEUE_ORDERED_PREFLUSH	= 0x10,
+	QUEUE_ORDERED_POSTFLUSH	= 0x20,
+	QUEUE_ORDERED_FUA	= 0x40,
+
+	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
+	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
+	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+
+	/*
+	 * Ordered operation sequence
+	 */
+	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
+	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
+	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
+	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
+	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
+	QUEUE_ORDSEQ_DONE	= 0x20,
+};
 
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
-#define blk_queue_flushing(q)	test_bit(QUEUE_FLAG_FLUSH, &(q)->queue_flags)
+#define blk_queue_flushing(q)	((q)->ordseq)
 
 #define blk_fs_request(rq)	((rq)->flags & REQ_CMD)
 #define blk_pc_request(rq)	((rq)->flags & REQ_BLOCK_PC)
@@ -466,8 +498,7 @@ enum {
 
 #define blk_sorted_rq(rq)	((rq)->flags & REQ_SORTED)
 #define blk_barrier_rq(rq)	((rq)->flags & REQ_HARDBARRIER)
-#define blk_barrier_preflush(rq)	((rq)->flags & REQ_BAR_PREFLUSH)
-#define blk_barrier_postflush(rq)	((rq)->flags & REQ_BAR_POSTFLUSH)
+#define blk_fua_rq(rq)		((rq)->flags & REQ_FUA)
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
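
With the per-request pre/post flush flags gone, a low-level driver only needs to look at the barrier and FUA bits on the request it is handed; the block core sequences the flushes itself. A hedged sketch of the kind of check a driver's prep hook might make — the function is hypothetical, while blk_barrier_rq() and blk_fua_rq() are the macros above:

/* Hypothetical prep_rq_fn: map block-layer flags onto device behaviour. */
static int mydrv_prep_rq(request_queue_t *q, struct request *rq)
{
	if (blk_barrier_rq(rq)) {
		/* ordered (barrier) write: must not be reordered by the device */
	}
	if (blk_fua_rq(rq)) {
		/* data must reach stable media before completion (FUA) */
	}
	return BLKPREP_OK;
}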
@@ -560,7 +591,7 @@ extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(request_queue_t *, struct request *);
-extern void blk_end_sync_rq(struct request *rq);
+extern void blk_end_sync_rq(struct request *rq, int error);
 extern void blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
@@ -582,8 +613,7 @@ extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_io
 extern int blk_execute_rq(request_queue_t *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
-				  struct request *, int,
-				  void (*done)(struct request *));
+				  struct request *, int, rq_end_io_fn *);
 
 static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
 {
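
The asynchronous execute path now takes the rq_end_io_fn typedef (whose signature gained an int error argument earlier in this diff) instead of an open-coded function pointer. A minimal usage sketch, assuming q, disk and rq are already set up; the callback name and body are illustrative:

/* Completion callback matching the new rq_end_io_fn signature. */
static void mydrv_rq_done(struct request *rq, int error)
{
	/* 'error' is the new second argument: 0 on success, negative on failure.
	 * Signal the submitter / release resources here. */
}

	/* in a submission path: queue the request and return immediately */
	blk_execute_rq_nowait(q, disk, rq, 0 /* not at head */, mydrv_rq_done);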
@@ -614,7 +644,7 @@ static inline void blk_run_address_space(struct address_space *mapping)
  */
 extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
-extern void end_that_request_last(struct request *);
+extern void end_that_request_last(struct request *, int);
 extern void end_request(struct request *req, int uptodate);
 
 /*
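
end_that_request_last() now carries the same uptodate/error style argument as the chunk helpers, so a driver's completion path forwards one status value to both. A hedged sketch of that pattern — the wrapper name and byte count are assumptions, and as before the queue lock is expected to be held here:

/* Hypothetical completion helper: uptodate > 0 on success, 0 or negative on error. */
static void mydrv_complete(struct request *rq, int uptodate, unsigned int nbytes)
{
	if (!end_that_request_chunk(rq, uptodate, nbytes)) {
		/* all segments finished: dequeue and hand the status through
		 * the new second argument of end_that_request_last() */
		blkdev_dequeue_request(rq);
		end_that_request_last(rq, uptodate);
	}
}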
@@ -665,11 +695,12 @@ extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(request_queue_t *, int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern void blk_queue_ordered(request_queue_t *, int);
+extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
 extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern struct request *blk_start_pre_flush(request_queue_t *,struct request *);
-extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
-extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
+extern int blk_do_ordered(request_queue_t *, struct request **);
+extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern unsigned blk_ordered_req_seq(struct request *);
+extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
 
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
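
Barrier setup collapses into a single call: the driver declares which QUEUE_ORDERED_* method it supports and, for the flush variants, supplies a prepare_flush_fn that turns the block layer's pre/post flush requests into a device cache-flush command; progress is then tracked through the QUEUE_ORDSEQ_* states (see blk_do_ordered() and blk_ordered_cur_seq() above). A minimal sketch along the lines of a SCSI disk driver — the function name, opcode choice and timeout are illustrative assumptions:

/* Hypothetical prepare_flush_fn, matching the new void-returning typedef. */
static void mydrv_prepare_flush(request_queue_t *q, struct request *rq)
{
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->flags |= REQ_BLOCK_PC;
	rq->timeout = 60 * HZ;			/* illustrative timeout */
	rq->cmd[0] = 0x35;			/* SCSI SYNCHRONIZE CACHE */
	rq->cmd_len = 10;
}

	/* at queue-init time: drain-based ordering with pre and post flushes */
	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydrv_prepare_flush);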
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index a74c27e460ba..fb80fa44c4dd 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -130,6 +130,7 @@ extern int elv_try_last_merge(request_queue_t *, struct bio *);
 #define ELEVATOR_INSERT_FRONT	1
 #define ELEVATOR_INSERT_BACK	2
 #define ELEVATOR_INSERT_SORT	3
+#define ELEVATOR_INSERT_REQUEUE	4
 
 /*
  * return values from elevator_may_queue_fn
diff --git a/include/linux/libata.h b/include/linux/libata.h
index cdab75c209a0..a43c95f8f968 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -488,7 +488,8 @@ extern u8 ata_bmdma_status(struct ata_port *ap);
 extern void ata_bmdma_irq_clear(struct ata_port *ap);
 extern void ata_qc_complete(struct ata_queued_cmd *qc);
 extern void ata_eng_timeout(struct ata_port *ap);
-extern void ata_scsi_simulate(u16 *id, struct scsi_cmnd *cmd,
+extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
+			      struct scsi_cmnd *cmd,
 			      void (*done)(struct scsi_cmnd *));
 extern int ata_std_bios_param(struct scsi_device *sdev,
 			      struct block_device *bdev,
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index 850dfa877fda..02e26c1672bf 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -15,7 +15,6 @@ struct scsi_driver {
 	void (*rescan)(struct device *);
 	int (*issue_flush)(struct device *, sector_t *);
 	int (*prepare_flush)(struct request_queue *, struct request *);
-	void (*end_flush)(struct request_queue *, struct request *);
 };
 #define to_scsi_driver(drv) \
 	container_of((drv), struct scsi_driver, gendrv)
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 6297885a35e7..230bc55c0bfa 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -398,7 +398,6 @@ struct scsi_host_template {
 	/*
 	 * ordered write support
 	 */
-	unsigned ordered_flush:1;
 	unsigned ordered_tag:1;
 
 	/*