Diffstat (limited to 'include/linux')
-rw-r--r-- | include/linux/bio.h           |  10
-rw-r--r-- | include/linux/blkdev.h        | 245
-rw-r--r-- | include/linux/device-mapper.h |   2
-rw-r--r-- | include/linux/elevator.h      |   4
-rw-r--r-- | include/linux/fs.h            |   2
-rw-r--r-- | include/linux/genhd.h         |   1
-rw-r--r-- | include/linux/iocontext.h     |   6
-rw-r--r-- | include/linux/loop.h          |   3
-rw-r--r-- | include/linux/mg_disk.h       | 206
-rw-r--r-- | include/linux/pipe_fs_i.h     |   1
-rw-r--r-- | include/linux/splice.h        |   3
-rw-r--r-- | include/linux/virtio_blk.h    |  12
12 files changed, 209 insertions, 286 deletions
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7b214fd672a2..12737be58601 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -218,12 +218,12 @@ struct bio {
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
 #define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
 
-static inline unsigned int bio_cur_sectors(struct bio *bio)
+static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
 	if (bio->bi_vcnt)
-		return bio_iovec(bio)->bv_len >> 9;
+		return bio_iovec(bio)->bv_len;
 	else /* dataless requests such as discard */
-		return bio->bi_size >> 9;
+		return bio->bi_size;
 }
 
 static inline void *bio_data(struct bio *bio)
@@ -279,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio)
 #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
 	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
-	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
+	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
 #define BIO_SEG_BOUNDARY(q, b1, b2) \
 	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
 
@@ -506,7 +506,7 @@ static inline int bio_has_data(struct bio *bio)
 }
 
 /*
- * BIO list managment for use by remapping drivers (e.g. DM or MD).
+ * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
  *
  * A bio_list anchors a singly-linked list of bios chained through the bi_next
  * member of the bio. The bio_list also caches the last list member to allow
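The bio_list described in the comment above is what the loop driver switches to later in this series (see the loop.h hunk below). As a hedged, minimal sketch of the deferred-bio pattern it enables, using only helpers declared alongside that comment in bio.h (bio_list_add(), bio_list_pop(); a static bio_list is implicitly empty, otherwise bio_list_init() applies) — names here are illustrative, not part of the patch:

/*
 * Hedged sketch: defer bios to a list and resubmit them later, as a
 * remapping or loop-style driver would. Error paths omitted.
 */
static struct bio_list deferred;	/* {NULL, NULL} == empty list */
static DEFINE_SPINLOCK(deferred_lock);

static void defer_bio(struct bio *bio)
{
	spin_lock_irq(&deferred_lock);
	bio_list_add(&deferred, bio);	/* O(1): the tail is cached */
	spin_unlock_irq(&deferred_lock);
}

static void resubmit_deferred(void)
{
	struct bio *bio;

	/* pop outside the lock before resubmitting down the stack */
	for (;;) {
		spin_lock_irq(&deferred_lock);
		bio = bio_list_pop(&deferred);	/* dequeue from the head */
		spin_unlock_irq(&deferred_lock);
		if (!bio)
			break;
		generic_make_request(bio);
	}
}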
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b4f71f1a4af7..ebdfde8fe556 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -166,19 +166,9 @@ struct request {
 	enum rq_cmd_type_bits cmd_type;
 	unsigned long atomic_flags;
 
-	/* Maintain bio traversal state for part by part I/O submission.
-	 * hard_* are block layer internals, no driver should touch them!
-	 */
-
-	sector_t sector;		/* next sector to submit */
-	sector_t hard_sector;		/* next sector to complete */
-	unsigned long nr_sectors;	/* no. of sectors left to submit */
-	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
-	/* no. of sectors left to submit in the current segment */
-	unsigned int current_nr_sectors;
-
-	/* no. of sectors left to complete in the current segment */
-	unsigned int hard_cur_sectors;
+	/* the following two fields are internal, NEVER access directly */
+	sector_t __sector;		/* sector cursor */
+	unsigned int __data_len;	/* total data len */
 
 	struct bio *bio;
 	struct bio *biotail;
@@ -211,8 +201,8 @@ struct request {
 
 	unsigned short ioprio;
 
-	void *special;
-	char *buffer;
+	void *special;		/* opaque pointer available for LLD use */
+	char *buffer;		/* kaddr of the current segment if available */
 
 	int tag;
 	int errors;
@@ -226,10 +216,9 @@ struct request {
 	unsigned char __cmd[BLK_MAX_CDB];
 	unsigned char *cmd;
 
-	unsigned int data_len;
 	unsigned int extra_len;	/* length of alignment and padding */
 	unsigned int sense_len;
-	void *data;
+	unsigned int resid_len;	/* residual count */
 	void *sense;
 
 	unsigned long deadline;
@@ -318,6 +307,26 @@ struct blk_cmd_filter {
 	struct kobject kobj;
 };
 
+struct queue_limits {
+	unsigned long		bounce_pfn;
+	unsigned long		seg_boundary_mask;
+
+	unsigned int		max_hw_sectors;
+	unsigned int		max_sectors;
+	unsigned int		max_segment_size;
+	unsigned int		physical_block_size;
+	unsigned int		alignment_offset;
+	unsigned int		io_min;
+	unsigned int		io_opt;
+
+	unsigned short		logical_block_size;
+	unsigned short		max_hw_segments;
+	unsigned short		max_phys_segments;
+
+	unsigned char		misaligned;
+	unsigned char		no_cluster;
+};
+
 struct request_queue
 {
 	/*
@@ -369,7 +378,6 @@ struct request_queue
 	/*
 	 * queue needs bounce pages for pages above this limit
 	 */
-	unsigned long		bounce_pfn;
 	gfp_t			bounce_gfp;
 
 	/*
@@ -398,14 +406,6 @@ struct request_queue
 	unsigned int		nr_congestion_off;
 	unsigned int		nr_batching;
 
-	unsigned int		max_sectors;
-	unsigned int		max_hw_sectors;
-	unsigned short		max_phys_segments;
-	unsigned short		max_hw_segments;
-	unsigned short		hardsect_size;
-	unsigned int		max_segment_size;
-
-	unsigned long		seg_boundary_mask;
 	void			*dma_drain_buffer;
 	unsigned int		dma_drain_size;
 	unsigned int		dma_pad_mask;
@@ -415,12 +415,14 @@ struct request_queue
 	struct list_head	tag_busy_list;
 
 	unsigned int		nr_sorted;
-	unsigned int		in_flight;
+	unsigned int		in_flight[2];
 
 	unsigned int		rq_timeout;
 	struct timer_list	timeout;
 	struct list_head	timeout_list;
 
+	struct queue_limits	limits;
+
 	/*
 	 * sg stuff
 	 */
@@ -522,6 +524,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag,
 	__clear_bit(flag, &q->queue_flags);
 }
 
+static inline int queue_in_flight(struct request_queue *q)
+{
+	return q->in_flight[0] + q->in_flight[1];
+}
+
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
 	WARN_ON_ONCE(!queue_is_locked(q));
@@ -752,10 +759,17 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern struct request *blk_make_request(struct request_queue *, struct bio *,
+					gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
+extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+			     struct bio_set *bs, gfp_t gfp_mask,
+			     int (*bio_ctr)(struct bio *, struct bio *, void *),
+			     void *data);
+extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
 extern void blk_plug_device(struct request_queue *);
@@ -768,12 +782,6 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			  struct scsi_ioctl_command __user *);
 
 /*
- * Temporary export, until SCSI gets fixed up.
- */
-extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
-			     struct bio *bio);
-
-/*
  * A queue has just exitted congestion. Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
@@ -798,7 +806,6 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
-extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
 			   gfp_t);
@@ -831,41 +838,73 @@ static inline void blk_run_address_space(struct address_space *mapping)
 		blk_run_backing_dev(mapping->backing_dev_info, NULL);
 }
 
-extern void blkdev_dequeue_request(struct request *req);
+/*
+ * blk_rq_pos()			: the current sector
+ * blk_rq_bytes()		: bytes left in the entire request
+ * blk_rq_cur_bytes()		: bytes left in the current segment
+ * blk_rq_sectors()		: sectors left in the entire request
+ * blk_rq_cur_sectors()		: sectors left in the current segment
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+	return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+	return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+}
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+	return blk_rq_bytes(rq) >> 9;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+	return blk_rq_cur_bytes(rq) >> 9;
+}
+
+/*
+ * Request issue related functions.
+ */
+extern struct request *blk_peek_request(struct request_queue *q);
+extern void blk_start_request(struct request *rq);
+extern struct request *blk_fetch_request(struct request_queue *q);
 
 /*
- * blk_end_request() and friends.
- * __blk_end_request() and end_request() must be called with
- * the request queue spinlock acquired.
+ * Request completion related functions.
+ *
+ * blk_update_request() completes given number of bytes and updates
+ * the request without completing it.
+ *
+ * blk_end_request() and friends.  __blk_end_request() must be called
+ * with the request queue spinlock acquired.
  *
  * Several drivers define their own end_request and call
  * blk_end_request() for parts of the original function.
  * This prevents code duplication in drivers.
  */
-extern int blk_end_request(struct request *rq, int error,
+extern bool blk_update_request(struct request *rq, int error,
 			   unsigned int nr_bytes);
-extern int __blk_end_request(struct request *rq, int error,
+extern bool blk_end_request(struct request *rq, int error,
 			     unsigned int nr_bytes);
-extern int blk_end_bidi_request(struct request *rq, int error,
-				unsigned int nr_bytes, unsigned int bidi_bytes);
-extern void end_request(struct request *, int);
-extern int blk_end_request_callback(struct request *rq, int error,
-				    unsigned int nr_bytes,
-				    int (drv_callback)(struct request *));
+extern void blk_end_request_all(struct request *rq, int error);
+extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request(struct request *rq, int error,
+			      unsigned int nr_bytes);
+extern void __blk_end_request_all(struct request *rq, int error);
+extern bool __blk_end_request_cur(struct request *rq, int error);
+
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
 extern void blk_abort_queue(struct request_queue *);
-extern void blk_update_request(struct request *rq, int error,
-			       unsigned int nr_bytes);
-
-/*
- * blk_end_request() takes bytes instead of sectors as a complete size.
- * blk_rq_bytes() returns bytes left to complete in the entire request.
- * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
- */
-extern unsigned int blk_rq_bytes(struct request *rq);
-extern unsigned int blk_rq_cur_bytes(struct request *rq);
 
 /*
  * Access functions for manipulating queue properties
@@ -877,10 +916,20 @@ extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
-extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_alignment_offset(struct request_queue *q,
+				       unsigned int alignment);
+extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+			    sector_t offset);
+extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+			      sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
@@ -967,19 +1016,87 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-static inline int queue_hardsect_size(struct request_queue *q)
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+	return q->limits.bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+	return q->limits.seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+	return q->limits.max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+	return q->limits.max_hw_sectors;
+}
+
+static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+{
+	return q->limits.max_hw_segments;
+}
+
+static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+{
+	return q->limits.max_phys_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+	return q->limits.max_segment_size;
+}
+
+static inline unsigned short queue_logical_block_size(struct request_queue *q)
 {
 	int retval = 512;
 
-	if (q && q->hardsect_size)
-		retval = q->hardsect_size;
+	if (q && q->limits.logical_block_size)
+		retval = q->limits.logical_block_size;
 
 	return retval;
 }
 
-static inline int bdev_hardsect_size(struct block_device *bdev)
+static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+{
+	return queue_logical_block_size(bdev_get_queue(bdev));
+}
+
+static inline unsigned int queue_physical_block_size(struct request_queue *q)
+{
+	return q->limits.physical_block_size;
+}
+
+static inline unsigned int queue_io_min(struct request_queue *q)
+{
+	return q->limits.io_min;
+}
+
+static inline unsigned int queue_io_opt(struct request_queue *q)
+{
+	return q->limits.io_opt;
+}
+
+static inline int queue_alignment_offset(struct request_queue *q)
+{
+	if (q && q->limits.misaligned)
+		return -1;
+
+	if (q && q->limits.alignment_offset)
+		return q->limits.alignment_offset;
+
+	return 0;
+}
+
+static inline int queue_sector_alignment_offset(struct request_queue *q,
+						sector_t sector)
 {
-	return queue_hardsect_size(bdev_get_queue(bdev));
+	return ((sector << 9) - q->limits.alignment_offset)
+		& (q->limits.io_min - 1);
 }
 
 static inline int queue_dma_alignment(struct request_queue *q)
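Taken together, these blkdev.h hunks retire the old elv_next_request()/blkdev_dequeue_request()/end_request() pattern: drivers now issue requests with blk_peek_request()/blk_start_request() (or the combined blk_fetch_request()) and complete them in bytes through the blk_end_request() family, reading geometry through the blk_rq_*() accessors instead of the removed sector/nr_sectors fields. A hedged sketch of the resulting request-function shape; mydev_request() and mydev_do_transfer() are hypothetical names, not part of the patch:

/*
 * Hedged sketch of a ->request_fn against the new API. Called with
 * the queue lock held, hence the locked __blk_end_request_*()
 * variants rather than blk_end_request_*().
 */
static void mydev_request(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {	/* peek + start */
		int err = 0;

		if (!blk_fs_request(rq)) {
			__blk_end_request_all(rq, -EIO);
			continue;
		}

		/*
		 * mydev_do_transfer() (hypothetical) would move
		 * blk_rq_cur_bytes(rq) bytes at sector blk_rq_pos(rq)
		 * to/from rq->buffer; __blk_end_request_cur() retires
		 * the current segment and returns true while segments
		 * remain.
		 */
		do {
			err = mydev_do_transfer(rq);
		} while (!err && __blk_end_request_cur(rq, 0));

		if (err)
			__blk_end_request_all(rq, err);	/* fail the rest */
	}
}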
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ded2d7c42668..49c2362977fd 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -149,7 +149,7 @@ struct io_restrictions {
 	unsigned max_hw_sectors;
 	unsigned max_sectors;
 	unsigned max_segment_size;
-	unsigned short hardsect_size;
+	unsigned short logical_block_size;
 	unsigned short max_hw_segments;
 	unsigned short max_phys_segments;
 	unsigned char no_cluster; /* inverted so that 0 is default */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index c59b769f62b0..1cb3372e65d8 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -103,10 +103,8 @@ extern int elv_merge(struct request_queue *, struct request **, struct bio *);
 extern void elv_merge_requests(struct request_queue *, struct request *,
 			       struct request *);
 extern void elv_merged_request(struct request_queue *, struct request *, int);
-extern void elv_dequeue_request(struct request_queue *, struct request *);
 extern void elv_requeue_request(struct request_queue *, struct request *);
 extern int elv_queue_empty(struct request_queue *);
-extern struct request *elv_next_request(struct request_queue *q);
 extern struct request *elv_former_request(struct request_queue *, struct request *);
 extern struct request *elv_latter_request(struct request_queue *, struct request *);
 extern int elv_register_queue(struct request_queue *q);
@@ -171,7 +169,7 @@ enum {
 	ELV_MQUEUE_MUST,
 };
 
-#define rq_end_sector(rq)	((rq)->sector + (rq)->nr_sectors)
+#define rq_end_sector(rq)	(blk_rq_pos(rq) + blk_rq_sectors(rq))
 #define rb_entry_rq(node)	rb_entry((node), struct request, rb_node)
 
 /*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3b534e527e09..83d6b4397245 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2205,6 +2205,8 @@ extern int generic_segment_checks(const struct iovec *iov,
 /* fs/splice.c */
 extern ssize_t generic_file_splice_read(struct file *, loff_t *,
 		struct pipe_inode_info *, size_t, unsigned int);
+extern ssize_t default_file_splice_read(struct file *, loff_t *,
+		struct pipe_inode_info *, size_t, unsigned int);
 extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
 		struct file *, loff_t *, size_t, unsigned int);
 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index a1a28caed23d..149fda264c86 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -90,6 +90,7 @@ struct disk_stats {
 struct hd_struct {
 	sector_t start_sect;
 	sector_t nr_sects;
+	sector_t alignment_offset;
 	struct device __dev;
 	struct kobject *holder_dir;
 	int policy, partno;
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 08b987bccf89..dd05434fa45f 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -64,7 +64,7 @@ struct cfq_io_context {
  * and kmalloc'ed. These could be shared between processes.
  */
 struct io_context {
-	atomic_t refcount;
+	atomic_long_t refcount;
 	atomic_t nr_tasks;
 
 	/* all the fields below are protected by this lock */
@@ -91,8 +91,8 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
 	 * if ref count is zero, don't allow sharing (ioc is going away, it's
 	 * a race).
 	 */
-	if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
-		atomic_inc(&ioc->nr_tasks);
+	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
+		atomic_long_inc(&ioc->refcount);
 		return ioc;
 	}
 
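Worth noting when reading the hunk above: as merged, ioc_task_link() now bumps refcount twice (once via atomic_long_inc_not_zero(), once via atomic_long_inc()) and no longer touches nr_tasks; later kernels reworked this helper. For context, a hedged sketch of the matching put side of an atomic_long_t refcount (the real put_io_context() in block/blk-ioc.c also releases CFQ state and frees through a slab cache, both omitted here; the function name is illustrative):

/*
 * Hedged sketch of the put side of an atomic_long_t refcount.
 */
static int put_ioc_sketch(struct io_context *ioc)
{
	if (ioc == NULL)
		return 1;

	/* the last reference frees the object */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		kfree(ioc);
		return 1;
	}
	return 0;
}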
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 40725447f5e0..66c194e2d9b9 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -56,8 +56,7 @@ struct loop_device {
 	gfp_t		old_gfp_mask;
 
 	spinlock_t		lo_lock;
-	struct bio		*lo_bio;
-	struct bio		*lo_biotail;
+	struct bio_list		lo_bio_list;
 	int			lo_state;
 	struct mutex		lo_ctl_mutex;
 	struct task_struct	*lo_thread;
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
deleted file mode 100644
index 1f76b1ebf627..000000000000
--- a/include/linux/mg_disk.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * include/linux/mg_disk.c
- *
- * Support for the mGine m[g]flash IO mode.
- * Based on legacy hd.c
- *
- * (c) 2008 mGine Co.,LTD
- * (c) 2008 unsik Kim <donari75@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MG_DISK_H__
-#define __MG_DISK_H__
-
-#include <linux/blkdev.h>
-#include <linux/ata.h>
-
-/* name for block device */
-#define MG_DISK_NAME "mgd"
-/* name for platform device */
-#define MG_DEV_NAME "mg_disk"
-
-#define MG_DISK_MAJ 0
-#define MG_DISK_MAX_PART 16
-#define MG_SECTOR_SIZE 512
-#define MG_MAX_SECTS 256
-
-/* Register offsets */
-#define MG_BUFF_OFFSET			0x8000
-#define MG_STORAGE_BUFFER_SIZE		0x200
-#define MG_REG_OFFSET			0xC000
-#define MG_REG_FEATURE			(MG_REG_OFFSET + 2)	/* write case */
-#define MG_REG_ERROR			(MG_REG_OFFSET + 2)	/* read case */
-#define MG_REG_SECT_CNT			(MG_REG_OFFSET + 4)
-#define MG_REG_SECT_NUM			(MG_REG_OFFSET + 6)
-#define MG_REG_CYL_LOW			(MG_REG_OFFSET + 8)
-#define MG_REG_CYL_HIGH			(MG_REG_OFFSET + 0xA)
-#define MG_REG_DRV_HEAD			(MG_REG_OFFSET + 0xC)
-#define MG_REG_COMMAND			(MG_REG_OFFSET + 0xE)	/* write case */
-#define MG_REG_STATUS			(MG_REG_OFFSET + 0xE)	/* read case */
-#define MG_REG_DRV_CTRL			(MG_REG_OFFSET + 0x10)
-#define MG_REG_BURST_CTRL		(MG_REG_OFFSET + 0x12)
-
-/* "Drive Select/Head Register" bit values */
-#define MG_REG_HEAD_MUST_BE_ON		0xA0 /* These 2 bits are always on */
-#define MG_REG_HEAD_DRIVE_MASTER	(0x00 | MG_REG_HEAD_MUST_BE_ON)
-#define MG_REG_HEAD_DRIVE_SLAVE		(0x10 | MG_REG_HEAD_MUST_BE_ON)
-#define MG_REG_HEAD_LBA_MODE		(0x40 | MG_REG_HEAD_MUST_BE_ON)
-
-
-/* "Device Control Register" bit values */
-#define MG_REG_CTRL_INTR_ENABLE			0x0
-#define MG_REG_CTRL_INTR_DISABLE		(0x1<<1)
-#define MG_REG_CTRL_RESET			(0x1<<2)
-#define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH	0x0
-#define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW	(0x1<<4)
-#define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW		0x0
-#define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH	(0x1<<5)
-#define MG_REG_CTRL_DPD_DISABLE			0x0
-#define MG_REG_CTRL_DPD_ENABLE			(0x1<<6)
-
-/* Status register bit */
-/* error bit in status register */
-#define MG_REG_STATUS_BIT_ERROR			0x01
-/* corrected error in status register */
-#define MG_REG_STATUS_BIT_CORRECTED_ERROR	0x04
-/* data request bit in status register */
-#define MG_REG_STATUS_BIT_DATA_REQ		0x08
-/* DSC - Drive Seek Complete */
-#define MG_REG_STATUS_BIT_SEEK_DONE		0x10
-/* DWF - Drive Write Fault */
-#define MG_REG_STATUS_BIT_WRITE_FAULT		0x20
-#define MG_REG_STATUS_BIT_READY			0x40
-#define MG_REG_STATUS_BIT_BUSY			0x80
-
-/* handy status */
-#define MG_STAT_READY	(MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE)
-#define MG_READY_OK(s)	(((s) & (MG_STAT_READY | \
-				(MG_REG_STATUS_BIT_BUSY | \
-				 MG_REG_STATUS_BIT_WRITE_FAULT | \
-				 MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY)
-
-/* Error register */
-#define MG_REG_ERR_AMNF		0x01
-#define MG_REG_ERR_ABRT		0x04
-#define MG_REG_ERR_IDNF		0x10
-#define MG_REG_ERR_UNC		0x40
-#define MG_REG_ERR_BBK		0x80
-
-/* error code for others */
-#define MG_ERR_NONE		0
-#define MG_ERR_TIMEOUT		0x100
-#define MG_ERR_INIT_STAT	0x101
-#define MG_ERR_TRANSLATION	0x102
-#define MG_ERR_CTRL_RST		0x103
-#define MG_ERR_INV_STAT		0x104
-#define MG_ERR_RSTOUT		0x105
-
-#define MG_MAX_ERRORS	6	/* Max read/write errors */
-
-/* command */
-#define MG_CMD_RD 0x20
-#define MG_CMD_WR 0x30
-#define MG_CMD_SLEEP 0x99
-#define MG_CMD_WAKEUP 0xC3
-#define MG_CMD_ID 0xEC
-#define MG_CMD_WR_CONF 0x3C
-#define MG_CMD_RD_CONF 0x40
-
-/* operation mode */
-#define MG_OP_CASCADE (1 << 0)
-#define MG_OP_CASCADE_SYNC_RD (1 << 1)
-#define MG_OP_CASCADE_SYNC_WR (1 << 2)
-#define MG_OP_INTERLEAVE (1 << 3)
-
-/* synchronous */
-#define MG_BURST_LAT_4 (3 << 4)
-#define MG_BURST_LAT_5 (4 << 4)
-#define MG_BURST_LAT_6 (5 << 4)
-#define MG_BURST_LAT_7 (6 << 4)
-#define MG_BURST_LAT_8 (7 << 4)
-#define MG_BURST_LEN_4 (1 << 1)
-#define MG_BURST_LEN_8 (2 << 1)
-#define MG_BURST_LEN_16 (3 << 1)
-#define MG_BURST_LEN_32 (4 << 1)
-#define MG_BURST_LEN_CONT (0 << 1)
-
-/* timeout value (unit: ms) */
-#define MG_TMAX_CONF_TO_CMD	1
-#define MG_TMAX_WAIT_RD_DRQ	10
-#define MG_TMAX_WAIT_WR_DRQ	500
-#define MG_TMAX_RST_TO_BUSY	10
-#define MG_TMAX_HDRST_TO_RDY	500
-#define MG_TMAX_SWRST_TO_RDY	500
-#define MG_TMAX_RSTOUT		3000
-
-/* device attribution */
-/* use mflash as boot device */
-#define MG_BOOT_DEV		(1 << 0)
-/* use mflash as storage device */
-#define MG_STORAGE_DEV		(1 << 1)
-/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
-#define MG_STORAGE_DEV_SKIP_RST	(1 << 2)
-
-#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
-
-/* names of GPIO resource */
-#define MG_RST_PIN	"mg_rst"
-/* except MG_BOOT_DEV, reset-out pin should be assigned */
-#define MG_RSTOUT_PIN	"mg_rstout"
-
-/* private driver data */
-struct mg_drv_data {
-	/* disk resource */
-	u32 use_polling;
-
-	/* device attribution */
-	u32 dev_attr;
-
-	/* internally used */
-	struct mg_host *host;
-};
-
-/* main structure for mflash driver */
-struct mg_host {
-	struct device *dev;
-
-	struct request_queue *breq;
-	spinlock_t lock;
-	struct gendisk *gd;
-
-	struct timer_list timer;
-	void (*mg_do_intr) (struct mg_host *);
-
-	u16 id[ATA_ID_WORDS];
-
-	u16 cyls;
-	u16 heads;
-	u16 sectors;
-	u32 n_sectors;
-	u32 nres_sectors;
-
-	void __iomem *dev_base;
-	unsigned int irq;
-	unsigned int rst;
-	unsigned int rstout;
-
-	u32 major;
-	u32 error;
-};
-
-/*
- * Debugging macro and defines
- */
-#undef DO_MG_DEBUG
-#ifdef DO_MG_DEBUG
-# define MG_DBG(fmt, args...) \
-	printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
-#else /* CONFIG_MG_DEBUG */
-# define MG_DBG(fmt, args...) do { } while (0)
-#endif /* CONFIG_MG_DEBUG */
-
-#endif
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index c8f038554e80..b43a9e039059 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -152,5 +152,6 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void
 void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
 
 #endif
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 5f3faa9d15ae..18e7c7c0cae6 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -11,8 +11,7 @@
 #include <linux/pipe_fs_i.h>
 
 /*
- * splice is tied to pipes as a transport (at least for now), so we'll just
- * add the splice flags here.
+ * Flags passed in from splice/tee/vmsplice
  */
 #define SPLICE_F_MOVE	(0x01)	/* move pages instead of copying */
 #define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index 94c56d29869d..be7d255fc7cf 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -15,6 +15,10 @@
 #define VIRTIO_BLK_F_GEOMETRY	4	/* Legacy geometry available */
 #define VIRTIO_BLK_F_RO		5	/* Disk is read-only */
 #define VIRTIO_BLK_F_BLK_SIZE	6	/* Block size of disk is available*/
+#define VIRTIO_BLK_F_SCSI	7	/* Supports scsi command passthru */
+#define VIRTIO_BLK_F_IDENTIFY	8	/* ATA IDENTIFY supported */
+
+#define VIRTIO_BLK_ID_BYTES	(sizeof(__u16[256]))	/* IDENTIFY DATA */
 
 struct virtio_blk_config
 {
@@ -32,6 +36,7 @@ struct virtio_blk_config
 	} geometry;
 	/* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
 	__u32 blk_size;
+	__u8 identify[VIRTIO_BLK_ID_BYTES];
 } __attribute__((packed));
 
 /* These two define direction. */
@@ -55,6 +60,13 @@ struct virtio_blk_outhdr
 	__u64 sector;
 };
 
+struct virtio_scsi_inhdr {
+	__u32 errors;
+	__u32 data_len;
+	__u32 sense_len;
+	__u32 residual;
+};
+
 /* And this is the final byte of the write scatter-gather list. */
 #define VIRTIO_BLK_S_OK		0
 #define VIRTIO_BLK_S_IOERR	1
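The new feature bits make the identify area and the SCSI passthru header above conditional on negotiation. As a hedged sketch of how a driver probe might consume such an optional config field, modeled loosely on the era's drivers/block/virtio_blk.c (assuming virtio_config_val() from linux/virtio_config.h, which fails when the feature bit was not negotiated) and tying into the blk_queue_logical_block_size() rename from the blkdev.h hunks:

/*
 * Hedged sketch: read an optional virtio config field at probe time.
 * The function name is illustrative; error handling is minimal.
 */
static void sketch_read_blk_size(struct virtio_device *vdev,
				 struct request_queue *q)
{
	u32 blk_size;
	int err;

	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
				offsetof(struct virtio_blk_config, blk_size),
				&blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	/* else: feature not negotiated, keep the 512-byte default */
}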