author    Linus Torvalds <torvalds@linux-foundation.org>  2009-06-11 13:52:27 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-06-11 14:10:35 -0400
commit    c9059598ea8981d02356eead3188bf7fa4d717b8
tree      03e73b20a30e988da7c6a3e0ad93b2dc5843274d /include
parent    0a33f80a8373eca7f4bea3961d1346c3815fa5ed
parent    b0fd271d5fba0b2d00888363f3869e3f9b26caa9
Merge branch 'for-2.6.31' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.31' of git://git.kernel.dk/linux-2.6-block: (153 commits)
  block: add request clone interface (v2)
  floppy: fix hibernation
  ramdisk: remove long-deprecated "ramdisk=" boot-time parameter
  fs/bio.c: add missing __user annotation
  block: prevent possible io_context->refcount overflow
  Add serial number support for virtio_blk, V4a
  block: Add missing bounce_pfn stacking and fix comments
  Revert "block: Fix bounce limit setting in DM"
  cciss: decode unit attention in SCSI error handling code
  cciss: Remove no longer needed sendcmd reject processing code
  cciss: change SCSI error handling routines to work with interrupts enabled.
  cciss: separate error processing and command retrying code in sendcmd_withirq_core()
  cciss: factor out fix target status processing code from sendcmd functions
  cciss: simplify interface of sendcmd() and sendcmd_withirq()
  cciss: factor out core of sendcmd_withirq() for use by SCSI error handling code
  cciss: Use schedule_timeout_uninterruptible in SCSI error handling code
  block: needs to set the residual length of a bidi request
  Revert "block: implement blkdev_readpages"
  block: Fix bounce limit setting in DM
  Removed reference to non-existing file Documentation/PCI/PCI-DMA-mapping.txt
  ...

Manually fix conflicts with tracing updates in:
	block/blk-sysfs.c
	drivers/ide/ide-atapi.c
	drivers/ide/ide-cd.c
	drivers/ide/ide-floppy.c
	drivers/ide/ide-tape.c
	include/trace/events/block.h
	kernel/trace/blktrace.c
Diffstat (limited to 'include')
-rw-r--r--  include/linux/bio.h             10
-rw-r--r--  include/linux/blkdev.h         245
-rw-r--r--  include/linux/device-mapper.h    2
-rw-r--r--  include/linux/elevator.h         4
-rw-r--r--  include/linux/fs.h               2
-rw-r--r--  include/linux/genhd.h            1
-rw-r--r--  include/linux/iocontext.h        6
-rw-r--r--  include/linux/loop.h             3
-rw-r--r--  include/linux/mg_disk.h        206
-rw-r--r--  include/linux/pipe_fs_i.h        1
-rw-r--r--  include/linux/splice.h           3
-rw-r--r--  include/linux/virtio_blk.h      12
-rw-r--r--  include/scsi/scsi_cmnd.h         2
-rw-r--r--  include/trace/events/block.h    29
14 files changed, 222 insertions(+), 304 deletions(-)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7b214fd672a2..12737be58601 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -218,12 +218,12 @@ struct bio {
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
 #define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
 
-static inline unsigned int bio_cur_sectors(struct bio *bio)
+static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
 	if (bio->bi_vcnt)
-		return bio_iovec(bio)->bv_len >> 9;
+		return bio_iovec(bio)->bv_len;
 	else /* dataless requests such as discard */
-		return bio->bi_size >> 9;
+		return bio->bi_size;
 }
 
 static inline void *bio_data(struct bio *bio)
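
The hunk above converts the per-segment helper from sectors to bytes. A
minimal caller-side sketch (function name hypothetical) of the shift that
callers now perform themselves:

	#include <linux/bio.h>

	/* sketch: segment size under the new byte-based helper */
	static unsigned int my_cur_sectors(struct bio *bio)
	{
		/* bio_cur_bytes() replaces bio_cur_sectors(); callers that
		 * still think in 512-byte sectors shift down themselves */
		return bio_cur_bytes(bio) >> 9;
	}
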
@@ -279,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio)
 #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
 	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
-	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
+	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
 #define BIO_SEG_BOUNDARY(q, b1, b2) \
 	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
 
@@ -506,7 +506,7 @@ static inline int bio_has_data(struct bio *bio)
 }
 
 /*
- * BIO list managment for use by remapping drivers (e.g. DM or MD).
+ * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
  *
  * A bio_list anchors a singly-linked list of bios chained through the bi_next
  * member of the bio. The bio_list also caches the last list member to allow
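
The comment above introduces the bio_list API that the loop driver switches
to later in this diff (see the loop.h hunk). A hedged sketch of the add/pop
pattern, using the standard helpers from this header; the my_* names are
hypothetical:

	#include <linux/bio.h>

	/* producers append under the owner's lock ... */
	static void my_queue_bio(struct bio_list *list, struct bio *bio)
	{
		bio_list_add(list, bio);	/* O(1) append via cached tail */
	}

	/* ... and a worker thread drains and remaps each bio */
	static void my_drain(struct bio_list *list)
	{
		struct bio *bio;

		while ((bio = bio_list_pop(list)) != NULL)
			generic_make_request(bio);
	}
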
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b4f71f1a4af7..ebdfde8fe556 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -166,19 +166,9 @@ struct request {
 	enum rq_cmd_type_bits cmd_type;
 	unsigned long atomic_flags;
 
-	/* Maintain bio traversal state for part by part I/O submission.
-	 * hard_* are block layer internals, no driver should touch them!
-	 */
-
-	sector_t sector;		/* next sector to submit */
-	sector_t hard_sector;		/* next sector to complete */
-	unsigned long nr_sectors;	/* no. of sectors left to submit */
-	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
-	/* no. of sectors left to submit in the current segment */
-	unsigned int current_nr_sectors;
-
-	/* no. of sectors left to complete in the current segment */
-	unsigned int hard_cur_sectors;
+	/* the following two fields are internal, NEVER access directly */
+	sector_t __sector;		/* sector cursor */
+	unsigned int __data_len;	/* total data len */
 
 	struct bio *bio;
 	struct bio *biotail;
@@ -211,8 +201,8 @@ struct request {
 
 	unsigned short ioprio;
 
-	void *special;
-	char *buffer;
+	void *special;		/* opaque pointer available for LLD use */
+	char *buffer;		/* kaddr of the current segment if available */
 
 	int tag;
 	int errors;
@@ -226,10 +216,9 @@ struct request {
 	unsigned char __cmd[BLK_MAX_CDB];
 	unsigned char *cmd;
 
-	unsigned int data_len;
 	unsigned int extra_len;	/* length of alignment and padding */
 	unsigned int sense_len;
-	void *data;
+	unsigned int resid_len;	/* residual count */
 	void *sense;
 
 	unsigned long deadline;
@@ -318,6 +307,26 @@ struct blk_cmd_filter {
 	struct kobject kobj;
 };
 
+struct queue_limits {
+	unsigned long		bounce_pfn;
+	unsigned long		seg_boundary_mask;
+
+	unsigned int		max_hw_sectors;
+	unsigned int		max_sectors;
+	unsigned int		max_segment_size;
+	unsigned int		physical_block_size;
+	unsigned int		alignment_offset;
+	unsigned int		io_min;
+	unsigned int		io_opt;
+
+	unsigned short		logical_block_size;
+	unsigned short		max_hw_segments;
+	unsigned short		max_phys_segments;
+
+	unsigned char		misaligned;
+	unsigned char		no_cluster;
+};
+
 struct request_queue
 {
 	/*
@@ -369,7 +378,6 @@ struct request_queue
 	/*
 	 * queue needs bounce pages for pages above this limit
 	 */
-	unsigned long		bounce_pfn;
 	gfp_t			bounce_gfp;
 
 	/*
@@ -398,14 +406,6 @@ struct request_queue
 	unsigned int		nr_congestion_off;
 	unsigned int		nr_batching;
 
-	unsigned int		max_sectors;
-	unsigned int		max_hw_sectors;
-	unsigned short		max_phys_segments;
-	unsigned short		max_hw_segments;
-	unsigned short		hardsect_size;
-	unsigned int		max_segment_size;
-
-	unsigned long		seg_boundary_mask;
 	void			*dma_drain_buffer;
 	unsigned int		dma_drain_size;
 	unsigned int		dma_pad_mask;
@@ -415,12 +415,14 @@ struct request_queue
 	struct list_head	tag_busy_list;
 
 	unsigned int		nr_sorted;
-	unsigned int		in_flight;
+	unsigned int		in_flight[2];
 
 	unsigned int		rq_timeout;
 	struct timer_list	timeout;
 	struct list_head	timeout_list;
 
+	struct queue_limits	limits;
+
 	/*
 	 * sg stuff
 	 */
@@ -522,6 +524,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag,
 	__clear_bit(flag, &q->queue_flags);
 }
 
+static inline int queue_in_flight(struct request_queue *q)
+{
+	return q->in_flight[0] + q->in_flight[1];
+}
+
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
 	WARN_ON_ONCE(!queue_is_locked(q));
@@ -752,10 +759,17 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern struct request *blk_make_request(struct request_queue *, struct bio *,
+					gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
+extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+			     struct bio_set *bs, gfp_t gfp_mask,
+			     int (*bio_ctr)(struct bio *, struct bio *, void *),
+			     void *data);
+extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
 extern void blk_plug_device(struct request_queue *);
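
The declarations above form the new clone interface ("block: add request
clone interface (v2)" in the shortlog). A hedged sketch of the intended
request-based stacking pattern, roughly what request-based DM does; names
and error handling are illustrative only:

	/* clone rq's bios into 'clone', then dispatch to an underlying queue */
	static int my_dispatch_clone(struct request_queue *under_q,
				     struct request *clone, struct request *rq,
				     struct bio_set *bs)
	{
		int ret;

		/* NULL bio_ctr: no per-bio setup callback needed here */
		ret = blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, NULL, NULL);
		if (ret)
			return ret;

		ret = blk_insert_cloned_request(under_q, clone);
		if (ret)
			blk_rq_unprep_clone(clone);	/* release cloned bios */
		return ret;
	}
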
@@ -768,12 +782,6 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 				 struct scsi_ioctl_command __user *);
 
 /*
- * Temporary export, until SCSI gets fixed up.
- */
-extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
-			     struct bio *bio);
-
-/*
  * A queue has just exitted congestion. Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
@@ -798,7 +806,6 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
-extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
 			   gfp_t);
@@ -831,41 +838,73 @@ static inline void blk_run_address_space(struct address_space *mapping)
 	blk_run_backing_dev(mapping->backing_dev_info, NULL);
 }
 
-extern void blkdev_dequeue_request(struct request *req);
+/*
+ * blk_rq_pos()		: the current sector
+ * blk_rq_bytes()	: bytes left in the entire request
+ * blk_rq_cur_bytes()	: bytes left in the current segment
+ * blk_rq_sectors()	: sectors left in the entire request
+ * blk_rq_cur_sectors()	: sectors left in the current segment
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+	return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+	return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+}
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+	return blk_rq_bytes(rq) >> 9;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+	return blk_rq_cur_bytes(rq) >> 9;
+}
+
+/*
+ * Request issue related functions.
+ */
+extern struct request *blk_peek_request(struct request_queue *q);
+extern void blk_start_request(struct request *rq);
+extern struct request *blk_fetch_request(struct request_queue *q);
 
 /*
- * blk_end_request() and friends.
- * __blk_end_request() and end_request() must be called with
- * the request queue spinlock acquired.
+ * Request completion related functions.
+ *
+ * blk_update_request() completes given number of bytes and updates
+ * the request without completing it.
+ *
+ * blk_end_request() and friends.  __blk_end_request() must be called
+ * with the request queue spinlock acquired.
  *
  * Several drivers define their own end_request and call
  * blk_end_request() for parts of the original function.
  * This prevents code duplication in drivers.
  */
-extern int blk_end_request(struct request *rq, int error,
-			   unsigned int nr_bytes);
-extern int __blk_end_request(struct request *rq, int error,
-			     unsigned int nr_bytes);
-extern int blk_end_bidi_request(struct request *rq, int error,
-				unsigned int nr_bytes, unsigned int bidi_bytes);
-extern void end_request(struct request *, int);
-extern int blk_end_request_callback(struct request *rq, int error,
-				    unsigned int nr_bytes,
-				    int (drv_callback)(struct request *));
+extern bool blk_update_request(struct request *rq, int error,
+			       unsigned int nr_bytes);
+extern bool blk_end_request(struct request *rq, int error,
+			    unsigned int nr_bytes);
+extern void blk_end_request_all(struct request *rq, int error);
+extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request(struct request *rq, int error,
+			      unsigned int nr_bytes);
+extern void __blk_end_request_all(struct request *rq, int error);
+extern bool __blk_end_request_cur(struct request *rq, int error);
+
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
 extern void blk_abort_queue(struct request_queue *);
-extern void blk_update_request(struct request *rq, int error,
-			       unsigned int nr_bytes);
-
-/*
- * blk_end_request() takes bytes instead of sectors as a complete size.
- * blk_rq_bytes() returns bytes left to complete in the entire request.
- * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
- */
-extern unsigned int blk_rq_bytes(struct request *rq);
-extern unsigned int blk_rq_cur_bytes(struct request *rq);
 
 /*
  * Access functions for manipulating queue properties
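
Together, the issue helpers and the byte-based completion helpers above
replace the old elv_next_request()/end_request() pattern. A hedged sketch
of the fetch/complete loop a simple PIO driver would use (my_xfer is
hypothetical; request_fn runs with the queue lock held, hence the
__-prefixed variant):

	static void my_request_fn(struct request_queue *q)
	{
		struct request *rq = blk_fetch_request(q); /* peek + start */

		while (rq) {
			/* position/length come from the accessors, never
			 * from the now-private __sector/__data_len fields */
			int err = my_xfer(rq->buffer, blk_rq_pos(rq),
					  blk_rq_cur_bytes(rq));

			/* false: request fully completed, fetch the next */
			if (!__blk_end_request_cur(rq, err))
				rq = blk_fetch_request(q);
		}
	}
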
@@ -877,10 +916,20 @@ extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
-extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_alignment_offset(struct request_queue *q,
+				       unsigned int alignment);
+extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+			    sector_t offset);
+extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+			      sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
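
The new setters above let a driver describe its I/O topology. A hedged
sketch of a probe-time setup for a hypothetical disk with 4 KiB physical
sectors (all values illustrative):

	static void my_set_topology(struct request_queue *q)
	{
		blk_queue_logical_block_size(q, 512);	/* addressing unit */
		blk_queue_physical_block_size(q, 4096);	/* internal sector */
		blk_queue_alignment_offset(q, 0);	/* LBA 0 is aligned */
		blk_queue_io_min(q, 4096);	/* below this risks R-M-W */
		blk_queue_io_opt(q, 64 * 1024);	/* preferred I/O granularity */
	}
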
@@ -967,19 +1016,87 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-static inline int queue_hardsect_size(struct request_queue *q)
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+	return q->limits.bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+	return q->limits.seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+	return q->limits.max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+	return q->limits.max_hw_sectors;
+}
+
+static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+{
+	return q->limits.max_hw_segments;
+}
+
+static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+{
+	return q->limits.max_phys_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+	return q->limits.max_segment_size;
+}
+
+static inline unsigned short queue_logical_block_size(struct request_queue *q)
 {
 	int retval = 512;
 
-	if (q && q->hardsect_size)
-		retval = q->hardsect_size;
+	if (q && q->limits.logical_block_size)
+		retval = q->limits.logical_block_size;
 
 	return retval;
 }
 
-static inline int bdev_hardsect_size(struct block_device *bdev)
+static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+{
+	return queue_logical_block_size(bdev_get_queue(bdev));
+}
+
+static inline unsigned int queue_physical_block_size(struct request_queue *q)
+{
+	return q->limits.physical_block_size;
+}
+
+static inline unsigned int queue_io_min(struct request_queue *q)
+{
+	return q->limits.io_min;
+}
+
+static inline unsigned int queue_io_opt(struct request_queue *q)
+{
+	return q->limits.io_opt;
+}
+
+static inline int queue_alignment_offset(struct request_queue *q)
+{
+	if (q && q->limits.misaligned)
+		return -1;
+
+	if (q && q->limits.alignment_offset)
+		return q->limits.alignment_offset;
+
+	return 0;
+}
+
+static inline int queue_sector_alignment_offset(struct request_queue *q,
+						sector_t sector)
 {
-	return queue_hardsect_size(bdev_get_queue(bdev));
+	return ((sector << 9) - q->limits.alignment_offset)
+		& (q->limits.io_min - 1);
 }
 
 static inline int queue_dma_alignment(struct request_queue *q)
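
With the limits split out into struct queue_limits, stacking drivers can
combine bottom-device limits directly. A hedged sketch (names hypothetical;
offset 0 assumes both devices start at an aligned boundary):

	static int my_stack(struct queue_limits *t,
			    struct block_device *a, struct block_device *b)
	{
		int ret = 0;

		ret |= blk_stack_limits(t, &bdev_get_queue(a)->limits, 0);
		ret |= blk_stack_limits(t, &bdev_get_queue(b)->limits, 0);

		/* blk_stack_limits() returns -1 on an alignment conflict */
		return ret ? -1 : 0;
	}
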
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ded2d7c42668..49c2362977fd 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -149,7 +149,7 @@ struct io_restrictions {
 	unsigned max_hw_sectors;
 	unsigned max_sectors;
 	unsigned max_segment_size;
-	unsigned short hardsect_size;
+	unsigned short logical_block_size;
 	unsigned short max_hw_segments;
 	unsigned short max_phys_segments;
 	unsigned char no_cluster; /* inverted so that 0 is default */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index c59b769f62b0..1cb3372e65d8 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -103,10 +103,8 @@ extern int elv_merge(struct request_queue *, struct request **, struct bio *);
 extern void elv_merge_requests(struct request_queue *, struct request *,
 			       struct request *);
 extern void elv_merged_request(struct request_queue *, struct request *, int);
-extern void elv_dequeue_request(struct request_queue *, struct request *);
 extern void elv_requeue_request(struct request_queue *, struct request *);
 extern int elv_queue_empty(struct request_queue *);
-extern struct request *elv_next_request(struct request_queue *q);
 extern struct request *elv_former_request(struct request_queue *, struct request *);
 extern struct request *elv_latter_request(struct request_queue *, struct request *);
 extern int elv_register_queue(struct request_queue *q);
@@ -171,7 +169,7 @@ enum {
 	ELV_MQUEUE_MUST,
 };
 
-#define rq_end_sector(rq)	((rq)->sector + (rq)->nr_sectors)
+#define rq_end_sector(rq)	(blk_rq_pos(rq) + blk_rq_sectors(rq))
 #define rb_entry_rq(node)	rb_entry((node), struct request, rb_node)
 
 /*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3b534e527e09..83d6b4397245 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2205,6 +2205,8 @@ extern int generic_segment_checks(const struct iovec *iov,
 /* fs/splice.c */
 extern ssize_t generic_file_splice_read(struct file *, loff_t *,
 		struct pipe_inode_info *, size_t, unsigned int);
+extern ssize_t default_file_splice_read(struct file *, loff_t *,
+		struct pipe_inode_info *, size_t, unsigned int);
 extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
 		struct file *, loff_t *, size_t, unsigned int);
 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index a1a28caed23d..149fda264c86 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -90,6 +90,7 @@ struct disk_stats {
 struct hd_struct {
 	sector_t start_sect;
 	sector_t nr_sects;
+	sector_t alignment_offset;
 	struct device __dev;
 	struct kobject *holder_dir;
 	int policy, partno;
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 08b987bccf89..dd05434fa45f 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -64,7 +64,7 @@ struct cfq_io_context {
  * and kmalloc'ed. These could be shared between processes.
  */
 struct io_context {
-	atomic_t refcount;
+	atomic_long_t refcount;
 	atomic_t nr_tasks;
 
 	/* all the fields below are protected by this lock */
@@ -91,8 +91,8 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
 	 * if ref count is zero, don't allow sharing (ioc is going away, it's
 	 * a race).
 	 */
-	if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
+	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
 		atomic_inc(&ioc->nr_tasks);
 		return ioc;
 	}
 
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 40725447f5e0..66c194e2d9b9 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -56,8 +56,7 @@ struct loop_device {
 	gfp_t		old_gfp_mask;
 
 	spinlock_t		lo_lock;
-	struct bio		*lo_bio;
-	struct bio		*lo_biotail;
+	struct bio_list		lo_bio_list;
 	int			lo_state;
 	struct mutex		lo_ctl_mutex;
 	struct task_struct	*lo_thread;
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
deleted file mode 100644
index 1f76b1ebf627..000000000000
--- a/include/linux/mg_disk.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * include/linux/mg_disk.c
- *
- * Support for the mGine m[g]flash IO mode.
- * Based on legacy hd.c
- *
- * (c) 2008 mGine Co.,LTD
- * (c) 2008 unsik Kim <donari75@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MG_DISK_H__
-#define __MG_DISK_H__
-
-#include <linux/blkdev.h>
-#include <linux/ata.h>
-
-/* name for block device */
-#define MG_DISK_NAME "mgd"
-/* name for platform device */
-#define MG_DEV_NAME "mg_disk"
-
-#define MG_DISK_MAJ 0
-#define MG_DISK_MAX_PART 16
-#define MG_SECTOR_SIZE 512
-#define MG_MAX_SECTS 256
-
-/* Register offsets */
-#define MG_BUFF_OFFSET			0x8000
-#define MG_STORAGE_BUFFER_SIZE		0x200
-#define MG_REG_OFFSET			0xC000
-#define MG_REG_FEATURE			(MG_REG_OFFSET + 2)	/* write case */
-#define MG_REG_ERROR			(MG_REG_OFFSET + 2)	/* read case */
-#define MG_REG_SECT_CNT			(MG_REG_OFFSET + 4)
-#define MG_REG_SECT_NUM			(MG_REG_OFFSET + 6)
-#define MG_REG_CYL_LOW			(MG_REG_OFFSET + 8)
-#define MG_REG_CYL_HIGH			(MG_REG_OFFSET + 0xA)
-#define MG_REG_DRV_HEAD			(MG_REG_OFFSET + 0xC)
-#define MG_REG_COMMAND			(MG_REG_OFFSET + 0xE)	/* write case */
-#define MG_REG_STATUS			(MG_REG_OFFSET + 0xE)	/* read case */
-#define MG_REG_DRV_CTRL			(MG_REG_OFFSET + 0x10)
-#define MG_REG_BURST_CTRL		(MG_REG_OFFSET + 0x12)
-
-/* "Drive Select/Head Register" bit values */
-#define MG_REG_HEAD_MUST_BE_ON		0xA0 /* These 2 bits are always on */
-#define MG_REG_HEAD_DRIVE_MASTER	(0x00 | MG_REG_HEAD_MUST_BE_ON)
-#define MG_REG_HEAD_DRIVE_SLAVE		(0x10 | MG_REG_HEAD_MUST_BE_ON)
-#define MG_REG_HEAD_LBA_MODE		(0x40 | MG_REG_HEAD_MUST_BE_ON)
-
-
-/* "Device Control Register" bit values */
-#define MG_REG_CTRL_INTR_ENABLE			0x0
-#define MG_REG_CTRL_INTR_DISABLE		(0x1<<1)
-#define MG_REG_CTRL_RESET			(0x1<<2)
-#define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH	0x0
-#define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW	(0x1<<4)
-#define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW		0x0
-#define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH	(0x1<<5)
-#define MG_REG_CTRL_DPD_DISABLE			0x0
-#define MG_REG_CTRL_DPD_ENABLE			(0x1<<6)
-
-/* Status register bit */
-/* error bit in status register */
-#define MG_REG_STATUS_BIT_ERROR			0x01
-/* corrected error in status register */
-#define MG_REG_STATUS_BIT_CORRECTED_ERROR	0x04
-/* data request bit in status register */
-#define MG_REG_STATUS_BIT_DATA_REQ		0x08
-/* DSC - Drive Seek Complete */
-#define MG_REG_STATUS_BIT_SEEK_DONE		0x10
-/* DWF - Drive Write Fault */
-#define MG_REG_STATUS_BIT_WRITE_FAULT		0x20
-#define MG_REG_STATUS_BIT_READY			0x40
-#define MG_REG_STATUS_BIT_BUSY			0x80
-
-/* handy status */
-#define MG_STAT_READY	(MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE)
-#define MG_READY_OK(s)	(((s) & (MG_STAT_READY | \
-				 (MG_REG_STATUS_BIT_BUSY | \
-				  MG_REG_STATUS_BIT_WRITE_FAULT | \
-				  MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY)
-
-/* Error register */
-#define MG_REG_ERR_AMNF		0x01
-#define MG_REG_ERR_ABRT		0x04
-#define MG_REG_ERR_IDNF		0x10
-#define MG_REG_ERR_UNC		0x40
-#define MG_REG_ERR_BBK		0x80
-
-/* error code for others */
-#define MG_ERR_NONE		0
-#define MG_ERR_TIMEOUT		0x100
-#define MG_ERR_INIT_STAT	0x101
-#define MG_ERR_TRANSLATION	0x102
-#define MG_ERR_CTRL_RST		0x103
-#define MG_ERR_INV_STAT		0x104
-#define MG_ERR_RSTOUT		0x105
-
-#define MG_MAX_ERRORS	6	/* Max read/write errors */
-
-/* command */
-#define MG_CMD_RD 0x20
-#define MG_CMD_WR 0x30
-#define MG_CMD_SLEEP 0x99
-#define MG_CMD_WAKEUP 0xC3
-#define MG_CMD_ID 0xEC
-#define MG_CMD_WR_CONF 0x3C
-#define MG_CMD_RD_CONF 0x40
-
-/* operation mode */
-#define MG_OP_CASCADE (1 << 0)
-#define MG_OP_CASCADE_SYNC_RD (1 << 1)
-#define MG_OP_CASCADE_SYNC_WR (1 << 2)
-#define MG_OP_INTERLEAVE (1 << 3)
-
-/* synchronous */
-#define MG_BURST_LAT_4 (3 << 4)
-#define MG_BURST_LAT_5 (4 << 4)
-#define MG_BURST_LAT_6 (5 << 4)
-#define MG_BURST_LAT_7 (6 << 4)
-#define MG_BURST_LAT_8 (7 << 4)
-#define MG_BURST_LEN_4 (1 << 1)
-#define MG_BURST_LEN_8 (2 << 1)
-#define MG_BURST_LEN_16 (3 << 1)
-#define MG_BURST_LEN_32 (4 << 1)
-#define MG_BURST_LEN_CONT (0 << 1)
-
-/* timeout value (unit: ms) */
-#define MG_TMAX_CONF_TO_CMD	1
-#define MG_TMAX_WAIT_RD_DRQ	10
-#define MG_TMAX_WAIT_WR_DRQ	500
-#define MG_TMAX_RST_TO_BUSY	10
-#define MG_TMAX_HDRST_TO_RDY	500
-#define MG_TMAX_SWRST_TO_RDY	500
-#define MG_TMAX_RSTOUT		3000
-
-/* device attribution */
-/* use mflash as boot device */
-#define MG_BOOT_DEV		(1 << 0)
-/* use mflash as storage device */
-#define MG_STORAGE_DEV		(1 << 1)
-/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
-#define MG_STORAGE_DEV_SKIP_RST	(1 << 2)
-
-#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
-
-/* names of GPIO resource */
-#define MG_RST_PIN	"mg_rst"
-/* except MG_BOOT_DEV, reset-out pin should be assigned */
-#define MG_RSTOUT_PIN	"mg_rstout"
-
-/* private driver data */
-struct mg_drv_data {
-	/* disk resource */
-	u32 use_polling;
-
-	/* device attribution */
-	u32 dev_attr;
-
-	/* internally used */
-	struct mg_host *host;
-};
-
-/* main structure for mflash driver */
-struct mg_host {
-	struct device *dev;
-
-	struct request_queue *breq;
-	spinlock_t lock;
-	struct gendisk *gd;
-
-	struct timer_list timer;
-	void (*mg_do_intr) (struct mg_host *);
-
-	u16 id[ATA_ID_WORDS];
-
-	u16 cyls;
-	u16 heads;
-	u16 sectors;
-	u32 n_sectors;
-	u32 nres_sectors;
-
-	void __iomem *dev_base;
-	unsigned int irq;
-	unsigned int rst;
-	unsigned int rstout;
-
-	u32 major;
-	u32 error;
-};
-
-/*
- * Debugging macro and defines
- */
-#undef DO_MG_DEBUG
-#ifdef DO_MG_DEBUG
-# define MG_DBG(fmt, args...) \
-	printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
-#else /* CONFIG_MG_DEBUG */
-# define MG_DBG(fmt, args...) do { } while (0)
-#endif /* CONFIG_MG_DEBUG */
-
-#endif
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index c8f038554e80..b43a9e039059 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -152,5 +152,6 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void
 void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
 
 #endif
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 5f3faa9d15ae..18e7c7c0cae6 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -11,8 +11,7 @@
 #include <linux/pipe_fs_i.h>
 
 /*
- * splice is tied to pipes as a transport (at least for now), so we'll just
- * add the splice flags here.
+ * Flags passed in from splice/tee/vmsplice
  */
 #define SPLICE_F_MOVE	(0x01)	/* move pages instead of copying */
 #define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index 94c56d29869d..be7d255fc7cf 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -15,6 +15,10 @@
 #define VIRTIO_BLK_F_GEOMETRY	4	/* Legacy geometry available */
 #define VIRTIO_BLK_F_RO		5	/* Disk is read-only */
 #define VIRTIO_BLK_F_BLK_SIZE	6	/* Block size of disk is available*/
+#define VIRTIO_BLK_F_SCSI	7	/* Supports scsi command passthru */
+#define VIRTIO_BLK_F_IDENTIFY	8	/* ATA IDENTIFY supported */
+
+#define VIRTIO_BLK_ID_BYTES	(sizeof(__u16[256]))	/* IDENTIFY DATA */
 
 struct virtio_blk_config
 {
@@ -32,6 +36,7 @@ struct virtio_blk_config
 	} geometry;
 	/* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
 	__u32 blk_size;
+	__u8 identify[VIRTIO_BLK_ID_BYTES];
 } __attribute__((packed));
 
 /* These two define direction. */
@@ -55,6 +60,13 @@ struct virtio_blk_outhdr
 	__u64 sector;
 };
 
+struct virtio_scsi_inhdr {
+	__u32 errors;
+	__u32 data_len;
+	__u32 sense_len;
+	__u32 residual;
+};
+
 /* And this is the final byte of the write scatter-gather list. */
 #define VIRTIO_BLK_S_OK		0
 #define VIRTIO_BLK_S_IOERR	1
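
A hedged sketch of how a guest driver might test the new feature bits at
probe time, assuming the standard virtio_has_feature() helper from
linux/virtio_config.h (my_probe_features is hypothetical):

	#include <linux/virtio_config.h>

	static void my_probe_features(struct virtio_device *vdev)
	{
		/* SG_IO passthru: completions carry a virtio_scsi_inhdr */
		bool scsi = virtio_has_feature(vdev, VIRTIO_BLK_F_SCSI);

		/* virtio_blk_config.identify holds ATA IDENTIFY data */
		bool ident = virtio_has_feature(vdev, VIRTIO_BLK_F_IDENTIFY);

		(void)scsi; (void)ident;	/* used by the real driver */
	}
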
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 43b50d36925c..3878d1dc7f59 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -270,7 +270,7 @@ static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
 
 static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
 {
-	return scmd->request->sector;
+	return blk_rq_pos(scmd->request);
 }
 
 static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 53effd496a50..d6b05f42dd44 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -25,9 +25,8 @@ TRACE_EVENT(block_rq_abort,
 
 	TP_fast_assign(
 		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
-		__entry->sector    = blk_pc_request(rq) ? 0 : rq->hard_sector;
-		__entry->nr_sector = blk_pc_request(rq) ?
-						0 : rq->hard_nr_sectors;
+		__entry->sector    = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+		__entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
 		__entry->errors    = rq->errors;
 
 		blk_fill_rwbs_rq(__entry->rwbs, rq);
@@ -59,10 +58,9 @@ TRACE_EVENT(block_rq_insert,
 
 	TP_fast_assign(
 		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
-		__entry->sector    = blk_pc_request(rq) ? 0 : rq->hard_sector;
-		__entry->nr_sector = blk_pc_request(rq) ?
-						0 : rq->hard_nr_sectors;
-		__entry->bytes     = blk_pc_request(rq) ? rq->data_len : 0;
+		__entry->sector    = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+		__entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
+		__entry->bytes     = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
 
 		blk_fill_rwbs_rq(__entry->rwbs, rq);
 		blk_dump_cmd(__get_str(cmd), rq);
@@ -94,10 +92,9 @@ TRACE_EVENT(block_rq_issue,
 
 	TP_fast_assign(
 		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
-		__entry->sector    = blk_pc_request(rq) ? 0 : rq->hard_sector;
-		__entry->nr_sector = blk_pc_request(rq) ?
-						0 : rq->hard_nr_sectors;
-		__entry->bytes     = blk_pc_request(rq) ? rq->data_len : 0;
+		__entry->sector    = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+		__entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
+		__entry->bytes     = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
 
 		blk_fill_rwbs_rq(__entry->rwbs, rq);
 		blk_dump_cmd(__get_str(cmd), rq);
@@ -128,9 +125,8 @@ TRACE_EVENT(block_rq_requeue,
 
 	TP_fast_assign(
 		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
-		__entry->sector    = blk_pc_request(rq) ? 0 : rq->hard_sector;
-		__entry->nr_sector = blk_pc_request(rq) ?
-						0 : rq->hard_nr_sectors;
+		__entry->sector    = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+		__entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
 		__entry->errors    = rq->errors;
 
 		blk_fill_rwbs_rq(__entry->rwbs, rq);
@@ -161,9 +157,8 @@ TRACE_EVENT(block_rq_complete,
 
 	TP_fast_assign(
 		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
-		__entry->sector    = blk_pc_request(rq) ? 0 : rq->hard_sector;
-		__entry->nr_sector = blk_pc_request(rq) ?
-						0 : rq->hard_nr_sectors;
+		__entry->sector    = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+		__entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
 		__entry->errors    = rq->errors;
 
 		blk_fill_rwbs_rq(__entry->rwbs, rq);