 drivers/block/ll_rw_blk.c | 67 ++++-----------------------------------------
 include/linux/blkdev.h    |  6 ------
 2 files changed, 8 insertions(+), 65 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index fd94ea27d594..fc86d53fe783 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -37,6 +37,7 @@
 
 static void blk_unplug_work(void *data);
 static void blk_unplug_timeout(unsigned long data);
+static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 
 /*
  * For the allocated request tables
@@ -1137,7 +1138,7 @@ new_hw_segment:
 }
 
 
-int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
 			    struct bio *nxt)
 {
 	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
@@ -1158,9 +1159,7 @@ int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
 	return 0;
 }
 
-EXPORT_SYMBOL(blk_phys_contig_segment);
-
-int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
 			  struct bio *nxt)
 {
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
@@ -1176,8 +1175,6 @@ int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
 	return 1;
 }
 
-EXPORT_SYMBOL(blk_hw_contig_segment);
-
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
@@ -1825,7 +1822,7 @@ static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
  * is the behaviour we want though - once it gets a wakeup it should be given
  * a nice run.
  */
-void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
+static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
 {
 	if (!ioc || ioc_batching(q, ioc))
 		return;
@@ -2254,45 +2251,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-/**
- * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
- * @q: device queue
- * @disk: gendisk
- * @error_sector: error offset
- *
- * Description:
- *    Devices understanding the SCSI command set, can use this function as
- *    a helper for issuing a cache flush. Note: driver is required to store
- *    the error offset (in case of error flushing) in ->sector of struct
- *    request.
- */
-int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
-			       sector_t *error_sector)
-{
-	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
-	int ret;
-
-	rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
-	rq->sector = 0;
-	memset(rq->cmd, 0, sizeof(rq->cmd));
-	rq->cmd[0] = 0x35;
-	rq->cmd_len = 12;
-	rq->data = NULL;
-	rq->data_len = 0;
-	rq->timeout = 60 * HZ;
-
-	ret = blk_execute_rq(q, disk, rq);
-
-	if (ret && error_sector)
-		*error_sector = rq->sector;
-
-	blk_put_request(rq);
-	return ret;
-}
-
-EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
-
-void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
+static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 {
 	int rw = rq_data_dir(rq);
 
@@ -2551,16 +2510,6 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
 
 EXPORT_SYMBOL(blk_attempt_remerge);
 
-/*
- * Non-locking blk_attempt_remerge variant.
- */
-void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
-{
-	attempt_back_merge(q, rq);
-}
-
-EXPORT_SYMBOL(__blk_attempt_remerge);
-
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
 	struct request *req, *freereq = NULL;
@@ -2971,7 +2920,7 @@ void submit_bio(int rw, struct bio *bio)
 
 EXPORT_SYMBOL(submit_bio);
 
-void blk_recalc_rq_segments(struct request *rq)
+static void blk_recalc_rq_segments(struct request *rq)
 {
 	struct bio *bio, *prevbio = NULL;
 	int nr_phys_segs, nr_hw_segs;
@@ -3013,7 +2962,7 @@ void blk_recalc_rq_segments(struct request *rq)
 	rq->nr_hw_segments = nr_hw_segs;
 }
 
-void blk_recalc_rq_sectors(struct request *rq, int nsect)
+static void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
 	if (blk_fs_request(rq)) {
 		rq->hard_sector += nsect;
@@ -3601,7 +3550,7 @@ static struct sysfs_ops queue_sysfs_ops = {
 	.store = queue_attr_store,
 };
 
-struct kobj_type queue_ktype = {
+static struct kobj_type queue_ktype = {
 	.sysfs_ops = &queue_sysfs_ops,
 	.default_attrs = default_attrs,
 };
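
For reference, the blkdev_scsi_issue_flush_fn() helper deleted above built a SCSI SYNCHRONIZE CACHE command by hand and ran it through the queue as a block-PC request. An annotated copy of the removed code as it stood in this tree (comments added; 0x35 is the SCSI SYNCHRONIZE CACHE(10) opcode):

int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	/* Allocate a WRITE-direction request; __GFP_WAIT may sleep. */
	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
	int ret;

	/* Raw SCSI passthrough, ordered as a soft barrier. */
	rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
	rq->sector = 0;
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->cmd[0] = 0x35;	/* SYNCHRONIZE CACHE (10) */
	rq->cmd_len = 12;
	rq->data = NULL;	/* the command carries no data payload */
	rq->data_len = 0;
	rq->timeout = 60 * HZ;

	/* Dispatch and wait for completion. */
	ret = blk_execute_rq(q, disk, rq);

	/* Drivers were required to store the failing offset in ->sector. */
	if (ret && error_sector)
		*error_sector = rq->sector;

	blk_put_request(rq);
	return ret;
}

With this helper gone, SCSI-aware drivers hook their own flush routine into the queue via blk_queue_issue_flush_fn(), and callers go through the still-exported blkdev_issue_flush().
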
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 60272141ff19..b54a0348a890 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -539,15 +539,12 @@ extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void blk_end_sync_rq(struct request *rq);
 extern void blk_attempt_remerge(request_queue_t *, struct request *);
-extern void __blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, int);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
 extern void blk_requeue_request(request_queue_t *, struct request *);
 extern void blk_plug_device(request_queue_t *);
 extern int blk_remove_plug(request_queue_t *);
 extern void blk_recount_segments(request_queue_t *, struct bio *);
-extern int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
-extern int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
 extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
 extern void blk_start_queue(request_queue_t *q);
 extern void blk_stop_queue(request_queue_t *q);
@@ -631,7 +628,6 @@ extern void blk_queue_dma_alignment(request_queue_t *, int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern void blk_queue_ordered(request_queue_t *, int);
 extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern int blkdev_scsi_issue_flush_fn(request_queue_t *, struct gendisk *, sector_t *);
 extern struct request *blk_start_pre_flush(request_queue_t *,struct request *);
 extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
 extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
@@ -675,8 +671,6 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-extern void drive_stat_acct(struct request *, int, int);
-
 static inline int queue_hardsect_size(request_queue_t *q)
 {
 	int retval = 512;
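
Since blkdev_issue_flush() stays exported while the helpers above become static, here is a minimal sketch of how an outside caller flushes a device's write cache after this change. The wrapper name flush_device_cache and the printk reporting are illustrative, not from the source:

#include <linux/blkdev.h>
#include <linux/kernel.h>

/* Hypothetical wrapper: flush the write cache of an opened block device. */
static int flush_device_cache(struct block_device *bdev)
{
	sector_t error_sector;
	int err;

	/* Routes through the queue's issue_flush_fn, if the driver set one. */
	err = blkdev_issue_flush(bdev, &error_sector);
	if (err)
		printk(KERN_ERR "cache flush failed near sector %llu\n",
		       (unsigned long long)error_sector);
	return err;
}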
