author     Adrian Bunk <bunk@stusta.de>                2005-06-25 17:59:10 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-06-25 19:25:05 -0400
commit     93d17d3d84b7147e8f07aeeb15ec01aa92c6b564 (patch)
tree       5a6448e7d30d4d6443f5ab298f2e8faae788df30 /drivers/block/ll_rw_blk.c
parent     e8e1c7292ee9b64c35b3f6d7f905ca5e854aea95 (diff)
[PATCH] drivers/block/ll_rw_blk.c: cleanups
This patch contains the following cleanups:

- make needlessly global code static
- remove the following unused global functions:
  - blkdev_scsi_issue_flush_fn
  - __blk_attempt_remerge
- remove the following unused EXPORT_SYMBOL's:
  - blk_phys_contig_segment
  - blk_hw_contig_segment
  - blkdev_scsi_issue_flush_fn
  - __blk_attempt_remerge

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Acked-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
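As an aside for readers unfamiliar with the pattern, "make needlessly global code static" is an ordinary C linkage change: a function used only inside ll_rw_blk.c is marked static and its EXPORT_SYMBOL() is dropped, so it is no longer visible to other files or to modules. The sketch below is illustrative only (a stand-alone program with a hypothetical helper name, not kernel code):

        #include <stdio.h>

        /*
         * Before such a cleanup this helper would be non-static, typically
         * paired with an EXPORT_SYMBOL() so code outside this file could
         * call it.  Once every caller lives in this one file, "static"
         * confines it to the translation unit and the export line can
         * simply be deleted.
         */
        static int contig_helper(int a, int b)
        {
                return a + b;
        }

        int main(void)
        {
                printf("%d\n", contig_helper(2, 3));    /* prints 5 */
                return 0;
        }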
Diffstat (limited to 'drivers/block/ll_rw_blk.c')
-rw-r--r--  drivers/block/ll_rw_blk.c | 67 ++--------
 1 file changed, 8 insertions(+), 59 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index fd94ea27d594..fc86d53fe783 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -37,6 +37,7 @@
 
 static void blk_unplug_work(void *data);
 static void blk_unplug_timeout(unsigned long data);
+static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 
 /*
  * For the allocated request tables
@@ -1137,7 +1138,7 @@ new_hw_segment:
 }
 
 
-int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
                                    struct bio *nxt)
 {
         if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
@@ -1158,9 +1159,7 @@ int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
         return 0;
 }
 
-EXPORT_SYMBOL(blk_phys_contig_segment);
-
-int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
                                  struct bio *nxt)
 {
         if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
@@ -1176,8 +1175,6 @@ int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
         return 1;
 }
 
-EXPORT_SYMBOL(blk_hw_contig_segment);
-
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
@@ -1825,7 +1822,7 @@ static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
  * is the behaviour we want though - once it gets a wakeup it should be given
  * a nice run.
  */
-void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
+static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
 {
         if (!ioc || ioc_batching(q, ioc))
                 return;
@@ -2254,45 +2251,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-/**
- * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
- * @q: device queue
- * @disk: gendisk
- * @error_sector: error offset
- *
- * Description:
- *    Devices understanding the SCSI command set, can use this function as
- *    a helper for issuing a cache flush. Note: driver is required to store
- *    the error offset (in case of error flushing) in ->sector of struct
- *    request.
- */
-int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
-                               sector_t *error_sector)
-{
-        struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
-        int ret;
-
-        rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
-        rq->sector = 0;
-        memset(rq->cmd, 0, sizeof(rq->cmd));
-        rq->cmd[0] = 0x35;
-        rq->cmd_len = 12;
-        rq->data = NULL;
-        rq->data_len = 0;
-        rq->timeout = 60 * HZ;
-
-        ret = blk_execute_rq(q, disk, rq);
-
-        if (ret && error_sector)
-                *error_sector = rq->sector;
-
-        blk_put_request(rq);
-        return ret;
-}
-
-EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
-
-void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
+static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 {
         int rw = rq_data_dir(rq);
 
@@ -2551,16 +2510,6 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
 
 EXPORT_SYMBOL(blk_attempt_remerge);
 
-/*
- * Non-locking blk_attempt_remerge variant.
- */
-void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
-{
-        attempt_back_merge(q, rq);
-}
-
-EXPORT_SYMBOL(__blk_attempt_remerge);
-
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
         struct request *req, *freereq = NULL;
@@ -2971,7 +2920,7 @@ void submit_bio(int rw, struct bio *bio)
 
 EXPORT_SYMBOL(submit_bio);
 
-void blk_recalc_rq_segments(struct request *rq)
+static void blk_recalc_rq_segments(struct request *rq)
 {
         struct bio *bio, *prevbio = NULL;
         int nr_phys_segs, nr_hw_segs;
@@ -3013,7 +2962,7 @@ void blk_recalc_rq_segments(struct request *rq)
         rq->nr_hw_segments = nr_hw_segs;
 }
 
-void blk_recalc_rq_sectors(struct request *rq, int nsect)
+static void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
         if (blk_fs_request(rq)) {
                 rq->hard_sector += nsect;
@@ -3601,7 +3550,7 @@ static struct sysfs_ops queue_sysfs_ops = {
         .store = queue_attr_store,
 };
 
-struct kobj_type queue_ktype = {
+static struct kobj_type queue_ktype = {
         .sysfs_ops = &queue_sysfs_ops,
         .default_attrs = default_attrs,
 };