author     Jens Axboe <jens.axboe@oracle.com>  2008-01-29 07:54:41 -0500
committer  Jens Axboe <jens.axboe@oracle.com>  2008-01-29 15:55:09 -0500
commit     26b8256e2bb930a8e4d4d10aa74950d8921376b8 (patch)
tree       36fc1011aa68526dc1fb5b237e330ca2c27c9939 /block
parent     86db1e29772372155db08ff48a9ceb76e11a2ad1 (diff)
block: get rid of unnecessary forward declarations in blk-core.c
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
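The change itself is a pure reordering: the static helpers are moved so each is defined before its first caller, which lets the forward declarations at the top of blk-core.c be dropped. A minimal, hypothetical sketch of the same pattern in a standalone C file (names and bodies are invented for illustration, not taken from blk-core.c):

#include <stdio.h>

/*
 * Hypothetical illustration only: because the static helper is defined
 * before its first caller, no forward declaration such as
 * "static void account_io(int new_io);" is needed at the top of the file.
 */
static void account_io(int new_io)
{
	if (new_io)
		printf("new I/O started\n");
	else
		printf("merged into an existing request\n");
}

static int submit_io(int new_io)
{
	account_io(new_io);	/* definition is already visible here */
	return 0;
}

int main(void)
{
	submit_io(1);
	submit_io(0);
	return 0;
}

Keeping definitions in call order like this is a readability and maintenance cleanup; the behavior of the compiled file is unchanged.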
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  |  62
1 file changed, 30 insertions, 32 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 2c73ed1a8131..3d415ec10fb8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -33,9 +33,7 @@
 
 #include "blk.h"
 
-static void drive_stat_acct(struct request *rq, int new_io);
 static int __make_request(struct request_queue *q, struct bio *bio);
-static void blk_recalc_rq_segments(struct request *rq);
 
 /*
  * For the allocated request tables
@@ -54,6 +52,21 @@ static struct workqueue_struct *kblockd_workqueue;
 
 static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
 
+static void drive_stat_acct(struct request *rq, int new_io)
+{
+	int rw = rq_data_dir(rq);
+
+	if (!blk_fs_request(rq) || !rq->rq_disk)
+		return;
+
+	if (!new_io) {
+		__disk_stat_inc(rq->rq_disk, merges[rw]);
+	} else {
+		disk_round_stats(rq->rq_disk);
+		rq->rq_disk->in_flight++;
+	}
+}
+
 void blk_queue_congestion_threshold(struct request_queue *q)
 {
 	int nr;
@@ -168,21 +181,6 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
-void blk_recount_segments(struct request_queue *q, struct bio *bio)
-{
-	struct request rq;
-	struct bio *nxt = bio->bi_next;
-	rq.q = q;
-	rq.bio = rq.biotail = bio;
-	bio->bi_next = NULL;
-	blk_recalc_rq_segments(&rq);
-	bio->bi_next = nxt;
-	bio->bi_phys_segments = rq.nr_phys_segments;
-	bio->bi_hw_segments = rq.nr_hw_segments;
-	bio->bi_flags |= (1 << BIO_SEG_VALID);
-}
-EXPORT_SYMBOL(blk_recount_segments);
-
 static void blk_recalc_rq_segments(struct request *rq)
 {
 	int nr_phys_segs;
@@ -255,6 +253,21 @@ new_hw_segment:
 	rq->nr_hw_segments = nr_hw_segs;
 }
 
+void blk_recount_segments(struct request_queue *q, struct bio *bio)
+{
+	struct request rq;
+	struct bio *nxt = bio->bi_next;
+	rq.q = q;
+	rq.bio = rq.biotail = bio;
+	bio->bi_next = NULL;
+	blk_recalc_rq_segments(&rq);
+	bio->bi_next = nxt;
+	bio->bi_phys_segments = rq.nr_phys_segments;
+	bio->bi_hw_segments = rq.nr_hw_segments;
+	bio->bi_flags |= (1 << BIO_SEG_VALID);
+}
+EXPORT_SYMBOL(blk_recount_segments);
+
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
@@ -1305,21 +1318,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
 EXPORT_SYMBOL(blk_insert_request);
 
-static void drive_stat_acct(struct request *rq, int new_io)
-{
-	int rw = rq_data_dir(rq);
-
-	if (!blk_fs_request(rq) || !rq->rq_disk)
-		return;
-
-	if (!new_io) {
-		__disk_stat_inc(rq->rq_disk, merges[rw]);
-	} else {
-		disk_round_stats(rq->rq_disk);
-		rq->rq_disk->in_flight++;
-	}
-}
-
 /*
  * add-request adds a request to the linked list.
  * queue lock is held and interrupts disabled, as we muck with the