author	Jens Axboe <jens.axboe@oracle.com>	2006-12-19 02:33:11 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2006-12-19 02:33:11 -0500
commit	1aa4f24fe96938cabe7a1e9da8bc3bfbd1dfe3fa (patch)
tree	922149707b2741abc8dca53a3ae56e681ef0fbf6 /block
parent	2985259b0e3928d4cd0723ac5aad0d1190ab7717 (diff)
[PATCH] Remove queue merging hooks
We have full flexibility of merging parameters now, so we can remove the hooks that define back/front/request merge strategies. Nobody is using them anymore.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--	block/ll_rw_blk.c	15
1 file changed, 6 insertions(+), 9 deletions(-)
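The practical effect of the patch is that in-tree call sites stop dispatching through the per-queue merge hooks and call the low-level helpers directly; ll_back_merge_fn is exported so that callers such as __blk_rq_map_user() can still reach it. The following before/after fragment is only an illustrative sketch of one call site distilled from the diff below, not code taken verbatim from the patch:

	/* Before: merging dispatched through a per-queue hook, which
	 * blk_init_queue_node() had to point at ll_back_merge_fn. */
	else if (!q->back_merge_fn(q, rq, bio)) {
		ret = -EINVAL;
		/* ... */
	}

	/* After: the helper is called (and EXPORT_SYMBOL'ed) directly,
	 * so the back/front/request merge hooks can be removed. */
	else if (!ll_back_merge_fn(q, rq, bio)) {
		ret = -EINVAL;
		/* ... */
	}
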
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 71a78a7e42fd..433797934bd5 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1405,8 +1405,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 	return 1;
 }
 
-static int ll_back_merge_fn(request_queue_t *q, struct request *req,
-			    struct bio *bio)
+int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
 {
 	unsigned short max_sectors;
 	int len;
@@ -1442,6 +1441,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 
 	return ll_new_hw_segment(q, req, bio);
 }
+EXPORT_SYMBOL(ll_back_merge_fn);
 
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 			    struct bio *bio)
@@ -1912,9 +1912,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	}
 
 	q->request_fn		= rfn;
-	q->back_merge_fn	= ll_back_merge_fn;
-	q->front_merge_fn	= ll_front_merge_fn;
-	q->merge_requests_fn	= ll_merge_requests_fn;
 	q->prep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
@@ -2371,7 +2368,7 @@ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
 		spin_lock_irq(q->queue_lock);
 		if (!rq->bio)
 			blk_rq_bio_prep(q, rq, bio);
-		else if (!q->back_merge_fn(q, rq, bio)) {
+		else if (!ll_back_merge_fn(q, rq, bio)) {
 			ret = -EINVAL;
 			spin_unlock_irq(q->queue_lock);
 			goto unmap_bio;
@@ -2820,7 +2817,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 	 * will have updated segment counts, update sector
 	 * counts here.
 	 */
-	if (!q->merge_requests_fn(q, req, next))
+	if (!ll_merge_requests_fn(q, req, next))
 		return 0;
 
 	/*
@@ -2937,7 +2934,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_BACK_MERGE:
 			BUG_ON(!rq_mergeable(req));
 
-			if (!q->back_merge_fn(q, req, bio))
+			if (!ll_back_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
@@ -2954,7 +2951,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_FRONT_MERGE:
 			BUG_ON(!rq_mergeable(req));
 
-			if (!q->front_merge_fn(q, req, bio))
+			if (!ll_front_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);