author		Jens Axboe <axboe@suse.de>	2006-07-17 22:14:45 -0400
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2006-09-30 14:29:34 -0400
commit		51da90fcb6acd580e87280eaf4eb1f788021807d (patch)
tree		fea3d2266942557f0b061aea022bbb0f5e383dec /block/ll_rw_blk.c
parent		cb78b285c8f9d59b0d4e4f6a54c2977ce1d9b880 (diff)
[PATCH] ll_rw_blk: cleanup __make_request()
- Don't assign variables that are only used once.
- Kill spin_lock() prefetching, it's opportunistic at best.
Signed-off-by: Jens Axboe <axboe@suse.de>
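
[Editor's note on the first point: locals that were written once and read once (rw, sector, cur_nr_sectors) are replaced by the accessor call at the point of use, while values the function still needs in more than one place (prio, sync) become const locals initialized at declaration. Below is a minimal stand-alone sketch of the pattern; the struct bio and accessors are simplified stand-ins so the example compiles on its own, not the kernel definitions.]

#include <stdio.h>

/* Simplified stand-ins for the kernel's struct bio and accessors, here
 * only so the sketch compiles on its own. */
struct bio { unsigned long bi_sector; unsigned long bi_rw; };
static int bio_data_dir(struct bio *b) { return (int)(b->bi_rw & 1); }
static int bio_sync(struct bio *b)     { return (int)((b->bi_rw >> 1) & 1); }

static void old_style(struct bio *bio)
{
	/* Before: every value copied into a local, even single-use ones. */
	int rw, sync;
	unsigned long sector;

	sector = bio->bi_sector;
	rw = bio_data_dir(bio);
	sync = bio_sync(bio);

	printf("old: rw=%d sector=%lu sync=%d\n", rw, sector, sync);
}

static void new_style(struct bio *bio)
{
	/* After: multi-use values become const locals initialized at
	 * declaration; single-use values are fetched at the use site. */
	const int sync = bio_sync(bio);

	printf("new: rw=%d sector=%lu sync=%d\n",
	       bio_data_dir(bio), bio->bi_sector, sync);
	printf("new: sync reused=%d\n", sync);
}

int main(void)
{
	struct bio bio = { .bi_sector = 2048, .bi_rw = 3 };

	old_style(&bio);
	new_style(&bio);
	return 0;
}

[Besides saving lines, this shrinks each value's live range, which usually helps both the reader and the compiler.]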
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--	block/ll_rw_blk.c | 22 +++++++---------------
1 file changed, 7 insertions(+), 15 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index b1ea941f6dc3..e25b4cd2dcd1 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2885,17 +2885,11 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
 	struct request *req;
-	int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
-	unsigned short prio;
-	sector_t sector;
+	int el_ret, nr_sectors, barrier, err;
+	const unsigned short prio = bio_prio(bio);
+	const int sync = bio_sync(bio);
 
-	sector = bio->bi_sector;
 	nr_sectors = bio_sectors(bio);
-	cur_nr_sectors = bio_cur_sectors(bio);
-	prio = bio_prio(bio);
-
-	rw = bio_data_dir(bio);
-	sync = bio_sync(bio);
 
 	/*
 	 * low level driver can indicate that it wants pages above a
@@ -2904,8 +2898,6 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	 */
 	blk_queue_bounce(q, &bio);
 
-	spin_lock_prefetch(q->queue_lock);
-
 	barrier = bio_barrier(bio);
 	if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
 		err = -EOPNOTSUPP;
@@ -2953,9 +2945,9 @@
 		 * not touch req->buffer either...
 		 */
 		req->buffer = bio_data(bio);
-		req->current_nr_sectors = cur_nr_sectors;
-		req->hard_cur_sectors = cur_nr_sectors;
-		req->sector = req->hard_sector = sector;
+		req->current_nr_sectors = bio_cur_sectors(bio);
+		req->hard_cur_sectors = req->current_nr_sectors;
+		req->sector = req->hard_sector = bio->bi_sector;
 		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
 		req->ioprio = ioprio_best(req->ioprio, prio);
 		drive_stat_acct(req, nr_sectors, 0);
@@ -2973,7 +2965,7 @@ get_rq:
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request_wait(q, rw, bio);
+	req = get_request_wait(q, bio_data_dir(bio), bio);
 
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
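
[Editor's note on the second point: the spin_lock_prefetch(q->queue_lock) removed in the second hunk never took the lock; it only hinted that the lock's cache line would be wanted for writing soon (on most architectures it reduces to a prefetchw() of that line). Since blk_queue_bounce() runs between the hint and the spin_lock_irq() further down, and bouncing can allocate pages and sleep, the prefetched line may well be gone again by the time the lock is taken, hence "opportunistic at best". Below is a rough userspace analogue using GCC's real __builtin_prefetch() intrinsic; the pthread mutex framing is illustrative only, not the kernel's spinlock.]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long counter;

/* Advisory prefetch-for-write of the lock's cache line, the same idea
 * as the kernel's spin_lock_prefetch(). In __builtin_prefetch(addr,
 * rw, locality), rw=1 means "expect to write this line soon". */
static inline void lock_prefetch(void *lockp)
{
	__builtin_prefetch(lockp, 1, 3);
}

int main(void)
{
	/* Hint that we will want the lock's line exclusive shortly... */
	lock_prefetch(&lock);

	/* ...but the CPU may drop the hint, and anything done here can
	 * evict the line again before the lock is actually taken. */

	pthread_mutex_lock(&lock);
	counter++;
	pthread_mutex_unlock(&lock);

	printf("counter=%ld\n", counter);
	return 0;
}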