aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>2008-04-25 06:26:28 -0400
committerJens Axboe <jens.axboe@oracle.com>2008-04-29 03:50:34 -0400
commit1afb20f30151dd4160877c827f5b7203f98627fb (patch)
tree066e99694fbf0396ccd6b2a15e59747a81869f04
parentc3270e577c18b3d0e984c3371493205a4807db9d (diff)
block: make rq_init() do a full memset()
This requires moving rq_init() from get_request() to blk_alloc_request(). The upside is that we can now require an rq_init() from any path that wishes to hand the request to the block layer. rq_init() will be exported for the code that uses struct request without blk_get_request. This is a preparation for large command support, which needs to initialize struct request in a proper way (that is, just doing a memset() will not work). Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--block/blk-barrier.c7
-rw-r--r--block/blk-core.c30
2 files changed, 5 insertions, 32 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 55c5f1fc4f1f..722140ac175a 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -143,10 +143,8 @@ static void queue_flush(struct request_queue *q, unsigned which)
 		end_io = post_flush_end_io;
 	}
 
-	rq->cmd_flags = REQ_HARDBARRIER;
 	rq_init(q, rq);
-	rq->elevator_private = NULL;
-	rq->elevator_private2 = NULL;
+	rq->cmd_flags = REQ_HARDBARRIER;
 	rq->rq_disk = q->bar_rq.rq_disk;
 	rq->end_io = end_io;
 	q->prepare_flush_fn(q, rq);
@@ -167,14 +165,11 @@ static inline struct request *start_ordered(struct request_queue *q,
 	blkdev_dequeue_request(rq);
 	q->orig_bar_rq = rq;
 	rq = &q->bar_rq;
-	rq->cmd_flags = 0;
 	rq_init(q, rq);
 	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
 		rq->cmd_flags |= REQ_RW;
 	if (q->ordered & QUEUE_ORDERED_FUA)
 		rq->cmd_flags |= REQ_FUA;
-	rq->elevator_private = NULL;
-	rq->elevator_private2 = NULL;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
 	rq->end_io = bar_end_io;
 
diff --git a/block/blk-core.c b/block/blk-core.c
index 2a438a93f723..e447799256d6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -107,40 +107,18 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
-/*
- * We can't just memset() the structure, since the allocation path
- * already stored some information in the request.
- */
 void rq_init(struct request_queue *q, struct request *rq)
 {
+	memset(rq, 0, sizeof(*rq));
+
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->donelist);
 	rq->q = q;
 	rq->sector = rq->hard_sector = (sector_t) -1;
-	rq->nr_sectors = rq->hard_nr_sectors = 0;
-	rq->current_nr_sectors = rq->hard_cur_sectors = 0;
-	rq->bio = rq->biotail = NULL;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
-	rq->rq_disk = NULL;
-	rq->nr_phys_segments = 0;
-	rq->nr_hw_segments = 0;
-	rq->ioprio = 0;
-	rq->special = NULL;
-	rq->buffer = NULL;
 	rq->tag = -1;
-	rq->errors = 0;
 	rq->ref_count = 1;
-	rq->cmd_len = 0;
-	memset(rq->cmd, 0, sizeof(rq->cmd));
-	rq->data_len = 0;
-	rq->extra_len = 0;
-	rq->sense_len = 0;
-	rq->data = NULL;
-	rq->sense = NULL;
-	rq->end_io = NULL;
-	rq->end_io_data = NULL;
-	rq->next_rq = NULL;
 }
 
 static void req_bio_endio(struct request *rq, struct bio *bio,
146static void req_bio_endio(struct request *rq, struct bio *bio, 124static void req_bio_endio(struct request *rq, struct bio *bio,
@@ -607,6 +585,8 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 	if (!rq)
 		return NULL;
 
+	rq_init(q, rq);
+
 	/*
 	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
 	 * see bio.h and blkdev.h
@@ -789,8 +769,6 @@ rq_starved:
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	rq_init(q, rq);
-
 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
 	return rq;