Diffstat (limited to 'block')

 block/blk-core.c     |  4
 block/blk-ioc.c      | 37
 block/blk-map.c      | 20
 block/blk-merge.c    |  6
 block/blk-settings.c |  9
 block/bsg.c          |  8
 block/cfq-iosched.c  | 38
 block/elevator.c     | 15
 block/scsi_ioctl.c   |  3
 9 files changed, 83 insertions(+), 57 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index e9754dc98ec4..775c8516abf5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -38,7 +38,7 @@ static int __make_request(struct request_queue *q, struct bio *bio);
 /*
  * For the allocated request tables
  */
-struct kmem_cache *request_cachep;
+static struct kmem_cache *request_cachep;
 
 /*
  * For queue allocation
@@ -127,6 +127,7 @@ void rq_init(struct request_queue *q, struct request *rq)
 	rq->nr_hw_segments = 0;
 	rq->ioprio = 0;
 	rq->special = NULL;
+	rq->raw_data_len = 0;
 	rq->buffer = NULL;
 	rq->tag = -1;
 	rq->errors = 0;
@@ -2015,6 +2016,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 	rq->hard_cur_sectors = rq->current_nr_sectors;
 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
 	rq->buffer = bio_data(bio);
+	rq->raw_data_len = bio->bi_size;
 	rq->data_len = bio->bi_size;
 
 	rq->bio = rq->biotail = bio;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 80245dc30c75..e34df7c9fc36 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -17,17 +17,13 @@ static struct kmem_cache *iocontext_cachep;
 
 static void cfq_dtor(struct io_context *ioc)
 {
-	struct cfq_io_context *cic[1];
-	int r;
+	if (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic;
 
-	/*
-	 * We don't have a specific key to lookup with, so use the gang
-	 * lookup to just retrieve the first item stored. The cfq exit
-	 * function will iterate the full tree, so any member will do.
-	 */
-	r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
-	if (r > 0)
-		cic[0]->dtor(ioc);
+		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+								cic_list);
+		cic->dtor(ioc);
+	}
 }
 
 /*
@@ -57,18 +53,16 @@ EXPORT_SYMBOL(put_io_context);
 
 static void cfq_exit(struct io_context *ioc)
 {
-	struct cfq_io_context *cic[1];
-	int r;
-
 	rcu_read_lock();
-	/*
-	 * See comment for cfq_dtor()
-	 */
-	r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
-	rcu_read_unlock();
 
-	if (r > 0)
-		cic[0]->exit(ioc);
+	if (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic;
+
+		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+								cic_list);
+		cic->exit(ioc);
+	}
+	rcu_read_unlock();
 }
 
 /* Called by the exitting task */
@@ -105,6 +99,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 		ret->nr_batch_requests = 0; /* because this is 0 */
 		ret->aic = NULL;
 		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
+		INIT_HLIST_HEAD(&ret->cic_list);
 		ret->ioc_data = NULL;
 	}
 
@@ -176,7 +171,7 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc)
 }
 EXPORT_SYMBOL(copy_io_context);
 
-int __init blk_ioc_init(void)
+static int __init blk_ioc_init(void)
 {
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
diff --git a/block/blk-map.c b/block/blk-map.c
index 955d75c1a58f..09f7fd0bcb73 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -19,6 +19,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
+		rq->raw_data_len += bio->bi_size;
 		rq->data_len += bio->bi_size;
 	}
 	return 0;
@@ -139,10 +140,29 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		ubuf += ret;
 	}
 
+	/*
+	 * __blk_rq_map_user() copies the buffers if starting address
+	 * or length isn't aligned. As the copied buffer is always
+	 * page aligned, we know that there's enough room for padding.
+	 * Extend the last bio and update rq->data_len accordingly.
+	 *
+	 * On unmap, bio_uncopy_user() will use unmodified
+	 * bio_map_data pointed to by bio->bi_private.
+	 */
+	if (len & queue_dma_alignment(q)) {
+		unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
+		struct bio *bio = rq->biotail;
+
+		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
+		bio->bi_size += pad_len;
+		rq->data_len += pad_len;
+	}
+
 	rq->buffer = rq->data = NULL;
 	return 0;
 unmap_rq:
 	blk_rq_unmap_user(bio);
+	rq->bio = NULL;
 	return ret;
 }
 EXPORT_SYMBOL(blk_rq_map_user);
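
The pad_len expression in the blk_rq_map_user() hunk above rounds the mapped length up to the queue's DMA alignment. A standalone sketch with made-up numbers (mask 3, i.e. 4-byte alignment, and a 5-byte length — illustration values, not taken from the patch) shows the arithmetic:

#include <stdio.h>

/* pad_len = (queue_dma_alignment(q) & ~len) + 1 rounds len up to the
 * next multiple of (mask + 1). The mask and length below are assumed
 * example values. */
int main(void)
{
	unsigned int mask = 3;	/* stands in for queue_dma_alignment(q) */
	unsigned int len = 5;	/* stands in for the mapped user length */

	if (len & mask) {
		unsigned int pad_len = (mask & ~len) + 1;

		/* prints: pad_len = 3, padded length = 8 */
		printf("pad_len = %u, padded length = %u\n",
		       pad_len, len + pad_len);
	}
	return 0;
}
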
diff --git a/block/blk-merge.c b/block/blk-merge.c
index d3b84bbb776a..7506c4fe0264 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -220,7 +220,10 @@ new_segment:
 		bvprv = bvec;
 	} /* segments in rq */
 
-	if (q->dma_drain_size) {
+	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
+		if (rq->cmd_flags & REQ_RW)
+			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
+
 		sg->page_link &= ~0x02;
 		sg = sg_next(sg);
 		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
@@ -228,6 +231,7 @@ new_segment:
 			    ((unsigned long)q->dma_drain_buffer) &
 			    (PAGE_SIZE - 1));
 		nsegs++;
+		rq->data_len += q->dma_drain_size;
 	}
 
 	if (sg)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c8d0c5724098..9a8ffdd0ce3d 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -296,6 +296,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  *
  * @q:  the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf:	physically contiguous buffer
  * @size:	size of the buffer in bytes
  *
@@ -315,14 +316,16 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * device can support otherwise there won't be room for the drain
  * buffer.
  */
-int blk_queue_dma_drain(struct request_queue *q, void *buf,
-		unsigned int size)
+extern int blk_queue_dma_drain(struct request_queue *q,
+			       dma_drain_needed_fn *dma_drain_needed,
+			       void *buf, unsigned int size)
 {
 	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
 	--q->max_hw_segments;
 	--q->max_phys_segments;
+	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
 
@@ -386,7 +389,7 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
-int __init blk_settings_init(void)
+static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
 	blk_max_pfn = max_pfn - 1;
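
The blk-settings.c change above extends blk_queue_dma_drain() to take a per-request callback that decides whether the drain buffer should be appended. A minimal, hypothetical driver-side sketch of the new calling convention (the mydev_* names and MYDEV_DRAIN_SIZE are illustrative and not part of this patch):

#include <linux/blkdev.h>
#include <linux/slab.h>

#define MYDEV_DRAIN_SIZE	256	/* hypothetical drain buffer size */

struct mydev {
	void *drain_buf;
};

static int mydev_drain_needed(struct request *rq)
{
	/* e.g. only drain packet-style (SG_IO) requests */
	return blk_pc_request(rq);
}

static int mydev_init_queue(struct request_queue *q, struct mydev *dev)
{
	dev->drain_buf = kmalloc(MYDEV_DRAIN_SIZE, GFP_KERNEL);
	if (!dev->drain_buf)
		return -ENOMEM;

	/* old call: blk_queue_dma_drain(q, dev->drain_buf, MYDEV_DRAIN_SIZE);
	 * new call passes the dma_drain_needed_fn as the second argument */
	return blk_queue_dma_drain(q, mydev_drain_needed,
				   dev->drain_buf, MYDEV_DRAIN_SIZE);
}
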
diff --git a/block/bsg.c b/block/bsg.c
index 8917c5174dc2..7f3c09549e4b 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -437,14 +437,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 	}
 
 	if (rq->next_rq) {
-		hdr->dout_resid = rq->data_len;
-		hdr->din_resid = rq->next_rq->data_len;
+		hdr->dout_resid = rq->raw_data_len;
+		hdr->din_resid = rq->next_rq->raw_data_len;
 		blk_rq_unmap_user(bidi_bio);
 		blk_put_request(rq->next_rq);
 	} else if (rq_data_dir(rq) == READ)
-		hdr->din_resid = rq->data_len;
+		hdr->din_resid = rq->raw_data_len;
 	else
-		hdr->dout_resid = rq->data_len;
+		hdr->dout_resid = rq->raw_data_len;
 
 	/*
 	 * If the request generated a negative error number, return it
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ca198e61fa65..0f962ecae91f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1145,38 +1145,19 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 /*
  * Call func for each cic attached to this ioc. Returns number of cic's seen.
  */
-#define CIC_GANG_NR	16
 static unsigned int
 call_for_each_cic(struct io_context *ioc,
 		  void (*func)(struct io_context *, struct cfq_io_context *))
 {
-	struct cfq_io_context *cics[CIC_GANG_NR];
-	unsigned long index = 0;
-	unsigned int called = 0;
-	int nr;
+	struct cfq_io_context *cic;
+	struct hlist_node *n;
+	int called = 0;
 
 	rcu_read_lock();
-
-	do {
-		int i;
-
-		/*
-		 * Perhaps there's a better way - this just gang lookups from
-		 * 0 to the end, restarting after each CIC_GANG_NR from the
-		 * last key + 1.
-		 */
-		nr = radix_tree_gang_lookup(&ioc->radix_root, (void **) cics,
-						index, CIC_GANG_NR);
-		if (!nr)
-			break;
-
-		called += nr;
-		index = 1 + (unsigned long) cics[nr - 1]->key;
-
-		for (i = 0; i < nr; i++)
-			func(ioc, cics[i]);
-	} while (nr == CIC_GANG_NR);
-
+	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) {
+		func(ioc, cic);
+		called++;
+	}
 	rcu_read_unlock();
 
 	return called;
@@ -1190,6 +1171,7 @@ static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
 
 	spin_lock_irqsave(&ioc->lock, flags);
 	radix_tree_delete(&ioc->radix_root, cic->dead_key);
+	hlist_del_rcu(&cic->cic_list);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 
 	kmem_cache_free(cfq_ioc_pool, cic);
@@ -1280,6 +1262,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 	if (cic) {
 		cic->last_end_request = jiffies;
 		INIT_LIST_HEAD(&cic->queue_list);
+		INIT_HLIST_NODE(&cic->cic_list);
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
 		elv_ioc_count_inc(ioc_count);
@@ -1501,6 +1484,7 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
 	rcu_assign_pointer(ioc->ioc_data, NULL);
 
 	radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
+	hlist_del_rcu(&cic->cic_list);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 
 	cfq_cic_free(cic);
@@ -1561,6 +1545,8 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 		spin_lock_irqsave(&ioc->lock, flags);
 		ret = radix_tree_insert(&ioc->radix_root,
 						(unsigned long) cfqd, cic);
+		if (!ret)
+			hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
 		spin_unlock_irqrestore(&ioc->lock, flags);
 
 		radix_tree_preload_end();
diff --git a/block/elevator.c b/block/elevator.c
index bafbae0344d3..88318c383608 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -134,6 +134,21 @@ static struct elevator_type *elevator_get(const char *name)
 	spin_lock(&elv_list_lock);
 
 	e = elevator_find(name);
+	if (!e) {
+		char elv[ELV_NAME_MAX + strlen("-iosched")];
+
+		spin_unlock(&elv_list_lock);
+
+		if (!strcmp(name, "anticipatory"))
+			sprintf(elv, "as-iosched");
+		else
+			sprintf(elv, "%s-iosched", name);
+
+		request_module(elv);
+		spin_lock(&elv_list_lock);
+		e = elevator_find(name);
+	}
+
 	if (e && !try_module_get(e->elevator_owner))
 		e = NULL;
 
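
With the fallback above, asking for an elevator that is not currently registered (for instance through the queue's scheduler sysfs attribute) makes elevator_get() call request_module() with the scheduler name plus an "-iosched" suffix, with "anticipatory" special-cased to "as-iosched". A small, hypothetical module-side sketch of the naming a scheduler called "foo" would need for that lookup to succeed (the "foo" name is illustrative):

#include <linux/module.h>

/* request_module("foo-iosched") resolves by module name or alias, so a
 * scheduler selected as "foo" should either be built as foo-iosched.ko
 * or carry a matching alias: */
MODULE_ALIAS("foo-iosched");
MODULE_LICENSE("GPL");
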
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 9675b34638d4..e993cac4911d 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -266,7 +266,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 	hdr->info = 0;
 	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
 		hdr->info |= SG_INFO_CHECK;
-	hdr->resid = rq->data_len;
+	hdr->resid = rq->raw_data_len;
 	hdr->sb_len_wr = 0;
 
 	if (rq->sense_len && hdr->sbp) {
@@ -528,6 +528,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
 	rq = blk_get_request(q, WRITE, __GFP_WAIT);
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->data = NULL;
+	rq->raw_data_len = 0;
 	rq->data_len = 0;
 	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 	memset(rq->cmd, 0, sizeof(rq->cmd));