Diffstat (limited to 'block')
-rw-r--r--  block/blk-barrier.c     3
-rw-r--r--  block/blk-core.c       31
-rw-r--r--  block/blk-ioc.c         2
-rw-r--r--  block/blk-merge.c      12
-rw-r--r--  block/blk-settings.c   20
-rw-r--r--  block/blk-sysfs.c       6
-rw-r--r--  block/blk-tag.c        17
-rw-r--r--  block/blktrace.c        2
-rw-r--r--  block/bsg.c            14
-rw-r--r--  block/cfq-iosched.c    23
-rw-r--r--  block/compat_ioctl.c    2
-rw-r--r--  block/elevator.c        5
-rw-r--r--  block/scsi_ioctl.c      5
13 files changed, 80 insertions(+), 62 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 66e55288178..a09ead19f9c 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -26,8 +26,7 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 {
 	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
 	    prepare_flush_fn == NULL) {
-		printk(KERN_ERR "%s: prepare_flush_fn required\n",
-				__FUNCTION__);
+		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
 		return -EINVAL;
 	}
 
diff --git a/block/blk-core.c b/block/blk-core.c
index 5d09f8c5602..2987fe47b5e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -54,15 +54,16 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
 
 static void drive_stat_acct(struct request *rq, int new_io)
 {
+	struct hd_struct *part;
 	int rw = rq_data_dir(rq);
 
 	if (!blk_fs_request(rq) || !rq->rq_disk)
 		return;
 
-	if (!new_io) {
-		__all_stat_inc(rq->rq_disk, merges[rw], rq->sector);
-	} else {
-		struct hd_struct *part = get_part(rq->rq_disk, rq->sector);
+	part = get_part(rq->rq_disk, rq->sector);
+	if (!new_io)
+		__all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
+	else {
 		disk_round_stats(rq->rq_disk);
 		rq->rq_disk->in_flight++;
 		if (part) {
@@ -136,7 +137,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 
 	if (unlikely(nbytes > bio->bi_size)) {
 		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
-		       __FUNCTION__, nbytes, bio->bi_size);
+		       __func__, nbytes, bio->bi_size);
 		nbytes = bio->bi_size;
 	}
 
@@ -253,9 +254,11 @@ EXPORT_SYMBOL(__generic_unplug_device);
  **/
 void generic_unplug_device(struct request_queue *q)
 {
-	spin_lock_irq(q->queue_lock);
-	__generic_unplug_device(q);
-	spin_unlock_irq(q->queue_lock);
+	if (blk_queue_plugged(q)) {
+		spin_lock_irq(q->queue_lock);
+		__generic_unplug_device(q);
+		spin_unlock_irq(q->queue_lock);
+	}
 }
 EXPORT_SYMBOL(generic_unplug_device);
 
@@ -1536,10 +1539,11 @@ static int __end_that_request_first(struct request *req, int error,
 	}
 
 	if (blk_fs_request(req) && req->rq_disk) {
+		struct hd_struct *part = get_part(req->rq_disk, req->sector);
 		const int rw = rq_data_dir(req);
 
-		all_stat_add(req->rq_disk, sectors[rw],
+		all_stat_add(req->rq_disk, part, sectors[rw],
 				nr_bytes >> 9, req->sector);
 	}
 
 	total_bytes = bio_nbytes = 0;
@@ -1566,8 +1570,7 @@ static int __end_that_request_first(struct request *req, int error,
 		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
 			blk_dump_rq_flags(req, "__end_that");
 			printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
-				__FUNCTION__, bio->bi_idx,
-				bio->bi_vcnt);
+				__func__, bio->bi_idx, bio->bi_vcnt);
 			break;
 		}
 
@@ -1726,8 +1729,8 @@ static void end_that_request_last(struct request *req, int error)
 		const int rw = rq_data_dir(req);
 		struct hd_struct *part = get_part(disk, req->sector);
 
-		__all_stat_inc(disk, ios[rw], req->sector);
-		__all_stat_add(disk, ticks[rw], duration, req->sector);
+		__all_stat_inc(disk, part, ios[rw], req->sector);
+		__all_stat_add(disk, part, ticks[rw], duration, req->sector);
 		disk_round_stats(disk);
 		disk->in_flight--;
 		if (part) {
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index e34df7c9fc3..012f065ac8e 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -41,8 +41,8 @@ int put_io_context(struct io_context *ioc)
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
-		rcu_read_unlock();
 		cfq_dtor(ioc);
+		rcu_read_unlock();
 
 		kmem_cache_free(iocontext_cachep, ioc);
 		return 1;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 73b23562af2..651136aae76 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -149,9 +149,9 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
 				 struct bio *nxt)
 {
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
-	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
+	if (!bio_flagged(nxt, BIO_SEG_VALID))
 		blk_recount_segments(q, nxt);
 	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
 	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
@@ -312,9 +312,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		q->last_merge = NULL;
 		return 0;
 	}
-	if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
+	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
 		blk_recount_segments(q, req->biotail);
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
 	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
 	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
@@ -352,9 +352,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		return 0;
 	}
 	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
-	if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
+	if (!bio_flagged(req->bio, BIO_SEG_VALID))
 		blk_recount_segments(q, req->bio);
 	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
 	    !BIOVEC_VIRT_OVERSIZE(len)) {
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 6089384ab06..bb93d4c3277 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -168,8 +168,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-							    max_sectors);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_sectors);
 	}
 
 	if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -196,8 +196,8 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-							    max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_segments);
 	}
 
 	q->max_phys_segments = max_segments;
@@ -220,8 +220,8 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-							    max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_segments);
 	}
 
 	q->max_hw_segments = max_segments;
@@ -241,8 +241,8 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
 	if (max_size < PAGE_CACHE_SIZE) {
 		max_size = PAGE_CACHE_SIZE;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-							    max_size);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_size);
 	}
 
 	q->max_segment_size = max_size;
@@ -357,8 +357,8 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
 	if (mask < PAGE_CACHE_SIZE - 1) {
 		mask = PAGE_CACHE_SIZE - 1;
-		printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
-							     mask);
+		printk(KERN_INFO "%s: set to minimum %lx\n",
+		       __func__, mask);
 	}
 
 	q->seg_boundary_mask = mask;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e85c4013e8a..304ec73ab82 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -146,11 +146,13 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 	unsigned long nm;
 	ssize_t ret = queue_var_store(&nm, page, count);
 
+	spin_lock_irq(q->queue_lock);
 	if (nm)
-		set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	else
-		clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
 
+	spin_unlock_irq(q->queue_lock);
 	return ret;
 }
 
diff --git a/block/blk-tag.c b/block/blk-tag.c
index e176ddbe599..32667beb03e 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -70,7 +70,7 @@ void __blk_queue_free_tags(struct request_queue *q)
 	__blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
-	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
+	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(blk_free_tags);
  **/
 void blk_queue_free_tags(struct request_queue *q)
 {
-	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
+	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 EXPORT_SYMBOL(blk_queue_free_tags);
 
@@ -112,7 +112,7 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
 	if (q && depth > q->nr_requests * 2) {
 		depth = q->nr_requests * 2;
 		printk(KERN_ERR "%s: adjusted depth to %d\n",
-		       __FUNCTION__, depth);
+		       __func__, depth);
 	}
 
 	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
@@ -171,6 +171,9 @@ EXPORT_SYMBOL(blk_init_tags);
  * @q: the request queue for the device
  * @depth: the maximum queue depth supported
  * @tags: the tag to use
+ *
+ * Queue lock must be held here if the function is called to resize an
+ * existing map.
  **/
 int blk_queue_init_tags(struct request_queue *q, int depth,
 			struct blk_queue_tag *tags)
@@ -197,7 +200,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 * assign it, all done
 	 */
 	q->queue_tags = tags;
-	queue_flag_set(QUEUE_FLAG_QUEUED, q);
+	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
 	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 fail:
@@ -296,13 +299,13 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
 	if (unlikely(bqt->tag_index[tag] == NULL))
 		printk(KERN_ERR "%s: tag %d is missing\n",
-		       __FUNCTION__, tag);
+		       __func__, tag);
 
 	bqt->tag_index[tag] = NULL;
 
 	if (unlikely(!test_bit(tag, bqt->tag_map))) {
 		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
-		       __FUNCTION__, tag);
+		       __func__, tag);
 		return;
 	}
 	/*
@@ -340,7 +343,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
 		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
-		       __FUNCTION__, rq,
+		       __func__, rq,
 		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
 		BUG();
 	}
diff --git a/block/blktrace.c b/block/blktrace.c
index 568588cd16b..b2cbb4e5d76 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -476,7 +476,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 
 	switch (cmd) {
 	case BLKTRACESETUP:
-		strcpy(b, bdevname(bdev, b));
+		bdevname(bdev, b);
 		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
 		break;
 	case BLKTRACESTART:
diff --git a/block/bsg.c b/block/bsg.c
index 23ea4fd1a66..f0b7cd34321 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -57,7 +57,7 @@ enum {
 #undef BSG_DEBUG
 
 #ifdef BSG_DEBUG
-#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
+#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
 #else
 #define dprintk(fmt, args...)
 #endif
@@ -174,7 +174,11 @@ unlock:
 static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 				struct sg_io_v4 *hdr, int has_write_perm)
 {
-	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+	if (hdr->request_len > BLK_MAX_CDB) {
+		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
+		if (!rq->cmd)
+			return -ENOMEM;
+	}
 
 	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
 			   hdr->request_len))
@@ -211,8 +215,6 @@ bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
 
 	if (hdr->guard != 'Q')
 		return -EINVAL;
-	if (hdr->request_len > BLK_MAX_CDB)
-		return -EINVAL;
 	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
 	    hdr->din_xfer_len > (q->max_sectors << 9))
 		return -EIO;
@@ -302,6 +304,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
 	}
 	return rq;
 out:
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
 	blk_put_request(rq);
 	if (next_rq) {
 		blk_rq_unmap_user(next_rq->bio);
@@ -455,6 +459,8 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 		ret = rq->errors;
 
 	blk_rq_unmap_user(bio);
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
 	blk_put_request(rq);
 
 	return ret;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f4e1006c253..b399c62936e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1142,6 +1142,17 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	kmem_cache_free(cfq_pool, cfqq);
 }
 
+static void
+__call_for_each_cic(struct io_context *ioc,
+		    void (*func)(struct io_context *, struct cfq_io_context *))
+{
+	struct cfq_io_context *cic;
+	struct hlist_node *n;
+
+	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
+		func(ioc, cic);
+}
+
 /*
  * Call func for each cic attached to this ioc.
  */
@@ -1149,12 +1160,8 @@ static void
 call_for_each_cic(struct io_context *ioc,
 		  void (*func)(struct io_context *, struct cfq_io_context *))
 {
-	struct cfq_io_context *cic;
-	struct hlist_node *n;
-
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
-		func(ioc, cic);
+	__call_for_each_cic(ioc, func);
 	rcu_read_unlock();
 }
 
@@ -1198,7 +1205,7 @@ static void cfq_free_io_context(struct io_context *ioc)
 	 * should be ok to iterate over the known list, we will see all cic's
 	 * since no new ones are added.
 	 */
-	call_for_each_cic(ioc, cic_free_func);
+	__call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
@@ -1296,10 +1303,10 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
 		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
 	case IOPRIO_CLASS_NONE:
 		/*
-		 * no prio set, place us in the middle of the BE classes
+		 * no prio set, inherit CPU scheduling settings
 		 */
 		cfqq->ioprio = task_nice_ioprio(tsk);
-		cfqq->ioprio_class = IOPRIO_CLASS_BE;
+		cfqq->ioprio_class = task_nice_ioclass(tsk);
 		break;
 	case IOPRIO_CLASS_RT:
 		cfqq->ioprio = task_ioprio(ioc);
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index c70d0b6f666..c23177e4623 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -555,7 +555,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
 	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
 		return -EFAULT;
 
-	strcpy(b, bdevname(bdev, b));
+	bdevname(bdev, b);
 
 	buts = (struct blk_user_trace_setup) {
 		.act_mask = cbuts.act_mask,
diff --git a/block/elevator.c b/block/elevator.c
index ac5310ef827..980f8ae147b 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -650,7 +650,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 
 	default:
 		printk(KERN_ERR "%s: bad insertion point %d\n",
-		       __FUNCTION__, where);
+		       __func__, where);
 		BUG();
 	}
 
@@ -808,8 +808,7 @@ struct request *elv_next_request(struct request_queue *q)
 			rq->cmd_flags |= REQ_QUIET;
 			end_queued_request(rq, 0);
 		} else {
-			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
-			       ret);
+			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
 			break;
 		}
 	}
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index ffa3720e6ca..78199c08ec9 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -33,13 +33,12 @@
 #include <scsi/scsi_cmnd.h>
 
 /* Command group 3 is reserved and should never be used. */
-const unsigned char scsi_command_size[8] =
+const unsigned char scsi_command_size_tbl[8] =
 {
 	6, 10, 10, 12,
 	16, 12, 10, 10
 };
-
-EXPORT_SYMBOL(scsi_command_size);
+EXPORT_SYMBOL(scsi_command_size_tbl);
 
 #include <scsi/sg.h>
 