-rw-r--r--	block/as-iosched.c	24
-rw-r--r--	block/blk-barrier.c	5
-rw-r--r--	block/blk-core.c	169
-rw-r--r--	block/blk-exec.c	1
-rw-r--r--	block/blk-ioc.c	9
-rw-r--r--	block/blk-map.c	10
-rw-r--r--	block/blk-merge.c	12
-rw-r--r--	block/blk-settings.c	61
-rw-r--r--	block/blk-sysfs.c	5
-rw-r--r--	block/blk-tag.c	12
-rw-r--r--	block/cfq-iosched.c	83
-rw-r--r--	block/elevator.c	57
-rw-r--r--	drivers/block/sunvdc.c	2
-rw-r--r--	drivers/char/random.c	2
-rw-r--r--	drivers/ide/ide-cd.c	2
-rw-r--r--	fs/splice.c	4
-rw-r--r--	include/linux/blkdev.h	16
17 files changed, 220 insertions, 254 deletions
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 96036846a00..8c3946787db 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -170,11 +170,11 @@ static void free_as_io_context(struct as_io_context *aic)
 
 static void as_trim(struct io_context *ioc)
 {
-	spin_lock(&ioc->lock);
+	spin_lock_irq(&ioc->lock);
 	if (ioc->aic)
 		free_as_io_context(ioc->aic);
 	ioc->aic = NULL;
-	spin_unlock(&ioc->lock);
+	spin_unlock_irq(&ioc->lock);
 }
 
 /* Called when the task exits */
@@ -235,10 +235,12 @@ static void as_put_io_context(struct request *rq)
 	aic = RQ_IOC(rq)->aic;
 
 	if (rq_is_sync(rq) && aic) {
-		spin_lock(&aic->lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&aic->lock, flags);
 		set_bit(AS_TASK_IORUNNING, &aic->state);
 		aic->last_end_request = jiffies;
-		spin_unlock(&aic->lock);
+		spin_unlock_irqrestore(&aic->lock, flags);
 	}
 
 	put_io_context(RQ_IOC(rq));
@@ -1266,22 +1268,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
 	 */
 	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
 		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
-			struct io_context *rioc = RQ_IOC(req);
-			struct io_context *nioc = RQ_IOC(next);
-
 			list_move(&req->queuelist, &next->queuelist);
 			rq_set_fifo_time(req, rq_fifo_time(next));
-			/*
-			 * Don't copy here but swap, because when anext is
-			 * removed below, it must contain the unused context
-			 */
-			if (rioc != nioc) {
-				double_spin_lock(&rioc->lock, &nioc->lock,
-							rioc < nioc);
-				swap_io_context(&rioc, &nioc);
-				double_spin_unlock(&rioc->lock, &nioc->lock,
-							rioc < nioc);
-			}
 		}
 	}
 
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 5f74fec327d..6901eedeffc 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -26,7 +26,8 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 {
 	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
 	    prepare_flush_fn == NULL) {
-		printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
+		printk(KERN_ERR "%s: prepare_flush_fn required\n",
+				__FUNCTION__);
 		return -EINVAL;
 	}
 
@@ -47,7 +48,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_ordered);
 
 /*
@@ -315,5 +315,4 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 	bio_put(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blkdev_issue_flush);
diff --git a/block/blk-core.c b/block/blk-core.c
index 8ff99440ee4..4afb39c8233 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3,7 +3,8 @@
  * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
- * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July2000
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
+ *	- July2000
  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
  */
 
@@ -42,7 +43,7 @@ struct kmem_cache *request_cachep;
 /*
  * For queue allocation
  */
-struct kmem_cache *blk_requestq_cachep = NULL;
+struct kmem_cache *blk_requestq_cachep;
 
 /*
  * Controlling structure to kblockd
@@ -137,7 +138,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 		error = -EIO;
 
 	if (unlikely(nbytes > bio->bi_size)) {
-		printk("%s: want %u bytes done, only %u left\n",
+		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
 		       __FUNCTION__, nbytes, bio->bi_size);
 		nbytes = bio->bi_size;
 	}
@@ -161,23 +162,26 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 {
 	int bit;
 
-	printk("%s: dev %s: type=%x, flags=%x\n", msg,
+	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
 		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
 		rq->cmd_flags);
 
-	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
-						rq->nr_sectors,
-						rq->current_nr_sectors);
-	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+	printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
+						(unsigned long long)rq->sector,
+						rq->nr_sectors,
+						rq->current_nr_sectors);
+	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
+						rq->bio, rq->biotail,
+						rq->buffer, rq->data,
+						rq->data_len);
 
 	if (blk_pc_request(rq)) {
-		printk("cdb: ");
+		printk(KERN_INFO "  cdb: ");
 		for (bit = 0; bit < sizeof(rq->cmd); bit++)
 			printk("%02x ", rq->cmd[bit]);
 		printk("\n");
 	}
 }
-
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
 /*
@@ -204,7 +208,6 @@ void blk_plug_device(struct request_queue *q)
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
 }
-
 EXPORT_SYMBOL(blk_plug_device);
 
 /*
@@ -221,7 +224,6 @@ int blk_remove_plug(struct request_queue *q)
 	del_timer(&q->unplug_timer);
 	return 1;
 }
-
 EXPORT_SYMBOL(blk_remove_plug);
 
 /*
@@ -328,7 +330,6 @@ void blk_start_queue(struct request_queue *q)
 		kblockd_schedule_work(&q->unplug_work);
 	}
 }
-
 EXPORT_SYMBOL(blk_start_queue);
 
 /**
@@ -408,7 +409,7 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-void blk_cleanup_queue(struct request_queue * q)
+void blk_cleanup_queue(struct request_queue *q)
 {
 	mutex_lock(&q->sysfs_lock);
 	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
@@ -419,7 +420,6 @@ void blk_cleanup_queue(struct request_queue * q)
 
 	blk_put_queue(q);
 }
-
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 static int blk_init_free_list(struct request_queue *q)
@@ -575,7 +575,6 @@ int blk_get_queue(struct request_queue *q)
 
 	return 1;
 }
-
 EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
@@ -774,7 +773,7 @@ rq_starved:
 	 */
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
-	
+
 	rq_init(q, rq);
 
 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
@@ -888,7 +887,6 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 
 	elv_requeue_request(q, rq);
 }
-
 EXPORT_SYMBOL(blk_requeue_request);
 
 /**
@@ -939,7 +937,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 		blk_start_queueing(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
-
 EXPORT_SYMBOL(blk_insert_request);
 
 /*
@@ -947,7 +944,7 @@ EXPORT_SYMBOL(blk_insert_request);
  * queue lock is held and interrupts disabled, as we muck with the
  * request queue list.
  */
-static inline void add_request(struct request_queue * q, struct request * req)
+static inline void add_request(struct request_queue *q, struct request *req)
 {
 	drive_stat_acct(req, 1);
 
@@ -957,7 +954,7 @@ static inline void add_request(struct request_queue * q, struct request * req)
 	 */
 	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
 }
- 
+
 /*
  * disk_round_stats()	- Round off the performance stats on a struct
  * disk_stats.
@@ -987,7 +984,6 @@ void disk_round_stats(struct gendisk *disk)
 	}
 	disk->stamp = now;
 }
-
 EXPORT_SYMBOL_GPL(disk_round_stats);
 
 /*
@@ -1017,7 +1013,6 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 		freed_request(q, rw, priv);
 	}
 }
-
 EXPORT_SYMBOL_GPL(__blk_put_request);
 
 void blk_put_request(struct request *req)
@@ -1035,7 +1030,6 @@ void blk_put_request(struct request *req)
 		spin_unlock_irqrestore(q->queue_lock, flags);
 	}
 }
-
 EXPORT_SYMBOL(blk_put_request);
 
 void init_request_from_bio(struct request *req, struct bio *bio)
@@ -1096,53 +1090,53 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	el_ret = elv_merge(q, &req, bio);
 	switch (el_ret) {
-		case ELEVATOR_BACK_MERGE:
-			BUG_ON(!rq_mergeable(req));
+	case ELEVATOR_BACK_MERGE:
+		BUG_ON(!rq_mergeable(req));
 
-			if (!ll_back_merge_fn(q, req, bio))
-				break;
+		if (!ll_back_merge_fn(q, req, bio))
+			break;
 
-			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
 
-			req->biotail->bi_next = bio;
-			req->biotail = bio;
-			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-			req->ioprio = ioprio_best(req->ioprio, prio);
-			drive_stat_acct(req, 0);
-			if (!attempt_back_merge(q, req))
-				elv_merged_request(q, req, el_ret);
-			goto out;
+		req->biotail->bi_next = bio;
+		req->biotail = bio;
+		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->ioprio = ioprio_best(req->ioprio, prio);
+		drive_stat_acct(req, 0);
+		if (!attempt_back_merge(q, req))
+			elv_merged_request(q, req, el_ret);
+		goto out;
 
-		case ELEVATOR_FRONT_MERGE:
-			BUG_ON(!rq_mergeable(req));
+	case ELEVATOR_FRONT_MERGE:
+		BUG_ON(!rq_mergeable(req));
 
-			if (!ll_front_merge_fn(q, req, bio))
-				break;
+		if (!ll_front_merge_fn(q, req, bio))
+			break;
 
-			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
 
-			bio->bi_next = req->bio;
-			req->bio = bio;
+		bio->bi_next = req->bio;
+		req->bio = bio;
 
-			/*
-			 * may not be valid. if the low level driver said
-			 * it didn't need a bounce buffer then it better
-			 * not touch req->buffer either...
-			 */
-			req->buffer = bio_data(bio);
-			req->current_nr_sectors = bio_cur_sectors(bio);
-			req->hard_cur_sectors = req->current_nr_sectors;
-			req->sector = req->hard_sector = bio->bi_sector;
-			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-			req->ioprio = ioprio_best(req->ioprio, prio);
-			drive_stat_acct(req, 0);
-			if (!attempt_front_merge(q, req))
-				elv_merged_request(q, req, el_ret);
-			goto out;
+		/*
+		 * may not be valid. if the low level driver said
+		 * it didn't need a bounce buffer then it better
+		 * not touch req->buffer either...
+		 */
+		req->buffer = bio_data(bio);
+		req->current_nr_sectors = bio_cur_sectors(bio);
+		req->hard_cur_sectors = req->current_nr_sectors;
+		req->sector = req->hard_sector = bio->bi_sector;
+		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->ioprio = ioprio_best(req->ioprio, prio);
+		drive_stat_acct(req, 0);
+		if (!attempt_front_merge(q, req))
+			elv_merged_request(q, req, el_ret);
+		goto out;
 
-		/* ELV_NO_MERGE: elevator says don't/can't merge. */
-		default:
-			;
+	/* ELV_NO_MERGE: elevator says don't/can't merge. */
+	default:
+		;
 	}
 
 get_rq:
@@ -1350,7 +1344,7 @@ end_io:
 	}
 
 	if (unlikely(nr_sectors > q->max_hw_sectors)) {
-		printk("bio too big device %s (%u > %u)\n",
+		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 			bdevname(bio->bi_bdev, b),
 			bio_sectors(bio),
 			q->max_hw_sectors);
@@ -1439,7 +1433,6 @@ void generic_make_request(struct bio *bio)
 	} while (bio);
 	current->bio_tail = NULL; /* deactivate */
 }
-
 EXPORT_SYMBOL(generic_make_request);
 
 /**
@@ -1480,13 +1473,12 @@ void submit_bio(int rw, struct bio *bio)
 			current->comm, task_pid_nr(current),
 			(rw & WRITE) ? "WRITE" : "READ",
 			(unsigned long long)bio->bi_sector,
-			bdevname(bio->bi_bdev,b));
+			bdevname(bio->bi_bdev, b));
 		}
 	}
 
 	generic_make_request(bio);
 }
-
 EXPORT_SYMBOL(submit_bio);
 
 /**
@@ -1518,9 +1510,8 @@ static int __end_that_request_first(struct request *req, int error,
 	if (!blk_pc_request(req))
 		req->errors = 0;
 
-	if (error) {
-		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
-			printk("end_request: I/O error, dev %s, sector %llu\n",
+	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
 				(unsigned long long)req->sector);
 	}
@@ -1554,9 +1545,9 @@ static int __end_that_request_first(struct request *req, int error,
 
 		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
 			blk_dump_rq_flags(req, "__end_that");
-			printk("%s: bio idx %d >= vcnt %d\n",
-				__FUNCTION__,
-				bio->bi_idx, bio->bi_vcnt);
+			printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
+					__FUNCTION__, bio->bi_idx,
+					bio->bi_vcnt);
 			break;
 		}
 
@@ -1582,7 +1573,8 @@ static int __end_that_request_first(struct request *req, int error,
 		total_bytes += nbytes;
 		nr_bytes -= nbytes;
 
-		if ((bio = req->bio)) {
+		bio = req->bio;
+		if (bio) {
 			/*
 			 * end more in this run, or just return 'not-done'
 			 */
@@ -1626,15 +1618,16 @@ static void blk_done_softirq(struct softirq_action *h)
 	local_irq_enable();
 
 	while (!list_empty(&local_list)) {
-		struct request *rq = list_entry(local_list.next, struct request, donelist);
+		struct request *rq;
 
+		rq = list_entry(local_list.next, struct request, donelist);
 		list_del_init(&rq->donelist);
 		rq->q->softirq_done_fn(rq);
 	}
 }
 
-static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
-				    void *hcpu)
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
 {
 	/*
 	 * If a CPU goes away, splice its entries to the current CPU
@@ -1676,7 +1669,7 @@ void blk_complete_request(struct request *req)
 	unsigned long flags;
 
 	BUG_ON(!req->q->softirq_done_fn);
-	
+
 	local_irq_save(flags);
 
 	cpu_list = &__get_cpu_var(blk_cpu_done);
@@ -1685,9 +1678,8 @@ void blk_complete_request(struct request *req)
 
 	local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(blk_complete_request);
 
 /*
  * queue lock must be held
  */
@@ -1846,8 +1838,9 @@ EXPORT_SYMBOL(end_request);
  *     0 - we are done with this request
  *     1 - this request is not freed yet, it still has pending buffers.
  **/
-static int blk_end_io(struct request *rq, int error, int nr_bytes,
-		      int bidi_bytes, int (drv_callback)(struct request *))
+static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
+		      unsigned int bidi_bytes,
+		      int (drv_callback)(struct request *))
 {
 	struct request_queue *q = rq->q;
 	unsigned long flags = 0UL;
@@ -1889,7 +1882,7 @@ static int blk_end_io(struct request *rq, int error, int nr_bytes,
  *     0 - we are done with this request
  *     1 - still buffers pending for this request
  **/
-int blk_end_request(struct request *rq, int error, int nr_bytes)
+int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
 	return blk_end_io(rq, error, nr_bytes, 0, NULL);
 }
@@ -1908,7 +1901,7 @@ EXPORT_SYMBOL_GPL(blk_end_request);
  *     0 - we are done with this request
  *     1 - still buffers pending for this request
  **/
-int __blk_end_request(struct request *rq, int error, int nr_bytes)
+int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
 	if (blk_fs_request(rq) || blk_pc_request(rq)) {
 		if (__end_that_request_first(rq, error, nr_bytes))
@@ -1937,8 +1930,8 @@ EXPORT_SYMBOL_GPL(__blk_end_request);
  *     0 - we are done with this request
  *     1 - still buffers pending for this request
  **/
-int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
-			 int bidi_bytes)
+int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
+			 unsigned int bidi_bytes)
 {
 	return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
 }
@@ -1969,7 +1962,8 @@ EXPORT_SYMBOL_GPL(blk_end_bidi_request);
  *         this request still has pending buffers or
  *         the driver doesn't want to finish this request yet.
  **/
-int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
+int blk_end_request_callback(struct request *rq, int error,
+			     unsigned int nr_bytes,
 			     int (drv_callback)(struct request *))
 {
 	return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
@@ -2000,7 +1994,6 @@ int kblockd_schedule_work(struct work_struct *work)
 {
 	return queue_work(kblockd_workqueue, work);
 }
-
 EXPORT_SYMBOL(kblockd_schedule_work);
 
 void kblockd_flush_work(struct work_struct *work)
diff --git a/block/blk-exec.c b/block/blk-exec.c
index ebfb44e959a..391dd622489 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -101,5 +101,4 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 
 	return err;
 }
-
 EXPORT_SYMBOL(blk_execute_rq);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 6d1675508eb..80245dc30c7 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -176,15 +176,6 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc)
 }
 EXPORT_SYMBOL(copy_io_context);
 
-void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
-{
-	struct io_context *temp;
-	temp = *ioc1;
-	*ioc1 = *ioc2;
-	*ioc2 = temp;
-}
-EXPORT_SYMBOL(swap_io_context);
-
 int __init blk_ioc_init(void)
 {
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-map.c b/block/blk-map.c
index 916cfc96ffa..955d75c1a58 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -53,7 +53,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+	if (!(uaddr & queue_dma_alignment(q)) &&
+	    !(len & queue_dma_alignment(q)))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
@@ -144,7 +145,6 @@ unmap_rq:
 	blk_rq_unmap_user(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
@@ -179,7 +179,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	/* we don't allow misaligned data like bio_map_user() does. If the
 	 * user is using sg, they're expected to know the alignment constraints
 	 * and respect them accordingly */
-	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+	bio = bio_map_user_iov(q, NULL, iov, iov_count,
+				rq_data_dir(rq) == READ);
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
@@ -194,7 +195,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
@@ -227,7 +227,6 @@ int blk_rq_unmap_user(struct bio *bio)
 
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
@@ -260,5 +259,4 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_kern);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5023f0b0807..845ef813110 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -32,7 +32,7 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 		 * size, something has gone terribly wrong
 		 */
 		if (rq->nr_sectors < rq->current_nr_sectors) {
-			printk("blk: request botched\n");
+			printk(KERN_ERR "blk: request botched\n");
 			rq->nr_sectors = rq->current_nr_sectors;
 		}
 	}
@@ -235,7 +235,6 @@ new_segment:
 
 	return nsegs;
 }
-
 EXPORT_SYMBOL(blk_rq_map_sg);
 
 static inline int ll_new_mergeable(struct request_queue *q,
@@ -305,8 +304,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
 	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
-	    !BIOVEC_VIRT_OVERSIZE(len)) {
+	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
+	    && !BIOVEC_VIRT_OVERSIZE(len)) {
 		int mergeable = ll_new_mergeable(q, req, bio);
 
 		if (mergeable) {
@@ -321,7 +320,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	return ll_new_hw_segment(q, req, bio);
 }
 
-int ll_front_merge_fn(struct request_queue *q, struct request *req, 
+int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
 	unsigned short max_sectors;
@@ -388,7 +387,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 
 	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
 	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+		int len = req->biotail->bi_hw_back_size +
+				next->bio->bi_hw_front_size;
 		/*
 		 * propagate the combined length to the end of the requests
 		 */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 4df09a1b8f4..c8d0c572409 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -10,8 +10,10 @@
 
 #include "blk.h"
 
-unsigned long blk_max_low_pfn, blk_max_pfn;
+unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
+
+unsigned long blk_max_pfn;
 EXPORT_SYMBOL(blk_max_pfn);
 
 /**
@@ -29,7 +31,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 {
 	q->prep_rq_fn = pfn;
 }
-
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
 /**
@@ -52,14 +53,12 @@ void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
 {
 	q->merge_bvec_fn = mbfn;
 }
-
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
 	q->softirq_done_fn = fn;
 }
-
 EXPORT_SYMBOL(blk_queue_softirq_done);
 
 /**
@@ -84,7 +83,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done);
  * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
  * blk_queue_bounce() to create a buffer in normal memory.
  **/
-void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 {
 	/*
 	 * set defaults
@@ -93,7 +92,8 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
 	q->make_request_fn = mfn;
-	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+	q->backing_dev_info.ra_pages =
+			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
@@ -117,7 +117,6 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 }
-
 EXPORT_SYMBOL(blk_queue_make_request);
 
 /**
@@ -133,7 +132,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
  **/
 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 {
-	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+	unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
 	int dma = 0;
 
 	q->bounce_gfp = GFP_NOIO;
@@ -141,21 +140,20 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 	/* Assume anything <= 4GB can be handled by IOMMU.
 	   Actually some IOMMUs can handle everything, but I don't
 	   know of a way to test this here. */
-	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+	if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
-	if (bounce_pfn < blk_max_low_pfn)
+	if (b_pfn < blk_max_low_pfn)
 		dma = 1;
-	q->bounce_pfn = bounce_pfn;
+	q->bounce_pfn = b_pfn;
 #endif
 	if (dma) {
 		init_emergency_isa_pool();
 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
-		q->bounce_pfn = bounce_pfn;
+		q->bounce_pfn = b_pfn;
 	}
 }
-
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
@@ -171,7 +169,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_sectors);
 	}
 
 	if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -181,7 +180,6 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 		q->max_hw_sectors = max_sectors;
 	}
 }
-
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
 /**
@@ -199,12 +197,12 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_segments);
 	}
 
 	q->max_phys_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_phys_segments);
 
 /**
@@ -223,12 +221,12 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_segments);
 	}
 
 	q->max_hw_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_hw_segments);
 
 /**
@@ -244,12 +242,12 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
 	if (max_size < PAGE_CACHE_SIZE) {
 		max_size = PAGE_CACHE_SIZE;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_size);
 	}
 
 	q->max_segment_size = max_size;
 }
-
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 
 /**
@@ -267,7 +265,6 @@ void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
 {
 	q->hardsect_size = size;
 }
-
 EXPORT_SYMBOL(blk_queue_hardsect_size);
 
 /*
@@ -283,17 +280,16 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
-	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
+	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
 
-	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
-	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
-	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
-	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
+	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
+	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
+	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
 		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
@@ -332,7 +328,6 @@ int blk_queue_dma_drain(struct request_queue *q, void *buf,
 
 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
 
 /**
@@ -344,12 +339,12 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
 	if (mask < PAGE_CACHE_SIZE - 1) {
 		mask = PAGE_CACHE_SIZE - 1;
-		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
+		printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
+							mask);
 	}
 
 	q->seg_boundary_mask = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
 /**
@@ -366,7 +361,6 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
 	q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
 /**
@@ -390,7 +384,6 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 	if (mask > q->dma_alignment)
 		q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
 int __init blk_settings_init(void)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index bc28776ba76..54d0db11615 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -207,12 +207,13 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 			  const char *page, size_t length)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q = container_of(kobj, struct request_queue, kobj);
-
+	struct request_queue *q;
 	ssize_t res;
 
 	if (!entry->store)
 		return -EIO;
+
+	q = container_of(kobj, struct request_queue, kobj);
 	mutex_lock(&q->sysfs_lock);
 	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
 		mutex_unlock(&q->sysfs_lock);
diff --git a/block/blk-tag.c b/block/blk-tag.c
index d1fd300e8ae..a8c37d4bbb3 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -21,7 +21,6 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 {
 	return blk_map_queue_find_tag(q->queue_tags, tag);
 }
-
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
@@ -99,7 +98,6 @@ void blk_queue_free_tags(struct request_queue *q)
 {
 	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_free_tags);
 
 static int
@@ -185,7 +183,8 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 		if (!tags)
 			goto fail;
 	} else if (q->queue_tags) {
-		if ((rc = blk_queue_resize_tags(q, depth)))
+		rc = blk_queue_resize_tags(q, depth);
+		if (rc)
 			return rc;
 		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 		return 0;
@@ -203,7 +202,6 @@ fail:
 	kfree(tags);
 	return -ENOMEM;
 }
-
 EXPORT_SYMBOL(blk_queue_init_tags);
 
 /**
@@ -260,7 +258,6 @@ int blk_queue_resize_tags(struct request_queue *q, int new_depth)
 	kfree(tag_map);
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_resize_tags);
 
 /**
@@ -313,7 +310,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 	clear_bit_unlock(tag, bqt->tag_map);
 	bqt->busy--;
 }
-
 EXPORT_SYMBOL(blk_queue_end_tag);
 
 /**
@@ -340,7 +336,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
-		printk(KERN_ERR 
+		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 		       __FUNCTION__, rq,
 		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
@@ -370,7 +366,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	bqt->busy++;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_start_tag);
 
 /**
@@ -392,5 +387,4 @@ void blk_queue_invalidate_tags(struct request_queue *q)
 	list_for_each_safe(tmp, n, &q->tag_busy_list)
 		blk_requeue_request(q, list_entry_rq(tmp));
 }
-
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f28d1fb3060..ca198e61fa6 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -15,11 +15,13 @@
 /*
  * tunables
  */
-static const int cfq_quantum = 4;		/* max queue in one round of service */
+/* max queue in one round of service */
+static const int cfq_quantum = 4;
 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
-static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
-static const int cfq_back_penalty = 2;	/* penalty of a backwards seek */
-
+/* maximum backwards seek, in KiB */
+static const int cfq_back_max = 16 * 1024;
+/* penalty of a backwards seek */
+static const int cfq_back_penalty = 2;
 static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
@@ -37,7 +39,8 @@ static int cfq_slice_idle = HZ / 125;
 
 #define CFQ_SLICE_SCALE		(5)
 
-#define RQ_CIC(rq)		((struct cfq_io_context*)(rq)->elevator_private)
+#define RQ_CIC(rq)		\
+	((struct cfq_io_context *) (rq)->elevator_private)
 #define RQ_CFQQ(rq)		((rq)->elevator_private2)
 
 static struct kmem_cache *cfq_pool;
@@ -171,15 +174,15 @@ enum cfqq_state_flags {
 #define CFQ_CFQQ_FNS(name)						\
 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
 {									\
-	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
+	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
 }									\
 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
 {									\
-	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
+	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
 }									\
 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
 {									\
-	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
+	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
 }
 
 CFQ_CFQQ_FNS(on_rr);
@@ -1005,7 +1008,8 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	/*
 	 * follow expired path, else get first next available
 	 */
-	if ((rq = cfq_check_fifo(cfqq)) == NULL)
+	rq = cfq_check_fifo(cfqq);
+	if (rq == NULL)
 		rq = cfqq->next_rq;
 
 	/*
@@ -1294,28 +1298,28 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
 
 	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
 	switch (ioprio_class) {
-		default:
-			printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
-		case IOPRIO_CLASS_NONE:
-			/*
-			 * no prio set, place us in the middle of the BE classes
-			 */
-			cfqq->ioprio = task_nice_ioprio(tsk);
-			cfqq->ioprio_class = IOPRIO_CLASS_BE;
-			break;
-		case IOPRIO_CLASS_RT:
-			cfqq->ioprio = task_ioprio(ioc);
-			cfqq->ioprio_class = IOPRIO_CLASS_RT;
-			break;
-		case IOPRIO_CLASS_BE:
-			cfqq->ioprio = task_ioprio(ioc);
-			cfqq->ioprio_class = IOPRIO_CLASS_BE;
-			break;
-		case IOPRIO_CLASS_IDLE:
-			cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
-			cfqq->ioprio = 7;
-			cfq_clear_cfqq_idle_window(cfqq);
-			break;
+	default:
+		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
+	case IOPRIO_CLASS_NONE:
+		/*
+		 * no prio set, place us in the middle of the BE classes
+		 */
+		cfqq->ioprio = task_nice_ioprio(tsk);
+		cfqq->ioprio_class = IOPRIO_CLASS_BE;
+		break;
+	case IOPRIO_CLASS_RT:
+		cfqq->ioprio = task_ioprio(ioc);
+		cfqq->ioprio_class = IOPRIO_CLASS_RT;
+		break;
+	case IOPRIO_CLASS_BE:
+		cfqq->ioprio = task_ioprio(ioc);
+		cfqq->ioprio_class = IOPRIO_CLASS_BE;
+		break;
+	case IOPRIO_CLASS_IDLE:
+		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
+		cfqq->ioprio = 7;
+		cfq_clear_cfqq_idle_window(cfqq);
+		break;
 	}
 
 	/*
@@ -1427,7 +1431,7 @@ out:
 static struct cfq_queue **
 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 {
-	switch(ioprio_class) {
+	switch (ioprio_class) {
 	case IOPRIO_CLASS_RT:
 		return &cfqd->async_cfqq[0][ioprio];
 	case IOPRIO_CLASS_BE:
@@ -2018,7 +2022,8 @@ static void cfq_idle_slice_timer(unsigned long data)
 
 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
-	if ((cfqq = cfqd->active_queue) != NULL) {
+	cfqq = cfqd->active_queue;
+	if (cfqq) {
 		timed_out = 0;
 
 		/*
@@ -2212,14 +2217,18 @@ static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
 	return ret;							\
 }
 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
+		UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
+		UINT_MAX, 1);
 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
-STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
+		UINT_MAX, 0);
 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
+		UINT_MAX, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
diff --git a/block/elevator.c b/block/elevator.c
index 8cd5775acd7..bafbae0344d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -45,7 +45,8 @@ static LIST_HEAD(elv_list);
  */
 static const int elv_hash_shift = 6;
 #define ELV_HASH_BLOCK(sec)	((sec) >> 3)
-#define ELV_HASH_FN(sec)	(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
+#define ELV_HASH_FN(sec)	\
+		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
 #define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
@@ -224,15 +225,27 @@ int elevator_init(struct request_queue *q, char *name)
 	q->end_sector = 0;
 	q->boundary_rq = NULL;
 
-	if (name && !(e = elevator_get(name)))
-		return -EINVAL;
+	if (name) {
+		e = elevator_get(name);
+		if (!e)
+			return -EINVAL;
+	}
 
-	if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
-		printk("I/O scheduler %s not found\n", chosen_elevator);
+	if (!e && *chosen_elevator) {
+		e = elevator_get(chosen_elevator);
+		if (!e)
+			printk(KERN_ERR "I/O scheduler %s not found\n",
+					chosen_elevator);
+	}
 
-	if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
-		printk("Default I/O scheduler not found, using no-op\n");
-		e = elevator_get("noop");
+	if (!e) {
+		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
+		if (!e) {
+			printk(KERN_ERR
+				"Default I/O scheduler not found. " \
+				"Using noop.\n");
+			e = elevator_get("noop");
+		}
 	}
 
 	eq = elevator_alloc(q, e);
@@ -248,7 +261,6 @@ int elevator_init(struct request_queue *q, char *name)
 	elevator_attach(q, eq, data);
 	return ret;
 }
-
 EXPORT_SYMBOL(elevator_init);
 
 void elevator_exit(elevator_t *e)
@@ -261,7 +273,6 @@ void elevator_exit(elevator_t *e)
 
 	kobject_put(&e->kobj);
 }
-
 EXPORT_SYMBOL(elevator_exit);
 
 static void elv_activate_rq(struct request_queue *q, struct request *rq)
@@ -353,7 +364,6 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
 	rb_insert_color(&rq->rb_node, root);
 	return NULL;
 }
-
 EXPORT_SYMBOL(elv_rb_add);
 
 void elv_rb_del(struct rb_root *root, struct request *rq)
@@ -362,7 +372,6 @@ void elv_rb_del(struct rb_root *root, struct request *rq)
 	rb_erase(&rq->rb_node, root);
 	RB_CLEAR_NODE(&rq->rb_node);
 }
-
 EXPORT_SYMBOL(elv_rb_del);
 
 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
@@ -383,7 +392,6 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
 
 	return NULL;
 }
-
 EXPORT_SYMBOL(elv_rb_find);
 
 /*
@@ -395,6 +403,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 {
 	sector_t boundary;
 	struct list_head *entry;
+	int stop_flags;
 
 	if (q->last_merge == rq)
 		q->last_merge = NULL;
@@ -404,13 +413,13 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	q->nr_sorted--;
 
 	boundary = q->end_sector;
-
+	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
 		if (rq_data_dir(rq) != rq_data_dir(pos))
 			break;
-		if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+		if (pos->cmd_flags & stop_flags)
 			break;
 		if (rq->sector >= boundary) {
 			if (pos->sector < boundary)
@@ -425,7 +434,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 
 	list_add(&rq->queuelist, entry);
 }
-
 EXPORT_SYMBOL(elv_dispatch_sort);
 
 /*
@@ -446,7 +454,6 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
 	q->boundary_rq = rq;
 	list_add_tail(&rq->queuelist, &q->queue_head);
 }
-
 EXPORT_SYMBOL(elv_dispatch_add_tail);
 
 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
@@ -665,7 +672,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
665 q->end_sector = rq_end_sector(rq); 672 q->end_sector = rq_end_sector(rq);
666 q->boundary_rq = rq; 673 q->boundary_rq = rq;
667 } 674 }
668 } else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT) 675 } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
676 where == ELEVATOR_INSERT_SORT)
669 where = ELEVATOR_INSERT_BACK; 677 where = ELEVATOR_INSERT_BACK;
670 678
671 if (plug) 679 if (plug)
@@ -673,7 +681,6 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
673 681
674 elv_insert(q, rq, where); 682 elv_insert(q, rq, where);
675} 683}
676
677EXPORT_SYMBOL(__elv_add_request); 684EXPORT_SYMBOL(__elv_add_request);
678 685
679void elv_add_request(struct request_queue *q, struct request *rq, int where, 686void elv_add_request(struct request_queue *q, struct request *rq, int where,
@@ -685,7 +692,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
685 __elv_add_request(q, rq, where, plug); 692 __elv_add_request(q, rq, where, plug);
686 spin_unlock_irqrestore(q->queue_lock, flags); 693 spin_unlock_irqrestore(q->queue_lock, flags);
687} 694}
688
689EXPORT_SYMBOL(elv_add_request); 695EXPORT_SYMBOL(elv_add_request);
690 696
691static inline struct request *__elv_next_request(struct request_queue *q) 697static inline struct request *__elv_next_request(struct request_queue *q)
@@ -792,7 +798,6 @@ struct request *elv_next_request(struct request_queue *q)
792 798
793 return rq; 799 return rq;
794} 800}
795
796EXPORT_SYMBOL(elv_next_request); 801EXPORT_SYMBOL(elv_next_request);
797 802
798void elv_dequeue_request(struct request_queue *q, struct request *rq) 803void elv_dequeue_request(struct request_queue *q, struct request *rq)
@@ -810,7 +815,6 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
810 if (blk_account_rq(rq)) 815 if (blk_account_rq(rq))
811 q->in_flight++; 816 q->in_flight++;
812} 817}
813
814EXPORT_SYMBOL(elv_dequeue_request); 818EXPORT_SYMBOL(elv_dequeue_request);
815 819
816int elv_queue_empty(struct request_queue *q) 820int elv_queue_empty(struct request_queue *q)
@@ -825,7 +829,6 @@ int elv_queue_empty(struct request_queue *q)
825 829
826 return 1; 830 return 1;
827} 831}
828
829EXPORT_SYMBOL(elv_queue_empty); 832EXPORT_SYMBOL(elv_queue_empty);
830 833
831struct request *elv_latter_request(struct request_queue *q, struct request *rq) 834struct request *elv_latter_request(struct request_queue *q, struct request *rq)
@@ -994,7 +997,8 @@ void elv_register(struct elevator_type *e)
994 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED))) 997 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
995 def = " (default)"; 998 def = " (default)";
996 999
997 printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def); 1000 printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
1001 def);
998} 1002}
999EXPORT_SYMBOL_GPL(elv_register); 1003EXPORT_SYMBOL_GPL(elv_register);
1000 1004
@@ -1126,7 +1130,8 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1126 } 1130 }
1127 1131
1128 if (!elevator_switch(q, e)) 1132 if (!elevator_switch(q, e))
1129 printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name); 1133 printk(KERN_ERR "elevator: switch to %s failed\n",
1134 elevator_name);
1130 return count; 1135 return count;
1131} 1136}
1132 1137
@@ -1160,7 +1165,6 @@ struct request *elv_rb_former_request(struct request_queue *q,
1160 1165
1161 return NULL; 1166 return NULL;
1162} 1167}
1163
1164EXPORT_SYMBOL(elv_rb_former_request); 1168EXPORT_SYMBOL(elv_rb_former_request);
1165 1169
1166struct request *elv_rb_latter_request(struct request_queue *q, 1170struct request *elv_rb_latter_request(struct request_queue *q,
@@ -1173,5 +1177,4 @@ struct request *elv_rb_latter_request(struct request_queue *q,
1173 1177
1174 return NULL; 1178 return NULL;
1175} 1179}
1176
1177EXPORT_SYMBOL(elv_rb_latter_request); 1180EXPORT_SYMBOL(elv_rb_latter_request);
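A note on the elv_dispatch_sort() hunk above: besides the checkpatch cleanups (dropping the blank lines before EXPORT_SYMBOL, wrapping long lines), it hoists the repeated barrier test into a local stop_flags mask so the mask is built once and the comparison stays readable. A minimal user-space sketch of that stop test follows; the flag values and the request struct are illustrative stand-ins, not the kernel definitions:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's request flags. */
#define REQ_SOFTBARRIER (1u << 0)
#define REQ_HARDBARRIER (1u << 1)
#define REQ_STARTED     (1u << 2)

struct req {
        unsigned int cmd_flags;
        unsigned long sector;
};

/*
 * Scan back to front, as elv_dispatch_sort() does over queue_head,
 * and stop at the first request that must never be passed.
 */
static int find_insert_pos(const struct req *q, int n)
{
        const unsigned int stop_flags =
                REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
        int i;

        for (i = n - 1; i >= 0; i--)
                if (q[i].cmd_flags & stop_flags)
                        break;  /* barrier or already-started request */
        return i;               /* insert after this index; -1 = head */
}

int main(void)
{
        struct req q[] = {
                { REQ_STARTED, 100 }, { 0, 200 }, { 0, 300 },
        };

        printf("insert after index %d\n", find_insert_pos(q, 3)); /* 0 */
        return 0;
}

The sketch covers only the stop condition; the real function additionally orders requests by sector around q->end_sector, as the hunk shows.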
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 66e30155b0a..a8de037ecd4 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -732,7 +732,7 @@ static struct vio_driver_ops vdc_vio_ops = {
732 .handshake_complete = vdc_handshake_complete, 732 .handshake_complete = vdc_handshake_complete,
733}; 733};
734 734
735static void print_version(void) 735static void __devinit print_version(void)
736{ 736{
737 static int version_printed; 737 static int version_printed;
738 738
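On the sunvdc.c hunk: __devinit marks print_version() as device-init text, which the kernel treats as ordinary init code when hotplug support is compiled out and frees after boot. A hedged sketch of the underlying mechanism, using a plain GCC section attribute with a made-up section name rather than the kernel's init.h macros:

#include <stdio.h>

/* Simplified stand-in for __devinit: route the function into a named
 * text section that a linker script could discard after init. */
#define my_devinit __attribute__((__section__(".my.devinit.text")))

static void my_devinit print_version(void)
{
        static int version_printed;

        if (version_printed++ == 0)
                puts("sketch driver v1.0");
}

int main(void)
{
        print_version();
        print_version();        /* prints nothing the second time */
        return 0;
}

The static version_printed guard also shows why the function is safe to call from every probe path: only the first call prints.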
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 5fee0566182..c511a831f0c 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -667,8 +667,6 @@ void add_disk_randomness(struct gendisk *disk)
667 add_timer_randomness(disk->random, 667 add_timer_randomness(disk->random,
668 0x100 + MKDEV(disk->major, disk->first_minor)); 668 0x100 + MKDEV(disk->major, disk->first_minor));
669} 669}
670
671EXPORT_SYMBOL(add_disk_randomness);
672#endif 670#endif
673 671
674#define EXTRACT_SIZE 10 672#define EXTRACT_SIZE 10
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 74c6087ada3..bee05a3f52a 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1722,7 +1722,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1722 */ 1722 */
1723 if ((stat & DRQ_STAT) == 0) { 1723 if ((stat & DRQ_STAT) == 0) {
1724 spin_lock_irqsave(&ide_lock, flags); 1724 spin_lock_irqsave(&ide_lock, flags);
1725 if (__blk_end_request(rq, 0, 0)) 1725 if (__blk_end_request(rq, 0, rq->data_len))
1726 BUG(); 1726 BUG();
1727 HWGROUP(drive)->rq = NULL; 1727 HWGROUP(drive)->rq = NULL;
1728 spin_unlock_irqrestore(&ide_lock, flags); 1728 spin_unlock_irqrestore(&ide_lock, flags);
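The ide-cd.c hunk deserves a gloss: __blk_end_request() returns nonzero while the request still has bytes pending, so completing rq->data_len bytes rather than 0 finishes the whole request and turns the BUG() into a real assertion that nothing remains. A small user-space model of that contract, with a stub request struct standing in for the kernel API:

#include <assert.h>
#include <stdio.h>

struct request {
        unsigned int data_len;          /* bytes still pending */
};

/* Model of the contract: complete nr_bytes; return 0 once the whole
 * request is done, nonzero while bytes remain. */
static int model_end_request(struct request *rq, int error,
                             unsigned int nr_bytes)
{
        (void)error;
        if (nr_bytes >= rq->data_len) {
                rq->data_len = 0;
                return 0;
        }
        rq->data_len -= nr_bytes;
        return 1;
}

int main(void)
{
        struct request rq = { .data_len = 4096 };

        /* As in the hunk: completing rq->data_len must finish it. */
        if (model_end_request(&rq, 0, rq.data_len))
                assert(0);              /* mirrors the kernel's BUG() */
        printf("request fully completed\n");
        return 0;
}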
diff --git a/fs/splice.c b/fs/splice.c
index 1577a7391d2..4ee49e86edd 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1033,9 +1033,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
1033 1033
1034done: 1034done:
1035 pipe->nrbufs = pipe->curbuf = 0; 1035 pipe->nrbufs = pipe->curbuf = 0;
1036 if (bytes > 0) 1036 file_accessed(in);
1037 file_accessed(in);
1038
1039 return bytes; 1037 return bytes;
1040 1038
1041out_release: 1039out_release:
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e18d4192f6e..90392a9d7a9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -39,7 +39,6 @@ void exit_io_context(void);
39struct io_context *get_io_context(gfp_t gfp_flags, int node); 39struct io_context *get_io_context(gfp_t gfp_flags, int node);
40struct io_context *alloc_io_context(gfp_t gfp_flags, int node); 40struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
41void copy_io_context(struct io_context **pdst, struct io_context **psrc); 41void copy_io_context(struct io_context **pdst, struct io_context **psrc);
42void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
43 42
44struct request; 43struct request;
45typedef void (rq_end_io_fn)(struct request *, int); 44typedef void (rq_end_io_fn)(struct request *, int);
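The second blkdev.h hunk below widens the nr_bytes parameters from int to unsigned int: a byte count is never negative, and the unsigned type keeps counts for very large requests from wrapping into bogus negative values. A stand-alone illustration of the pitfall the change avoids (values are arbitrary):

#include <stdio.h>

int main(void)
{
        /* A >2 GiB byte count no longer fits in a signed 32-bit int. */
        int signed_bytes = (int)0x90000000u;       /* implementation-defined,
                                                      negative on common systems */
        unsigned int unsigned_bytes = 0x90000000u; /* stays correct */

        printf("signed:   %d\n", signed_bytes);
        printf("unsigned: %u\n", unsigned_bytes);
        return 0;
}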
@@ -655,15 +654,18 @@ static inline void blk_run_address_space(struct address_space *mapping)
655 * blk_end_request() for parts of the original function. 654 * blk_end_request() for parts of the original function.
656 * This prevents code duplication in drivers. 655 * This prevents code duplication in drivers.
657 */ 656 */
658extern int blk_end_request(struct request *rq, int error, int nr_bytes); 657extern int blk_end_request(struct request *rq, int error,
659extern int __blk_end_request(struct request *rq, int error, int nr_bytes); 658 unsigned int nr_bytes);
660extern int blk_end_bidi_request(struct request *rq, int error, int nr_bytes, 659extern int __blk_end_request(struct request *rq, int error,
661 int bidi_bytes); 660 unsigned int nr_bytes);
661extern int blk_end_bidi_request(struct request *rq, int error,
662 unsigned int nr_bytes, unsigned int bidi_bytes);
662extern void end_request(struct request *, int); 663extern void end_request(struct request *, int);
663extern void end_queued_request(struct request *, int); 664extern void end_queued_request(struct request *, int);
664extern void end_dequeued_request(struct request *, int); 665extern void end_dequeued_request(struct request *, int);
665extern int blk_end_request_callback(struct request *rq, int error, int nr_bytes, 666extern int blk_end_request_callback(struct request *rq, int error,
666 int (drv_callback)(struct request *)); 667 unsigned int nr_bytes,
668 int (drv_callback)(struct request *));
667extern void blk_complete_request(struct request *); 669extern void blk_complete_request(struct request *);
668 670
669/* 671/*