Diffstat (limited to 'block')
-rw-r--r--  block/blk-barrier.c   |    5
-rw-r--r--  block/blk-core.c      |  153
-rw-r--r--  block/blk-exec.c      |    1
-rw-r--r--  block/blk-map.c       |   10
-rw-r--r--  block/blk-merge.c     |   12
-rw-r--r--  block/blk-settings.c  |   61
-rw-r--r--  block/blk-sysfs.c     |    5
-rw-r--r--  block/blk-tag.c       |   12
8 files changed, 117 insertions(+), 142 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 5f74fec327d5..6901eedeffce 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -26,7 +26,8 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 {
 	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
 	    prepare_flush_fn == NULL) {
-		printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
+		printk(KERN_ERR "%s: prepare_flush_fn required\n",
+				__FUNCTION__);
 		return -EINVAL;
 	}
 
@@ -47,7 +48,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_ordered);
 
 /*
@@ -315,5 +315,4 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 	bio_put(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blkdev_issue_flush);
diff --git a/block/blk-core.c b/block/blk-core.c
index 55cf293d907d..4afb39c82339 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3,7 +3,8 @@
  * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
  * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
- * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July2000
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
+ *	- July2000
  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
  */
 
@@ -42,7 +43,7 @@ struct kmem_cache *request_cachep;
 /*
  * For queue allocation
  */
-struct kmem_cache *blk_requestq_cachep = NULL;
+struct kmem_cache *blk_requestq_cachep;
 
 /*
  * Controlling structure to kblockd
@@ -137,7 +138,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 		error = -EIO;
 
 	if (unlikely(nbytes > bio->bi_size)) {
-		printk("%s: want %u bytes done, only %u left\n",
+		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
 		       __FUNCTION__, nbytes, bio->bi_size);
 		nbytes = bio->bi_size;
 	}
@@ -161,23 +162,26 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 {
 	int bit;
 
-	printk("%s: dev %s: type=%x, flags=%x\n", msg,
+	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
 		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
 		rq->cmd_flags);
 
-	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
-						rq->nr_sectors,
-						rq->current_nr_sectors);
-	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+	printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
+						(unsigned long long)rq->sector,
+						rq->nr_sectors,
+						rq->current_nr_sectors);
+	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
+						rq->bio, rq->biotail,
+						rq->buffer, rq->data,
+						rq->data_len);
 
 	if (blk_pc_request(rq)) {
-		printk("cdb: ");
+		printk(KERN_INFO "  cdb: ");
 		for (bit = 0; bit < sizeof(rq->cmd); bit++)
 			printk("%02x ", rq->cmd[bit]);
 		printk("\n");
 	}
 }
-
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
 /*
@@ -204,7 +208,6 @@ void blk_plug_device(struct request_queue *q)
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
 }
-
 EXPORT_SYMBOL(blk_plug_device);
 
 /*
@@ -221,7 +224,6 @@ int blk_remove_plug(struct request_queue *q)
 	del_timer(&q->unplug_timer);
 	return 1;
 }
-
 EXPORT_SYMBOL(blk_remove_plug);
 
 /*
@@ -328,7 +330,6 @@ void blk_start_queue(struct request_queue *q)
 		kblockd_schedule_work(&q->unplug_work);
 	}
 }
-
 EXPORT_SYMBOL(blk_start_queue);
 
 /**
@@ -408,7 +409,7 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-void blk_cleanup_queue(struct request_queue * q)
+void blk_cleanup_queue(struct request_queue *q)
 {
 	mutex_lock(&q->sysfs_lock);
 	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
@@ -419,7 +420,6 @@ void blk_cleanup_queue(struct request_queue * q)
 
 	blk_put_queue(q);
 }
-
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 static int blk_init_free_list(struct request_queue *q)
@@ -575,7 +575,6 @@ int blk_get_queue(struct request_queue *q)
 
 	return 1;
 }
-
 EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
@@ -774,7 +773,7 @@ rq_starved:
 	 */
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
-	
+
 	rq_init(q, rq);
 
 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
@@ -888,7 +887,6 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 
 	elv_requeue_request(q, rq);
 }
-
 EXPORT_SYMBOL(blk_requeue_request);
 
 /**
@@ -939,7 +937,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 		blk_start_queueing(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
-
 EXPORT_SYMBOL(blk_insert_request);
 
 /*
@@ -947,7 +944,7 @@ EXPORT_SYMBOL(blk_insert_request);
  * queue lock is held and interrupts disabled, as we muck with the
  * request queue list.
  */
-static inline void add_request(struct request_queue * q, struct request * req)
+static inline void add_request(struct request_queue *q, struct request *req)
 {
 	drive_stat_acct(req, 1);
 
@@ -957,7 +954,7 @@ static inline void add_request(struct request_queue * q, struct request * req)
 	 */
 	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
 }
-	
+
 /*
  * disk_round_stats()	- Round off the performance stats on a struct
  * disk_stats.
@@ -987,7 +984,6 @@ void disk_round_stats(struct gendisk *disk)
 	}
 	disk->stamp = now;
 }
-
 EXPORT_SYMBOL_GPL(disk_round_stats);
 
 /*
@@ -1017,7 +1013,6 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 		freed_request(q, rw, priv);
 	}
 }
-
 EXPORT_SYMBOL_GPL(__blk_put_request);
 
 void blk_put_request(struct request *req)
@@ -1035,7 +1030,6 @@ void blk_put_request(struct request *req)
 		spin_unlock_irqrestore(q->queue_lock, flags);
 	}
 }
-
 EXPORT_SYMBOL(blk_put_request);
 
 void init_request_from_bio(struct request *req, struct bio *bio)
@@ -1096,53 +1090,53 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	el_ret = elv_merge(q, &req, bio);
 	switch (el_ret) {
-		case ELEVATOR_BACK_MERGE:
-			BUG_ON(!rq_mergeable(req));
-
-			if (!ll_back_merge_fn(q, req, bio))
-				break;
-
-			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
-
-			req->biotail->bi_next = bio;
-			req->biotail = bio;
-			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-			req->ioprio = ioprio_best(req->ioprio, prio);
-			drive_stat_acct(req, 0);
-			if (!attempt_back_merge(q, req))
-				elv_merged_request(q, req, el_ret);
-			goto out;
-
-		case ELEVATOR_FRONT_MERGE:
-			BUG_ON(!rq_mergeable(req));
-
-			if (!ll_front_merge_fn(q, req, bio))
-				break;
-
-			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
-
-			bio->bi_next = req->bio;
-			req->bio = bio;
-
-			/*
-			 * may not be valid. if the low level driver said
-			 * it didn't need a bounce buffer then it better
-			 * not touch req->buffer either...
-			 */
-			req->buffer = bio_data(bio);
-			req->current_nr_sectors = bio_cur_sectors(bio);
-			req->hard_cur_sectors = req->current_nr_sectors;
-			req->sector = req->hard_sector = bio->bi_sector;
-			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-			req->ioprio = ioprio_best(req->ioprio, prio);
-			drive_stat_acct(req, 0);
-			if (!attempt_front_merge(q, req))
-				elv_merged_request(q, req, el_ret);
-			goto out;
-
-		/* ELV_NO_MERGE: elevator says don't/can't merge. */
-		default:
-			;
+	case ELEVATOR_BACK_MERGE:
+		BUG_ON(!rq_mergeable(req));
+
+		if (!ll_back_merge_fn(q, req, bio))
+			break;
+
+		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+
+		req->biotail->bi_next = bio;
+		req->biotail = bio;
+		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->ioprio = ioprio_best(req->ioprio, prio);
+		drive_stat_acct(req, 0);
+		if (!attempt_back_merge(q, req))
+			elv_merged_request(q, req, el_ret);
+		goto out;
+
+	case ELEVATOR_FRONT_MERGE:
+		BUG_ON(!rq_mergeable(req));
+
+		if (!ll_front_merge_fn(q, req, bio))
+			break;
+
+		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+
+		bio->bi_next = req->bio;
+		req->bio = bio;
+
+		/*
+		 * may not be valid. if the low level driver said
+		 * it didn't need a bounce buffer then it better
+		 * not touch req->buffer either...
+		 */
+		req->buffer = bio_data(bio);
+		req->current_nr_sectors = bio_cur_sectors(bio);
+		req->hard_cur_sectors = req->current_nr_sectors;
+		req->sector = req->hard_sector = bio->bi_sector;
+		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->ioprio = ioprio_best(req->ioprio, prio);
+		drive_stat_acct(req, 0);
+		if (!attempt_front_merge(q, req))
+			elv_merged_request(q, req, el_ret);
+		goto out;
+
+	/* ELV_NO_MERGE: elevator says don't/can't merge. */
+	default:
+		;
 	}
 
 get_rq:
@@ -1350,7 +1344,7 @@ end_io:
 	}
 
 	if (unlikely(nr_sectors > q->max_hw_sectors)) {
-		printk("bio too big device %s (%u > %u)\n",
+		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 			bdevname(bio->bi_bdev, b),
 			bio_sectors(bio),
 			q->max_hw_sectors);
@@ -1439,7 +1433,6 @@ void generic_make_request(struct bio *bio)
 	} while (bio);
 	current->bio_tail = NULL; /* deactivate */
 }
-
 EXPORT_SYMBOL(generic_make_request);
 
 /**
@@ -1480,13 +1473,12 @@ void submit_bio(int rw, struct bio *bio)
 			current->comm, task_pid_nr(current),
 			(rw & WRITE) ? "WRITE" : "READ",
 			(unsigned long long)bio->bi_sector,
-			bdevname(bio->bi_bdev,b));
+			bdevname(bio->bi_bdev, b));
 		}
 	}
 
 	generic_make_request(bio);
 }
-
 EXPORT_SYMBOL(submit_bio);
 
 /**
@@ -1518,9 +1510,8 @@ static int __end_that_request_first(struct request *req, int error,
 	if (!blk_pc_request(req))
 		req->errors = 0;
 
-	if (error) {
-		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
-			printk("end_request: I/O error, dev %s, sector %llu\n",
+	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
 				(unsigned long long)req->sector);
 	}
@@ -1554,9 +1545,9 @@ static int __end_that_request_first(struct request *req, int error,
 
 		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
 			blk_dump_rq_flags(req, "__end_that");
-			printk("%s: bio idx %d >= vcnt %d\n",
-				__FUNCTION__,
-				bio->bi_idx, bio->bi_vcnt);
+			printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
+				__FUNCTION__, bio->bi_idx,
+				bio->bi_vcnt);
 			break;
 		}
 
@@ -1582,7 +1573,8 @@ static int __end_that_request_first(struct request *req, int error,
 		total_bytes += nbytes;
 		nr_bytes -= nbytes;
 
-		if ((bio = req->bio)) {
+		bio = req->bio;
+		if (bio) {
 			/*
 			 * end more in this run, or just return 'not-done'
 			 */
@@ -1626,15 +1618,16 @@ static void blk_done_softirq(struct softirq_action *h)
 	local_irq_enable();
 
 	while (!list_empty(&local_list)) {
-		struct request *rq = list_entry(local_list.next, struct request, donelist);
+		struct request *rq;
 
+		rq = list_entry(local_list.next, struct request, donelist);
 		list_del_init(&rq->donelist);
 		rq->q->softirq_done_fn(rq);
 	}
 }
 
-static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
-					void *hcpu)
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
 {
 	/*
 	 * If a CPU goes away, splice its entries to the current CPU
@@ -1676,7 +1669,7 @@ void blk_complete_request(struct request *req)
 	unsigned long flags;
 
 	BUG_ON(!req->q->softirq_done_fn);
-	
+
 	local_irq_save(flags);
 
 	cpu_list = &__get_cpu_var(blk_cpu_done);
@@ -1685,9 +1678,8 @@ void blk_complete_request(struct request *req)
 
 	local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(blk_complete_request);
 
 /*
  * queue lock must be held
  */
@@ -2002,7 +1994,6 @@ int kblockd_schedule_work(struct work_struct *work)
 {
 	return queue_work(kblockd_workqueue, work);
 }
-
 EXPORT_SYMBOL(kblockd_schedule_work);
 
 void kblockd_flush_work(struct work_struct *work)
diff --git a/block/blk-exec.c b/block/blk-exec.c
index ebfb44e959a9..391dd6224890 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -101,5 +101,4 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 
 	return err;
 }
-
 EXPORT_SYMBOL(blk_execute_rq);
diff --git a/block/blk-map.c b/block/blk-map.c
index 916cfc96ffa0..955d75c1a58f 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -53,7 +53,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+	if (!(uaddr & queue_dma_alignment(q)) &&
+	    !(len & queue_dma_alignment(q)))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
@@ -144,7 +145,6 @@ unmap_rq:
 	blk_rq_unmap_user(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
@@ -179,7 +179,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	/* we don't allow misaligned data like bio_map_user() does. If the
 	 * user is using sg, they're expected to know the alignment constraints
 	 * and respect them accordingly */
-	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+	bio = bio_map_user_iov(q, NULL, iov, iov_count,
+				rq_data_dir(rq) == READ);
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
@@ -194,7 +195,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
@@ -227,7 +227,6 @@ int blk_rq_unmap_user(struct bio *bio)
 
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
@@ -260,5 +259,4 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_kern);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5023f0b08073..845ef8131108 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -32,7 +32,7 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 		 * size, something has gone terribly wrong
 		 */
 		if (rq->nr_sectors < rq->current_nr_sectors) {
-			printk("blk: request botched\n");
+			printk(KERN_ERR "blk: request botched\n");
 			rq->nr_sectors = rq->current_nr_sectors;
 		}
 	}
@@ -235,7 +235,6 @@ new_segment:
 
 	return nsegs;
 }
-
 EXPORT_SYMBOL(blk_rq_map_sg);
 
 static inline int ll_new_mergeable(struct request_queue *q,
@@ -305,8 +304,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
 	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
-	    !BIOVEC_VIRT_OVERSIZE(len)) {
+	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
+	    && !BIOVEC_VIRT_OVERSIZE(len)) {
 		int mergeable = ll_new_mergeable(q, req, bio);
 
 		if (mergeable) {
@@ -321,7 +320,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	return ll_new_hw_segment(q, req, bio);
 }
 
-int ll_front_merge_fn(struct request_queue *q, struct request *req, 
+int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
 	unsigned short max_sectors;
@@ -388,7 +387,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 
 	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
 	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+		int len = req->biotail->bi_hw_back_size +
+					next->bio->bi_hw_front_size;
 		/*
 		 * propagate the combined length to the end of the requests
 		 */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 4df09a1b8f43..c8d0c5724098 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -10,8 +10,10 @@
 
 #include "blk.h"
 
-unsigned long blk_max_low_pfn, blk_max_pfn;
+unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
+
+unsigned long blk_max_pfn;
 EXPORT_SYMBOL(blk_max_pfn);
 
 /**
@@ -29,7 +31,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 {
 	q->prep_rq_fn = pfn;
 }
-
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
 /**
@@ -52,14 +53,12 @@ void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
 {
 	q->merge_bvec_fn = mbfn;
 }
-
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
 	q->softirq_done_fn = fn;
 }
-
 EXPORT_SYMBOL(blk_queue_softirq_done);
 
 /**
@@ -84,7 +83,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done);
  * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
  * blk_queue_bounce() to create a buffer in normal memory.
  **/
-void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 {
 	/*
 	 * set defaults
@@ -93,7 +92,8 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
 	q->make_request_fn = mfn;
-	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+	q->backing_dev_info.ra_pages =
+			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
@@ -117,7 +117,6 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 }
-
 EXPORT_SYMBOL(blk_queue_make_request);
 
 /**
@@ -133,7 +132,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
  **/
 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 {
-	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+	unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
 	int dma = 0;
 
 	q->bounce_gfp = GFP_NOIO;
@@ -141,21 +140,20 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 	/* Assume anything <= 4GB can be handled by IOMMU.
 	   Actually some IOMMUs can handle everything, but I don't
 	   know of a way to test this here. */
-	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+	if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
-	if (bounce_pfn < blk_max_low_pfn)
+	if (b_pfn < blk_max_low_pfn)
 		dma = 1;
-	q->bounce_pfn = bounce_pfn;
+	q->bounce_pfn = b_pfn;
 #endif
 	if (dma) {
 		init_emergency_isa_pool();
 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
-		q->bounce_pfn = bounce_pfn;
+		q->bounce_pfn = b_pfn;
 	}
 }
-
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
@@ -171,7 +169,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+					max_sectors);
 	}
 
 	if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -181,7 +180,6 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 		q->max_hw_sectors = max_sectors;
 	}
 }
-
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
 /**
@@ -199,12 +197,12 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+				max_segments);
 	}
 
 	q->max_phys_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_phys_segments);
 
 /**
@@ -223,12 +221,12 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+				max_segments);
 	}
 
 	q->max_hw_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_hw_segments);
 
 /**
@@ -244,12 +242,12 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
 	if (max_size < PAGE_CACHE_SIZE) {
 		max_size = PAGE_CACHE_SIZE;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+					max_size);
 	}
 
 	q->max_segment_size = max_size;
 }
-
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 
 /**
@@ -267,7 +265,6 @@ void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
 {
 	q->hardsect_size = size;
 }
-
 EXPORT_SYMBOL(blk_queue_hardsect_size);
 
 /*
@@ -283,17 +280,16 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
-	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
+	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
 
-	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
-	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
-	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
-	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
+	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
+	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
+	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
 		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
@@ -332,7 +328,6 @@ int blk_queue_dma_drain(struct request_queue *q, void *buf,
 
 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
 
 /**
@@ -344,12 +339,12 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
 	if (mask < PAGE_CACHE_SIZE - 1) {
 		mask = PAGE_CACHE_SIZE - 1;
-		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
+		printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
+					mask);
 	}
 
 	q->seg_boundary_mask = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
 /**
@@ -366,7 +361,6 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
 	q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
 /**
@@ -390,7 +384,6 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 	if (mask > q->dma_alignment)
 		q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
 int __init blk_settings_init(void)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index bc28776ba76a..54d0db116153 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -207,12 +207,13 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 		  const char *page, size_t length)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q = container_of(kobj, struct request_queue, kobj);
-
+	struct request_queue *q;
 	ssize_t res;
 
 	if (!entry->store)
 		return -EIO;
+
+	q = container_of(kobj, struct request_queue, kobj);
 	mutex_lock(&q->sysfs_lock);
 	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
 		mutex_unlock(&q->sysfs_lock);
diff --git a/block/blk-tag.c b/block/blk-tag.c
index d1fd300e8aea..a8c37d4bbb32 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -21,7 +21,6 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 {
 	return blk_map_queue_find_tag(q->queue_tags, tag);
 }
-
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
@@ -99,7 +98,6 @@ void blk_queue_free_tags(struct request_queue *q)
 {
 	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_free_tags);
 
 static int
@@ -185,7 +183,8 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 		if (!tags)
 			goto fail;
 	} else if (q->queue_tags) {
-		if ((rc = blk_queue_resize_tags(q, depth)))
+		rc = blk_queue_resize_tags(q, depth);
+		if (rc)
 			return rc;
 		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 		return 0;
@@ -203,7 +202,6 @@ fail:
 	kfree(tags);
 	return -ENOMEM;
 }
-
 EXPORT_SYMBOL(blk_queue_init_tags);
 
 /**
@@ -260,7 +258,6 @@ int blk_queue_resize_tags(struct request_queue *q, int new_depth)
 	kfree(tag_map);
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_resize_tags);
 
 /**
@@ -313,7 +310,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 	clear_bit_unlock(tag, bqt->tag_map);
 	bqt->busy--;
 }
-
 EXPORT_SYMBOL(blk_queue_end_tag);
 
 /**
@@ -340,7 +336,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
-		printk(KERN_ERR 
+		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 		       __FUNCTION__, rq,
 		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
@@ -370,7 +366,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	bqt->busy++;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_start_tag);
 
 /**
@@ -392,5 +387,4 @@ void blk_queue_invalidate_tags(struct request_queue *q)
 	list_for_each_safe(tmp, n, &q->tag_busy_list)
 		blk_requeue_request(q, list_entry_rq(tmp));
 }
-
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
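
The same checkpatch-driven conventions recur across all eight files: every printk() carries an explicit KERN_* log level, EXPORT_SYMBOL() sits directly under the closing brace of the function it exports, over-long lines are wrapped, and assignments are hoisted out of if-conditions. A minimal sketch of the resulting style, using a hypothetical helper that is not part of this patch:

#include <linux/kernel.h>
#include <linux/module.h>

/* Hypothetical stand-in for a queue structure, for illustration only. */
struct example_queue {
	int depth;
};

static int example_resize(struct example_queue *q, int depth)
{
	q->depth = depth;
	return 0;
}

/* Illustrates the post-cleanup style applied throughout this patch. */
int blk_example_resize(struct example_queue *q, int depth)
{
	int rc;

	/* assignment kept outside the if-condition (was: if ((rc = ...))) */
	rc = example_resize(q, depth);
	if (rc) {
		/* printk() always names an explicit log level */
		printk(KERN_ERR "%s: resize to %d failed\n",
				__FUNCTION__, depth);
		return rc;
	}

	return 0;
}
/* EXPORT_SYMBOL() immediately follows the closing brace, no blank line. */
EXPORT_SYMBOL(blk_example_resize);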