author    Jens Axboe <jens.axboe@oracle.com>    2008-01-31 07:03:55 -0500
committer Jens Axboe <jens.axboe@oracle.com>    2008-02-01 03:26:33 -0500
commit    6728cb0e6343d4068ccec13f07212e6382d3ff33 (patch)
tree      5a7826dc91cf2d9cf54e8c24b40aa3c4d892c797 /block/blk-core.c
parent    22b132102f1540dd40f3e41df88796829b685f1a (diff)
block: make core bits checkpatch compliant
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/blk-core.c')
 -rw-r--r--  block/blk-core.c | 153
 1 file changed, 72 insertions(+), 81 deletions(-)
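The whole patch is mechanical style cleanup with no intended behaviour change. As orientation for the hunks below, here is a hedged before/after sketch of the checkpatch.pl complaint classes being fixed; the names (struct example, bad_fn, good_fn) are hypothetical, not code from this patch:

```c
#include <linux/kernel.h>
#include <linux/module.h>

struct example { int x; };

/* Before: the patterns checkpatch.pl complains about. */
struct example *bad_ptr = NULL;         /* ERROR: do not initialise globals/statics to NULL */

void bad_fn(struct example * e)         /* ERROR: "foo * bar" should be "foo *bar" */
{
        printk("value %d\n", e->x);     /* WARNING: printk() without a KERN_ facility level */
}

EXPORT_SYMBOL(bad_fn);                  /* blank line between '}' and EXPORT_SYMBOL() */

/* After: the forms this patch converges on. */
struct example *good_ptr;               /* globals/statics are zero-initialised implicitly */

void good_fn(struct example *e)
{
        printk(KERN_INFO "value %d\n", e->x);
}
EXPORT_SYMBOL(good_fn);
```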
diff --git a/block/blk-core.c b/block/blk-core.c
index 55cf293d907d..4afb39c82339 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3,7 +3,8 @@
  * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
- * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July2000
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
+ *      - July2000
  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
  */
 
@@ -42,7 +43,7 @@ struct kmem_cache *request_cachep;
 /*
  * For queue allocation
  */
-struct kmem_cache *blk_requestq_cachep = NULL;
+struct kmem_cache *blk_requestq_cachep;
 
 /*
  * Controlling structure to kblockd
@@ -137,7 +138,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
                error = -EIO;
 
        if (unlikely(nbytes > bio->bi_size)) {
-               printk("%s: want %u bytes done, only %u left\n",
+               printk(KERN_ERR "%s: want %u bytes done, %u left\n",
                       __FUNCTION__, nbytes, bio->bi_size);
                nbytes = bio->bi_size;
        }
@@ -161,23 +162,26 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 {
        int bit;
 
-       printk("%s: dev %s: type=%x, flags=%x\n", msg,
+       printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
                rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
                rq->cmd_flags);
 
-       printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
-                                                      rq->nr_sectors,
-                                                      rq->current_nr_sectors);
-       printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+       printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
+                                       (unsigned long long)rq->sector,
+                                       rq->nr_sectors,
+                                       rq->current_nr_sectors);
+       printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
+                                       rq->bio, rq->biotail,
+                                       rq->buffer, rq->data,
+                                       rq->data_len);
 
        if (blk_pc_request(rq)) {
-               printk("cdb: ");
+               printk(KERN_INFO "  cdb: ");
                for (bit = 0; bit < sizeof(rq->cmd); bit++)
                        printk("%02x ", rq->cmd[bit]);
                printk("\n");
        }
 }
-
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
 /*
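One subtlety in the hunk above: only the printk() that opens the cdb line gains KERN_INFO. The per-byte printk("%02x ") calls and the closing printk("\n") continue the same console line, so they deliberately stay level-free. A minimal stand-alone sketch of that pattern (dump_cdb is a hypothetical helper, not part of the patch):

```c
#include <linux/kernel.h>

/* Print a buffer as one "  cdb: xx xx xx ..." console line: the first
 * printk() sets the log level, the bare printk() calls continue the
 * same line, and the final "\n" terminates it.
 */
static void dump_cdb(const unsigned char *cmd, size_t len)
{
        size_t i;

        printk(KERN_INFO "  cdb: ");
        for (i = 0; i < len; i++)
                printk("%02x ", cmd[i]);
        printk("\n");
}
```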
@@ -204,7 +208,6 @@ void blk_plug_device(struct request_queue *q)
                blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
        }
 }
-
 EXPORT_SYMBOL(blk_plug_device);
 
 /*
@@ -221,7 +224,6 @@ int blk_remove_plug(struct request_queue *q)
        del_timer(&q->unplug_timer);
        return 1;
 }
-
 EXPORT_SYMBOL(blk_remove_plug);
 
 /*
@@ -328,7 +330,6 @@ void blk_start_queue(struct request_queue *q)
                kblockd_schedule_work(&q->unplug_work);
        }
 }
-
 EXPORT_SYMBOL(blk_start_queue);
 
 /**
@@ -408,7 +409,7 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-void blk_cleanup_queue(struct request_queue * q)
+void blk_cleanup_queue(struct request_queue *q)
 {
        mutex_lock(&q->sysfs_lock);
        set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
@@ -419,7 +420,6 @@ void blk_cleanup_queue(struct request_queue * q)
 
        blk_put_queue(q);
 }
-
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 static int blk_init_free_list(struct request_queue *q)
@@ -575,7 +575,6 @@ int blk_get_queue(struct request_queue *q)
 
        return 1;
 }
-
 EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
@@ -774,7 +773,7 @@ rq_starved:
         */
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;
-       
+
        rq_init(q, rq);
 
        blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
@@ -888,7 +887,6 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 
        elv_requeue_request(q, rq);
 }
-
 EXPORT_SYMBOL(blk_requeue_request);
 
 /**
@@ -939,7 +937,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
                blk_start_queueing(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
-
 EXPORT_SYMBOL(blk_insert_request);
 
 /*
@@ -947,7 +944,7 @@ EXPORT_SYMBOL(blk_insert_request);
  * queue lock is held and interrupts disabled, as we muck with the
  * request queue list.
  */
-static inline void add_request(struct request_queue * q, struct request * req)
+static inline void add_request(struct request_queue *q, struct request *req)
 {
        drive_stat_acct(req, 1);
 
@@ -957,7 +954,7 @@ static inline void add_request(struct request_queue * q, struct request * req)
         */
        __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
 }
-       
+
 /*
  * disk_round_stats()   - Round off the performance stats on a struct
  * disk_stats.
@@ -987,7 +984,6 @@ void disk_round_stats(struct gendisk *disk)
        }
        disk->stamp = now;
 }
-
 EXPORT_SYMBOL_GPL(disk_round_stats);
 
 /*
@@ -1017,7 +1013,6 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                freed_request(q, rw, priv);
        }
 }
-
 EXPORT_SYMBOL_GPL(__blk_put_request);
 
 void blk_put_request(struct request *req)
@@ -1035,7 +1030,6 @@ void blk_put_request(struct request *req)
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
 }
-
 EXPORT_SYMBOL(blk_put_request);
 
 void init_request_from_bio(struct request *req, struct bio *bio)
@@ -1096,53 +1090,53 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
        el_ret = elv_merge(q, &req, bio);
        switch (el_ret) {
-               case ELEVATOR_BACK_MERGE:
-                       BUG_ON(!rq_mergeable(req));
+       case ELEVATOR_BACK_MERGE:
+               BUG_ON(!rq_mergeable(req));
 
-                       if (!ll_back_merge_fn(q, req, bio))
-                               break;
+               if (!ll_back_merge_fn(q, req, bio))
+                       break;
 
-                       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+               blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
 
-                       req->biotail->bi_next = bio;
-                       req->biotail = bio;
-                       req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-                       req->ioprio = ioprio_best(req->ioprio, prio);
-                       drive_stat_acct(req, 0);
-                       if (!attempt_back_merge(q, req))
-                               elv_merged_request(q, req, el_ret);
-                       goto out;
+               req->biotail->bi_next = bio;
+               req->biotail = bio;
+               req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+               req->ioprio = ioprio_best(req->ioprio, prio);
+               drive_stat_acct(req, 0);
+               if (!attempt_back_merge(q, req))
+                       elv_merged_request(q, req, el_ret);
+               goto out;
 
-               case ELEVATOR_FRONT_MERGE:
-                       BUG_ON(!rq_mergeable(req));
+       case ELEVATOR_FRONT_MERGE:
+               BUG_ON(!rq_mergeable(req));
 
-                       if (!ll_front_merge_fn(q, req, bio))
-                               break;
+               if (!ll_front_merge_fn(q, req, bio))
+                       break;
 
-                       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+               blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
 
-                       bio->bi_next = req->bio;
-                       req->bio = bio;
+               bio->bi_next = req->bio;
+               req->bio = bio;
 
-                       /*
-                        * may not be valid. if the low level driver said
-                        * it didn't need a bounce buffer then it better
-                        * not touch req->buffer either...
-                        */
-                       req->buffer = bio_data(bio);
-                       req->current_nr_sectors = bio_cur_sectors(bio);
-                       req->hard_cur_sectors = req->current_nr_sectors;
-                       req->sector = req->hard_sector = bio->bi_sector;
-                       req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-                       req->ioprio = ioprio_best(req->ioprio, prio);
-                       drive_stat_acct(req, 0);
-                       if (!attempt_front_merge(q, req))
-                               elv_merged_request(q, req, el_ret);
-                       goto out;
+               /*
+                * may not be valid. if the low level driver said
+                * it didn't need a bounce buffer then it better
+                * not touch req->buffer either...
+                */
+               req->buffer = bio_data(bio);
+               req->current_nr_sectors = bio_cur_sectors(bio);
+               req->hard_cur_sectors = req->current_nr_sectors;
+               req->sector = req->hard_sector = bio->bi_sector;
+               req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+               req->ioprio = ioprio_best(req->ioprio, prio);
+               drive_stat_acct(req, 0);
+               if (!attempt_front_merge(q, req))
+                       elv_merged_request(q, req, el_ret);
+               goto out;
 
-               /* ELV_NO_MERGE: elevator says don't/can't merge. */
-               default:
-                       ;
+       /* ELV_NO_MERGE: elevator says don't/can't merge. */
+       default:
+               ;
        }
 
 get_rq:
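The large hunk above changes no logic at all; it only re-indents the switch body, since checkpatch wants case labels at the same indent as the switch keyword rather than one tab deeper. In miniature (hypothetical function and values, not from the patch):

```c
/* Kernel style after this patch: 'case' aligns with 'switch'. */
static const char *merge_kind(int el_ret)
{
        switch (el_ret) {
        case 1:
                return "back merge";
        case 2:
                return "front merge";
        /* elevator says don't/can't merge */
        default:
                return "no merge";
        }
}
```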
@@ -1350,7 +1344,7 @@ end_io:
        }
 
        if (unlikely(nr_sectors > q->max_hw_sectors)) {
-               printk("bio too big device %s (%u > %u)\n",
+               printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                        bdevname(bio->bi_bdev, b),
                        bio_sectors(bio),
                        q->max_hw_sectors);
@@ -1439,7 +1433,6 @@ void generic_make_request(struct bio *bio)
        } while (bio);
        current->bio_tail = NULL; /* deactivate */
 }
-
 EXPORT_SYMBOL(generic_make_request);
 
 /**
@@ -1480,13 +1473,12 @@ void submit_bio(int rw, struct bio *bio)
                        current->comm, task_pid_nr(current),
                        (rw & WRITE) ? "WRITE" : "READ",
                        (unsigned long long)bio->bi_sector,
-                       bdevname(bio->bi_bdev,b));
+                       bdevname(bio->bi_bdev, b));
                }
        }
 
        generic_make_request(bio);
 }
-
 EXPORT_SYMBOL(submit_bio);
 
 /**
@@ -1518,9 +1510,8 @@ static int __end_that_request_first(struct request *req, int error,
        if (!blk_pc_request(req))
                req->errors = 0;
 
-       if (error) {
-               if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
-                       printk("end_request: I/O error, dev %s, sector %llu\n",
+       if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+               printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
                                req->rq_disk ? req->rq_disk->disk_name : "?",
                                (unsigned long long)req->sector);
        }
@@ -1554,9 +1545,9 @@ static int __end_that_request_first(struct request *req, int error,
 
                if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
                        blk_dump_rq_flags(req, "__end_that");
-                       printk("%s: bio idx %d >= vcnt %d\n",
-                               __FUNCTION__,
-                               bio->bi_idx, bio->bi_vcnt);
+                       printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
+                               __FUNCTION__, bio->bi_idx,
+                               bio->bi_vcnt);
                        break;
                }
 
@@ -1582,7 +1573,8 @@ static int __end_that_request_first(struct request *req, int error,
                total_bytes += nbytes;
                nr_bytes -= nbytes;
 
-               if ((bio = req->bio)) {
+               bio = req->bio;
+               if (bio) {
                        /*
                         * end more in this run, or just return 'not-done'
                         */
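The two-line split above serves another checkpatch rule: no assignments inside an if () condition. Isolated into a hypothetical helper (not code from the patch), the transformation looks like this:

```c
struct bio;
struct request { struct bio *bio; };

/* was: if ((bio = req->bio)) { ... } */
static int request_has_bio(struct request *req)
{
        struct bio *bio;

        bio = req->bio;         /* assignment hoisted out of the condition */
        if (bio)
                return 1;
        return 0;
}
```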
@@ -1626,15 +1618,16 @@ static void blk_done_softirq(struct softirq_action *h)
        local_irq_enable();
 
        while (!list_empty(&local_list)) {
-               struct request *rq = list_entry(local_list.next, struct request, donelist);
+               struct request *rq;
 
+               rq = list_entry(local_list.next, struct request, donelist);
                list_del_init(&rq->donelist);
                rq->q->softirq_done_fn(rq);
        }
 }
 
-static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
-                                   void *hcpu)
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+                                   unsigned long action, void *hcpu)
 {
        /*
         * If a CPU goes away, splice its entries to the current CPU
@@ -1676,7 +1669,7 @@ void blk_complete_request(struct request *req)
        unsigned long flags;
 
        BUG_ON(!req->q->softirq_done_fn);
-       
+
        local_irq_save(flags);
 
        cpu_list = &__get_cpu_var(blk_cpu_done);
@@ -1685,9 +1678,8 @@ void blk_complete_request(struct request *req)
 
        local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(blk_complete_request);
 
 /*
  * queue lock must be held
  */
@@ -2002,7 +1994,6 @@ int kblockd_schedule_work(struct work_struct *work)
 {
        return queue_work(kblockd_workqueue, work);
 }
-
 EXPORT_SYMBOL(kblockd_schedule_work);
 
 void kblockd_flush_work(struct work_struct *work)