author     David Woodhouse <dwmw2@infradead.org>  2008-02-03 02:29:41 -0500
committer  David Woodhouse <dwmw2@infradead.org>  2008-02-03 02:30:32 -0500
commit     c1f3ee120bb61045b1c0a3ead620d1d65af47130 (patch)
tree       908430bf2b47fe8e96ac623ae7ab6dd5698d0938 /block
parent     e619a75ff6201b567a539e787aa9af9bc63a3187 (diff)
parent     9135f1901ee6449dfe338adf6e40e9c2025b8150 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'block')
-rw-r--r--  block/Makefile              4
-rw-r--r--  block/as-iosched.c         57
-rw-r--r--  block/blk-barrier.c       318
-rw-r--r--  block/blk-core.c         2027
-rw-r--r--  block/blk-exec.c          104
-rw-r--r--  block/blk-ioc.c           185
-rw-r--r--  block/blk-map.c           262
-rw-r--r--  block/blk-merge.c         485
-rw-r--r--  block/blk-settings.c      395
-rw-r--r--  block/blk-sysfs.c         310
-rw-r--r--  block/blk-tag.c           390
-rw-r--r--  block/blk.h                53
-rw-r--r--  block/blktrace.c          114
-rw-r--r--  block/bsg.c                15
-rw-r--r--  block/cfq-iosched.c       519
-rw-r--r--  block/compat_ioctl.c        7
-rw-r--r--  block/deadline-iosched.c   57
-rw-r--r--  block/elevator.c           94
-rw-r--r--  block/genhd.c             424
-rw-r--r--  block/ll_rw_blk.c        4242
-rw-r--r--  block/noop-iosched.c        4
-rw-r--r--  block/scsi_ioctl.c          4
22 files changed, 5151 insertions, 4919 deletions
diff --git a/block/Makefile b/block/Makefile
index 826108190f00..5a43c7d79594 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -2,7 +2,9 @@
 # Makefile for the kernel block layer
 #
 
-obj-$(CONFIG_BLOCK) := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
+obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
+			blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
+			blk-exec.o blk-merge.o ioctl.o genhd.o scsi_ioctl.o
 
 obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
 obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
diff --git a/block/as-iosched.c b/block/as-iosched.c
index dc715a562e14..8c3946787dbb 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -170,9 +170,11 @@ static void free_as_io_context(struct as_io_context *aic)
 
 static void as_trim(struct io_context *ioc)
 {
+	spin_lock_irq(&ioc->lock);
 	if (ioc->aic)
 		free_as_io_context(ioc->aic);
 	ioc->aic = NULL;
+	spin_unlock_irq(&ioc->lock);
 }
 
 /* Called when the task exits */
@@ -233,10 +235,12 @@ static void as_put_io_context(struct request *rq)
 	aic = RQ_IOC(rq)->aic;
 
 	if (rq_is_sync(rq) && aic) {
-		spin_lock(&aic->lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&aic->lock, flags);
 		set_bit(AS_TASK_IORUNNING, &aic->state);
 		aic->last_end_request = jiffies;
-		spin_unlock(&aic->lock);
+		spin_unlock_irqrestore(&aic->lock, flags);
 	}
 
 	put_io_context(RQ_IOC(rq));
@@ -462,7 +466,9 @@ static void as_antic_timeout(unsigned long data)
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (ad->antic_status == ANTIC_WAIT_REQ
 			|| ad->antic_status == ANTIC_WAIT_NEXT) {
-		struct as_io_context *aic = ad->io_context->aic;
+		struct as_io_context *aic;
+		spin_lock(&ad->io_context->lock);
+		aic = ad->io_context->aic;
 
 		ad->antic_status = ANTIC_FINISHED;
 		kblockd_schedule_work(&ad->antic_work);
@@ -475,6 +481,7 @@ static void as_antic_timeout(unsigned long data)
 			/* process not "saved" by a cooperating request */
 			ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
 		}
+		spin_unlock(&ad->io_context->lock);
 	}
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -635,9 +642,11 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
 
 	ioc = ad->io_context;
 	BUG_ON(!ioc);
+	spin_lock(&ioc->lock);
 
 	if (rq && ioc == RQ_IOC(rq)) {
 		/* request from same process */
+		spin_unlock(&ioc->lock);
 		return 1;
 	}
 
@@ -646,20 +655,25 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
 		 * In this situation status should really be FINISHED,
 		 * however the timer hasn't had the chance to run yet.
 		 */
+		spin_unlock(&ioc->lock);
 		return 1;
 	}
 
 	aic = ioc->aic;
-	if (!aic)
+	if (!aic) {
+		spin_unlock(&ioc->lock);
 		return 0;
+	}
 
 	if (atomic_read(&aic->nr_queued) > 0) {
 		/* process has more requests queued */
+		spin_unlock(&ioc->lock);
 		return 1;
 	}
 
 	if (atomic_read(&aic->nr_dispatched) > 0) {
 		/* process has more requests dispatched */
+		spin_unlock(&ioc->lock);
 		return 1;
 	}
 
@@ -680,6 +694,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
 	}
 
 	as_update_iohist(ad, aic, rq);
+	spin_unlock(&ioc->lock);
 	return 1;
 }
 
@@ -688,20 +703,27 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
 		if (aic->ttime_samples == 0)
 			ad->exit_prob = (7*ad->exit_prob + 256)/8;
 
-		if (ad->exit_no_coop > 128)
+		if (ad->exit_no_coop > 128) {
+			spin_unlock(&ioc->lock);
 			return 1;
+		}
 	}
 
 	if (aic->ttime_samples == 0) {
-		if (ad->new_ttime_mean > ad->antic_expire)
+		if (ad->new_ttime_mean > ad->antic_expire) {
+			spin_unlock(&ioc->lock);
 			return 1;
-		if (ad->exit_prob * ad->exit_no_coop > 128*256)
+		}
+		if (ad->exit_prob * ad->exit_no_coop > 128*256) {
+			spin_unlock(&ioc->lock);
 			return 1;
+		}
 	} else if (aic->ttime_mean > ad->antic_expire) {
 		/* the process thinks too much between requests */
+		spin_unlock(&ioc->lock);
 		return 1;
 	}
-
+	spin_unlock(&ioc->lock);
 	return 0;
 }
 
@@ -880,7 +902,7 @@ static void as_remove_queued_request(struct request_queue *q,
 }
 
 /*
- * as_fifo_expired returns 0 if there are no expired reads on the fifo,
+ * as_fifo_expired returns 0 if there are no expired requests on the fifo,
  * 1 otherwise. It is ratelimited so that we only perform the check once per
  * `fifo_expire' interval. Otherwise a large number of expired requests
  * would create a hopeless seekstorm.
@@ -1097,7 +1119,8 @@ dispatch_writes:
 		ad->batch_data_dir = REQ_ASYNC;
 		ad->current_write_count = ad->write_batch_count;
 		ad->write_batch_idled = 0;
-		rq = ad->next_rq[ad->batch_data_dir];
+		rq = rq_entry_fifo(ad->fifo_list[REQ_ASYNC].next);
+		ad->last_check_fifo[REQ_ASYNC] = jiffies;
 		goto dispatch_request;
 	}
 
@@ -1159,7 +1182,7 @@ static void as_add_request(struct request_queue *q, struct request *rq)
 	as_add_rq_rb(ad, rq);
 
 	/*
-	 * set expire time (only used for reads) and add to fifo list
+	 * set expire time and add to fifo list
 	 */
 	rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]);
 	list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]);
@@ -1245,16 +1268,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
 	 */
 	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
 		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
-			struct io_context *rioc = RQ_IOC(req);
-			struct io_context *nioc = RQ_IOC(next);
-
 			list_move(&req->queuelist, &next->queuelist);
 			rq_set_fifo_time(req, rq_fifo_time(next));
-			/*
-			 * Don't copy here but swap, because when anext is
-			 * removed below, it must contain the unused context
-			 */
-			swap_io_context(&rioc, &nioc);
 		}
 	}
 
@@ -1463,7 +1478,9 @@ static struct elevator_type iosched_as = {
 
 static int __init as_init(void)
 {
-	return elv_register(&iosched_as);
+	elv_register(&iosched_as);
+
+	return 0;
 }
 
 static void __exit as_exit(void)
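
The as-iosched.c hunks above all enforce one rule introduced by this merge: ioc->aic can now be freed or replaced concurrently, so every access to it takes the new io_context spinlock, and every early return must drop that lock again. A minimal sketch of the pattern, assuming only the struct io_context fields visible in the hunks (the example_* name is hypothetical, not part of the patch):

	/*
	 * Sketch: any reader of ioc->aic holds ioc->lock, and all return
	 * paths unlock. A single exit avoids the repeated spin_unlock()
	 * calls seen in as_can_break_anticipation() above.
	 */
	static int example_has_queued_io(struct io_context *ioc)
	{
		int ret = 0;

		spin_lock(&ioc->lock);
		if (ioc->aic && atomic_read(&ioc->aic->nr_queued) > 0)
			ret = 1;
		spin_unlock(&ioc->lock);
		return ret;
	}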
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
new file mode 100644
index 000000000000..6901eedeffce
--- /dev/null
+++ b/block/blk-barrier.c
@@ -0,0 +1,318 @@
+/*
+ * Functions related to barrier IO handling
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+
+#include "blk.h"
+
+/**
+ * blk_queue_ordered - does this queue support ordered writes
+ * @q:        the request queue
+ * @ordered:  one of QUEUE_ORDERED_*
+ * @prepare_flush_fn: rq setup helper for cache flush ordered writes
+ *
+ * Description:
+ *   For journalled file systems, doing ordered writes on a commit
+ *   block instead of explicitly doing wait_on_buffer (which is bad
+ *   for performance) can be a big win. Block drivers supporting this
+ *   feature should call this function and indicate so.
+ *
+ **/
+int blk_queue_ordered(struct request_queue *q, unsigned ordered,
+		      prepare_flush_fn *prepare_flush_fn)
+{
+	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
+	    prepare_flush_fn == NULL) {
+		printk(KERN_ERR "%s: prepare_flush_fn required\n",
+		       __FUNCTION__);
+		return -EINVAL;
+	}
+
+	if (ordered != QUEUE_ORDERED_NONE &&
+	    ordered != QUEUE_ORDERED_DRAIN &&
+	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
+	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
+	    ordered != QUEUE_ORDERED_TAG &&
+	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
+	    ordered != QUEUE_ORDERED_TAG_FUA) {
+		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
+		return -EINVAL;
+	}
+
+	q->ordered = ordered;
+	q->next_ordered = ordered;
+	q->prepare_flush_fn = prepare_flush_fn;
+
+	return 0;
+}
+EXPORT_SYMBOL(blk_queue_ordered);
+
+/*
+ * Cache flushing for ordered writes handling
+ */
+inline unsigned blk_ordered_cur_seq(struct request_queue *q)
+{
+	if (!q->ordseq)
+		return 0;
+	return 1 << ffz(q->ordseq);
+}
+
+unsigned blk_ordered_req_seq(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	BUG_ON(q->ordseq == 0);
+
+	if (rq == &q->pre_flush_rq)
+		return QUEUE_ORDSEQ_PREFLUSH;
+	if (rq == &q->bar_rq)
+		return QUEUE_ORDSEQ_BAR;
+	if (rq == &q->post_flush_rq)
+		return QUEUE_ORDSEQ_POSTFLUSH;
+
+	/*
+	 * !fs requests don't need to follow barrier ordering. Always
+	 * put them at the front. This fixes the following deadlock.
+	 *
+	 * http://thread.gmane.org/gmane.linux.kernel/537473
+	 */
+	if (!blk_fs_request(rq))
+		return QUEUE_ORDSEQ_DRAIN;
+
+	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
+	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
+		return QUEUE_ORDSEQ_DRAIN;
+	else
+		return QUEUE_ORDSEQ_DONE;
+}
+
+void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
+{
+	struct request *rq;
+
+	if (error && !q->orderr)
+		q->orderr = error;
+
+	BUG_ON(q->ordseq & seq);
+	q->ordseq |= seq;
+
+	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
+		return;
+
+	/*
+	 * Okay, sequence complete.
+	 */
+	q->ordseq = 0;
+	rq = q->orig_bar_rq;
+
+	if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
+		BUG();
+}
+
+static void pre_flush_end_io(struct request *rq, int error)
+{
+	elv_completed_request(rq->q, rq);
+	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
+}
+
+static void bar_end_io(struct request *rq, int error)
+{
+	elv_completed_request(rq->q, rq);
+	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
+}
+
+static void post_flush_end_io(struct request *rq, int error)
+{
+	elv_completed_request(rq->q, rq);
+	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
+}
+
+static void queue_flush(struct request_queue *q, unsigned which)
+{
+	struct request *rq;
+	rq_end_io_fn *end_io;
+
+	if (which == QUEUE_ORDERED_PREFLUSH) {
+		rq = &q->pre_flush_rq;
+		end_io = pre_flush_end_io;
+	} else {
+		rq = &q->post_flush_rq;
+		end_io = post_flush_end_io;
+	}
+
+	rq->cmd_flags = REQ_HARDBARRIER;
+	rq_init(q, rq);
+	rq->elevator_private = NULL;
+	rq->elevator_private2 = NULL;
+	rq->rq_disk = q->bar_rq.rq_disk;
+	rq->end_io = end_io;
+	q->prepare_flush_fn(q, rq);
+
+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+}
+
+static inline struct request *start_ordered(struct request_queue *q,
+					    struct request *rq)
+{
+	q->orderr = 0;
+	q->ordered = q->next_ordered;
+	q->ordseq |= QUEUE_ORDSEQ_STARTED;
+
+	/*
+	 * Prep proxy barrier request.
+	 */
+	blkdev_dequeue_request(rq);
+	q->orig_bar_rq = rq;
+	rq = &q->bar_rq;
+	rq->cmd_flags = 0;
+	rq_init(q, rq);
+	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+		rq->cmd_flags |= REQ_RW;
+	if (q->ordered & QUEUE_ORDERED_FUA)
+		rq->cmd_flags |= REQ_FUA;
+	rq->elevator_private = NULL;
+	rq->elevator_private2 = NULL;
+	init_request_from_bio(rq, q->orig_bar_rq->bio);
+	rq->end_io = bar_end_io;
+
+	/*
+	 * Queue ordered sequence. As we stack them at the head, we
+	 * need to queue in reverse order. Note that we rely on that
+	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
+	 * request gets inbetween ordered sequence. If this request is
+	 * an empty barrier, we don't need to do a postflush ever since
+	 * there will be no data written between the pre and post flush.
+	 * Hence a single flush will suffice.
+	 */
+	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
+		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
+	else
+		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
+
+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+
+	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
+		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
+		rq = &q->pre_flush_rq;
+	} else
+		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
+
+	if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
+		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
+	else
+		rq = NULL;
+
+	return rq;
+}
+
+int blk_do_ordered(struct request_queue *q, struct request **rqp)
+{
+	struct request *rq = *rqp;
+	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+
+	if (!q->ordseq) {
+		if (!is_barrier)
+			return 1;
+
+		if (q->next_ordered != QUEUE_ORDERED_NONE) {
+			*rqp = start_ordered(q, rq);
+			return 1;
+		} else {
+			/*
+			 * This can happen when the queue switches to
+			 * ORDERED_NONE while this request is on it.
+			 */
+			blkdev_dequeue_request(rq);
+			if (__blk_end_request(rq, -EOPNOTSUPP,
+					      blk_rq_bytes(rq)))
+				BUG();
+			*rqp = NULL;
+			return 0;
+		}
+	}
+
+	/*
+	 * Ordered sequence in progress
+	 */
+
+	/* Special requests are not subject to ordering rules. */
+	if (!blk_fs_request(rq) &&
+	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+		return 1;
+
+	if (q->ordered & QUEUE_ORDERED_TAG) {
+		/* Ordered by tag. Blocking the next barrier is enough. */
+		if (is_barrier && rq != &q->bar_rq)
+			*rqp = NULL;
+	} else {
+		/* Ordered by draining. Wait for turn. */
+		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+			*rqp = NULL;
+	}
+
+	return 1;
+}
+
+static void bio_end_empty_barrier(struct bio *bio, int err)
+{
+	if (err)
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+	complete(bio->bi_private);
+}
+
+/**
+ * blkdev_issue_flush - queue a flush
+ * @bdev:	blockdev to issue flush for
+ * @error_sector:	error sector
+ *
+ * Description:
+ *    Issue a flush for the block device in question. Caller can supply
+ *    room for storing the error offset in case of a flush error, if they
+ *    wish to. Caller must run wait_for_completion() on its own.
+ */
+int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
+{
+	DECLARE_COMPLETION_ONSTACK(wait);
+	struct request_queue *q;
+	struct bio *bio;
+	int ret;
+
+	if (bdev->bd_disk == NULL)
+		return -ENXIO;
+
+	q = bdev_get_queue(bdev);
+	if (!q)
+		return -ENXIO;
+
+	bio = bio_alloc(GFP_KERNEL, 0);
+	if (!bio)
+		return -ENOMEM;
+
+	bio->bi_end_io = bio_end_empty_barrier;
+	bio->bi_private = &wait;
+	bio->bi_bdev = bdev;
+	submit_bio(1 << BIO_RW_BARRIER, bio);
+
+	wait_for_completion(&wait);
+
+	/*
+	 * The driver must store the error location in ->bi_sector, if
+	 * it supports it. For non-stacked drivers, this should be copied
+	 * from rq->sector.
+	 */
+	if (error_sector)
+		*error_sector = bio->bi_sector;
+
+	ret = 0;
+	if (!bio_flagged(bio, BIO_UPTODATE))
+		ret = -EIO;
+
+	bio_put(bio);
+	return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_flush);
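
blk-barrier.c carries the two entry points used from outside the block core: blk_queue_ordered(), with which a driver declares at init time what ordering its hardware can provide, and blkdev_issue_flush(), with which a filesystem forces an empty barrier through the device. A hedged sketch of how the two sides pair up, assuming the QUEUE_ORDERED_DRAIN_FLUSH mode and the prepare_flush_fn typedef from this era's blkdev.h; the mydrv_* names and the flush-command setup are hypothetical:

	/* Driver side: drain the queue, flush the cache around barriers. */
	static void mydrv_prepare_flush(struct request_queue *q,
					struct request *rq)
	{
		/* fill rq with whatever tells this hardware to flush */
		rq->cmd_type = REQ_TYPE_SPECIAL;	/* placeholder setup */
	}

	static int mydrv_setup_queue(struct request_queue *q)
	{
		return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
					 mydrv_prepare_flush);
	}

	/* Filesystem side: push a commit block to stable storage. */
	static int example_barrier_commit(struct block_device *bdev)
	{
		return blkdev_issue_flush(bdev, NULL);	/* no error sector */
	}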
diff --git a/block/blk-core.c b/block/blk-core.c
new file mode 100644
index 000000000000..4afb39c82339
--- /dev/null
+++ b/block/blk-core.c
@@ -0,0 +1,2027 @@
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
+ * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
+ *	- July2000
+ * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
+ */
+
+/*
+ * This handles all read/write requests to block devices
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/kernel_stat.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/writeback.h>
+#include <linux/task_io_accounting_ops.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/blktrace_api.h>
+#include <linux/fault-inject.h>
+
+#include "blk.h"
+
+static int __make_request(struct request_queue *q, struct bio *bio);
+
+/*
+ * For the allocated request tables
+ */
+struct kmem_cache *request_cachep;
+
+/*
+ * For queue allocation
+ */
+struct kmem_cache *blk_requestq_cachep;
+
+/*
+ * Controlling structure to kblockd
+ */
+static struct workqueue_struct *kblockd_workqueue;
+
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+
+static void drive_stat_acct(struct request *rq, int new_io)
+{
+	int rw = rq_data_dir(rq);
+
+	if (!blk_fs_request(rq) || !rq->rq_disk)
+		return;
+
+	if (!new_io) {
+		__disk_stat_inc(rq->rq_disk, merges[rw]);
+	} else {
+		disk_round_stats(rq->rq_disk);
+		rq->rq_disk->in_flight++;
+	}
+}
+
+void blk_queue_congestion_threshold(struct request_queue *q)
+{
+	int nr;
+
+	nr = q->nr_requests - (q->nr_requests / 8) + 1;
+	if (nr > q->nr_requests)
+		nr = q->nr_requests;
+	q->nr_congestion_on = nr;
+
+	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
+	if (nr < 1)
+		nr = 1;
+	q->nr_congestion_off = nr;
+}
+
+/**
+ * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
+ * @bdev:	device
+ *
+ * Locates the passed device's request queue and returns the address of its
+ * backing_dev_info
+ *
+ * Will return NULL if the request queue cannot be located.
+ */
+struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
+{
+	struct backing_dev_info *ret = NULL;
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (q)
+		ret = &q->backing_dev_info;
+	return ret;
+}
+EXPORT_SYMBOL(blk_get_backing_dev_info);
+
+void rq_init(struct request_queue *q, struct request *rq)
+{
+	INIT_LIST_HEAD(&rq->queuelist);
+	INIT_LIST_HEAD(&rq->donelist);
+
+	rq->errors = 0;
+	rq->bio = rq->biotail = NULL;
+	INIT_HLIST_NODE(&rq->hash);
+	RB_CLEAR_NODE(&rq->rb_node);
+	rq->ioprio = 0;
+	rq->buffer = NULL;
+	rq->ref_count = 1;
+	rq->q = q;
+	rq->special = NULL;
+	rq->data_len = 0;
+	rq->data = NULL;
+	rq->nr_phys_segments = 0;
+	rq->sense = NULL;
+	rq->end_io = NULL;
+	rq->end_io_data = NULL;
+	rq->completion_data = NULL;
+	rq->next_rq = NULL;
+}
+
+static void req_bio_endio(struct request *rq, struct bio *bio,
+			  unsigned int nbytes, int error)
+{
+	struct request_queue *q = rq->q;
+
+	if (&q->bar_rq != rq) {
+		if (error)
+			clear_bit(BIO_UPTODATE, &bio->bi_flags);
+		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+			error = -EIO;
+
+		if (unlikely(nbytes > bio->bi_size)) {
+			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
+			       __FUNCTION__, nbytes, bio->bi_size);
+			nbytes = bio->bi_size;
+		}
+
+		bio->bi_size -= nbytes;
+		bio->bi_sector += (nbytes >> 9);
+		if (bio->bi_size == 0)
+			bio_endio(bio, error);
+	} else {
+
+		/*
+		 * Okay, this is the barrier request in progress, just
+		 * record the error;
+		 */
+		if (error && !q->orderr)
+			q->orderr = error;
+	}
+}
+
+void blk_dump_rq_flags(struct request *rq, char *msg)
+{
+	int bit;
+
+	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
+		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+		rq->cmd_flags);
+
+	printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
+	       (unsigned long long)rq->sector,
+	       rq->nr_sectors,
+	       rq->current_nr_sectors);
+	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
+	       rq->bio, rq->biotail,
+	       rq->buffer, rq->data,
+	       rq->data_len);
+
+	if (blk_pc_request(rq)) {
+		printk(KERN_INFO "  cdb: ");
+		for (bit = 0; bit < sizeof(rq->cmd); bit++)
+			printk("%02x ", rq->cmd[bit]);
+		printk("\n");
+	}
+}
+EXPORT_SYMBOL(blk_dump_rq_flags);
+
+/*
+ * "plug" the device if there are no outstanding requests: this will
+ * force the transfer to start only after we have put all the requests
+ * on the list.
+ *
+ * This is called with interrupts off and no requests on the queue and
+ * with the queue lock held.
+ */
+void blk_plug_device(struct request_queue *q)
+{
+	WARN_ON(!irqs_disabled());
+
+	/*
+	 * don't plug a stopped queue, it must be paired with blk_start_queue()
+	 * which will restart the queueing
+	 */
+	if (blk_queue_stopped(q))
+		return;
+
+	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+	}
+}
+EXPORT_SYMBOL(blk_plug_device);
+
+/*
+ * remove the queue from the plugged list, if present. called with
+ * queue lock held and interrupts disabled.
+ */
+int blk_remove_plug(struct request_queue *q)
+{
+	WARN_ON(!irqs_disabled());
+
+	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+		return 0;
+
+	del_timer(&q->unplug_timer);
+	return 1;
+}
+EXPORT_SYMBOL(blk_remove_plug);
+
+/*
+ * remove the plug and let it rip..
+ */
+void __generic_unplug_device(struct request_queue *q)
+{
+	if (unlikely(blk_queue_stopped(q)))
+		return;
+
+	if (!blk_remove_plug(q))
+		return;
+
+	q->request_fn(q);
+}
+EXPORT_SYMBOL(__generic_unplug_device);
+
+/**
+ * generic_unplug_device - fire a request queue
+ * @q:    The &struct request_queue in question
+ *
+ * Description:
+ *   Linux uses plugging to build bigger requests queues before letting
+ *   the device have at them. If a queue is plugged, the I/O scheduler
+ *   is still adding and merging requests on the queue. Once the queue
+ *   gets unplugged, the request_fn defined for the queue is invoked and
+ *   transfers started.
+ **/
+void generic_unplug_device(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	__generic_unplug_device(q);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(generic_unplug_device);
+
+static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
+				   struct page *page)
+{
+	struct request_queue *q = bdi->unplug_io_data;
+
+	blk_unplug(q);
+}
+
+void blk_unplug_work(struct work_struct *work)
+{
+	struct request_queue *q =
+		container_of(work, struct request_queue, unplug_work);
+
+	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+				q->rq.count[READ] + q->rq.count[WRITE]);
+
+	q->unplug_fn(q);
+}
+
+void blk_unplug_timeout(unsigned long data)
+{
+	struct request_queue *q = (struct request_queue *)data;
+
+	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+				q->rq.count[READ] + q->rq.count[WRITE]);
+
+	kblockd_schedule_work(&q->unplug_work);
+}
+
+void blk_unplug(struct request_queue *q)
+{
+	/*
+	 * devices don't necessarily have an ->unplug_fn defined
+	 */
+	if (q->unplug_fn) {
+		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+					q->rq.count[READ] + q->rq.count[WRITE]);
+
+		q->unplug_fn(q);
+	}
+}
+EXPORT_SYMBOL(blk_unplug);
+
+/**
+ * blk_start_queue - restart a previously stopped queue
+ * @q:    The &struct request_queue in question
+ *
+ * Description:
+ *   blk_start_queue() will clear the stop flag on the queue, and call
+ *   the request_fn for the queue if it was in a stopped state when
+ *   entered. Also see blk_stop_queue(). Queue lock must be held.
+ **/
+void blk_start_queue(struct request_queue *q)
+{
+	WARN_ON(!irqs_disabled());
+
+	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+
+	/*
+	 * one level of recursion is ok and is much faster than kicking
+	 * the unplug handling
+	 */
+	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		q->request_fn(q);
+		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+	} else {
+		blk_plug_device(q);
+		kblockd_schedule_work(&q->unplug_work);
+	}
+}
+EXPORT_SYMBOL(blk_start_queue);
+
+/**
+ * blk_stop_queue - stop a queue
+ * @q:    The &struct request_queue in question
+ *
+ * Description:
+ *   The Linux block layer assumes that a block driver will consume all
+ *   entries on the request queue when the request_fn strategy is called.
+ *   Often this will not happen, because of hardware limitations (queue
+ *   depth settings). If a device driver gets a 'queue full' response,
+ *   or if it simply chooses not to queue more I/O at one point, it can
+ *   call this function to prevent the request_fn from being called until
+ *   the driver has signalled it's ready to go again. This happens by calling
+ *   blk_start_queue() to restart queue operations. Queue lock must be held.
+ **/
+void blk_stop_queue(struct request_queue *q)
+{
+	blk_remove_plug(q);
+	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+}
+EXPORT_SYMBOL(blk_stop_queue);
+
+/**
+ * blk_sync_queue - cancel any pending callbacks on a queue
+ * @q: the queue
+ *
+ * Description:
+ *     The block layer may perform asynchronous callback activity
+ *     on a queue, such as calling the unplug function after a timeout.
+ *     A block device may call blk_sync_queue to ensure that any
+ *     such activity is cancelled, thus allowing it to release resources
+ *     that the callbacks might use. The caller must already have made sure
+ *     that its ->make_request_fn will not re-add plugging prior to calling
+ *     this function.
+ *
+ */
+void blk_sync_queue(struct request_queue *q)
+{
+	del_timer_sync(&q->unplug_timer);
+	kblockd_flush_work(&q->unplug_work);
+}
+EXPORT_SYMBOL(blk_sync_queue);
+
+/**
+ * blk_run_queue - run a single device queue
+ * @q:	The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	blk_remove_plug(q);
+
+	/*
+	 * Only recurse once to avoid overrunning the stack, let the unplug
+	 * handling reinvoke the handler shortly if we already got there.
+	 */
+	if (!elv_queue_empty(q)) {
+		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+			q->request_fn(q);
+			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+		} else {
+			blk_plug_device(q);
+			kblockd_schedule_work(&q->unplug_work);
+		}
+	}
+
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_run_queue);
+
+void blk_put_queue(struct request_queue *q)
+{
+	kobject_put(&q->kobj);
+}
+EXPORT_SYMBOL(blk_put_queue);
+
+void blk_cleanup_queue(struct request_queue *q)
+{
+	mutex_lock(&q->sysfs_lock);
+	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	mutex_unlock(&q->sysfs_lock);
+
+	if (q->elevator)
+		elevator_exit(q->elevator);
+
+	blk_put_queue(q);
+}
+EXPORT_SYMBOL(blk_cleanup_queue);
+
+static int blk_init_free_list(struct request_queue *q)
+{
+	struct request_list *rl = &q->rq;
+
+	rl->count[READ] = rl->count[WRITE] = 0;
+	rl->starved[READ] = rl->starved[WRITE] = 0;
+	rl->elvpriv = 0;
+	init_waitqueue_head(&rl->wait[READ]);
+	init_waitqueue_head(&rl->wait[WRITE]);
+
+	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+				mempool_free_slab, request_cachep, q->node);
+
+	if (!rl->rq_pool)
+		return -ENOMEM;
+
+	return 0;
+}
+
+struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
+{
+	return blk_alloc_queue_node(gfp_mask, -1);
+}
+EXPORT_SYMBOL(blk_alloc_queue);
+
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+{
+	struct request_queue *q;
+	int err;
+
+	q = kmem_cache_alloc_node(blk_requestq_cachep,
+				gfp_mask | __GFP_ZERO, node_id);
+	if (!q)
+		return NULL;
+
+	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
+	q->backing_dev_info.unplug_io_data = q;
+	err = bdi_init(&q->backing_dev_info);
+	if (err) {
+		kmem_cache_free(blk_requestq_cachep, q);
+		return NULL;
+	}
+
+	init_timer(&q->unplug_timer);
+
+	kobject_init(&q->kobj, &blk_queue_ktype);
+
+	mutex_init(&q->sysfs_lock);
+
+	return q;
+}
+EXPORT_SYMBOL(blk_alloc_queue_node);
+
+/**
+ * blk_init_queue  - prepare a request queue for use with a block device
+ * @rfn:  The function to be called to process requests that have been
+ *        placed on the queue.
+ * @lock: Request queue spin lock
+ *
+ * Description:
+ *    If a block device wishes to use the standard request handling procedures,
+ *    which sorts requests and coalesces adjacent requests, then it must
+ *    call blk_init_queue().  The function @rfn will be called when there
+ *    are requests on the queue that need to be processed.  If the device
+ *    supports plugging, then @rfn may not be called immediately when requests
+ *    are available on the queue, but may be called at some time later instead.
+ *    Plugged queues are generally unplugged when a buffer belonging to one
+ *    of the requests on the queue is needed, or due to memory pressure.
+ *
+ *    @rfn is not required, or even expected, to remove all requests off the
+ *    queue, but only as many as it can handle at a time.  If it does leave
+ *    requests on the queue, it is responsible for arranging that the requests
+ *    get dealt with eventually.
+ *
+ *    The queue spin lock must be held while manipulating the requests on the
+ *    request queue; this lock will be taken also from interrupt context, so irq
+ *    disabling is needed for it.
+ *
+ *    Function returns a pointer to the initialized request queue, or NULL if
+ *    it didn't succeed.
+ *
+ * Note:
+ *    blk_init_queue() must be paired with a blk_cleanup_queue() call
+ *    when the block device is deactivated (such as at module unload).
+ **/
+
+struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
+{
+	return blk_init_queue_node(rfn, lock, -1);
+}
+EXPORT_SYMBOL(blk_init_queue);
+
+struct request_queue *
+blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
+{
+	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+
+	if (!q)
+		return NULL;
+
+	q->node = node_id;
+	if (blk_init_free_list(q)) {
+		kmem_cache_free(blk_requestq_cachep, q);
+		return NULL;
+	}
+
+	/*
+	 * if caller didn't supply a lock, they get per-queue locking with
+	 * our embedded lock
+	 */
+	if (!lock) {
+		spin_lock_init(&q->__queue_lock);
+		lock = &q->__queue_lock;
+	}
+
+	q->request_fn		= rfn;
+	q->prep_rq_fn		= NULL;
+	q->unplug_fn		= generic_unplug_device;
+	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
+	q->queue_lock		= lock;
+
+	blk_queue_segment_boundary(q, 0xffffffff);
+
+	blk_queue_make_request(q, __make_request);
+	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
+
+	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+
+	q->sg_reserved_size = INT_MAX;
+
+	/*
+	 * all done
+	 */
+	if (!elevator_init(q, NULL)) {
+		blk_queue_congestion_threshold(q);
+		return q;
+	}
+
+	blk_put_queue(q);
+	return NULL;
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+int blk_get_queue(struct request_queue *q)
+{
+	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+		kobject_get(&q->kobj);
+		return 0;
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(blk_get_queue);
+
+static inline void blk_free_request(struct request_queue *q, struct request *rq)
+{
+	if (rq->cmd_flags & REQ_ELVPRIV)
+		elv_put_request(q, rq);
+	mempool_free(rq, q->rq.rq_pool);
+}
+
+static struct request *
+blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
+{
+	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+
+	if (!rq)
+		return NULL;
+
+	/*
+	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
+	 * see bio.h and blkdev.h
+	 */
+	rq->cmd_flags = rw | REQ_ALLOCED;
+
+	if (priv) {
+		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
+			mempool_free(rq, q->rq.rq_pool);
+			return NULL;
+		}
+		rq->cmd_flags |= REQ_ELVPRIV;
+	}
+
+	return rq;
+}
+
+/*
+ * ioc_batching returns true if the ioc is a valid batching request and
+ * should be given priority access to a request.
+ */
+static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
+{
+	if (!ioc)
+		return 0;
+
+	/*
+	 * Make sure the process is able to allocate at least 1 request
+	 * even if the batch times out, otherwise we could theoretically
+	 * lose wakeups.
+	 */
+	return ioc->nr_batch_requests == q->nr_batching ||
+		(ioc->nr_batch_requests > 0
+		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
+}
+
+/*
+ * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
+ * will cause the process to be a "batcher" on all queues in the system. This
+ * is the behaviour we want though - once it gets a wakeup it should be given
+ * a nice run.
+ */
+static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
+{
+	if (!ioc || ioc_batching(q, ioc))
+		return;
+
+	ioc->nr_batch_requests = q->nr_batching;
+	ioc->last_waited = jiffies;
+}
+
+static void __freed_request(struct request_queue *q, int rw)
+{
+	struct request_list *rl = &q->rq;
+
+	if (rl->count[rw] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, rw);
+
+	if (rl->count[rw] + 1 <= q->nr_requests) {
+		if (waitqueue_active(&rl->wait[rw]))
+			wake_up(&rl->wait[rw]);
+
+		blk_clear_queue_full(q, rw);
+	}
+}
+
+/*
+ * A request has just been released.  Account for it, update the full and
+ * congestion status, wake up any waiters.   Called under q->queue_lock.
+ */
+static void freed_request(struct request_queue *q, int rw, int priv)
+{
+	struct request_list *rl = &q->rq;
+
+	rl->count[rw]--;
+	if (priv)
+		rl->elvpriv--;
+
+	__freed_request(q, rw);
+
+	if (unlikely(rl->starved[rw ^ 1]))
+		__freed_request(q, rw ^ 1);
+}
+
+#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
+/*
+ * Get a free request, queue_lock must be held.
+ * Returns NULL on failure, with queue_lock held.
+ * Returns !NULL on success, with queue_lock *not held*.
+ */
+static struct request *get_request(struct request_queue *q, int rw_flags,
+				   struct bio *bio, gfp_t gfp_mask)
+{
+	struct request *rq = NULL;
+	struct request_list *rl = &q->rq;
+	struct io_context *ioc = NULL;
+	const int rw = rw_flags & 0x01;
+	int may_queue, priv;
+
+	may_queue = elv_may_queue(q, rw_flags);
+	if (may_queue == ELV_MQUEUE_NO)
+		goto rq_starved;
+
+	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
+		if (rl->count[rw]+1 >= q->nr_requests) {
+			ioc = current_io_context(GFP_ATOMIC, q->node);
+			/*
+			 * The queue will fill after this allocation, so set
+			 * it as full, and mark this process as "batching".
+			 * This process will be allowed to complete a batch of
+			 * requests, others will be blocked.
+			 */
+			if (!blk_queue_full(q, rw)) {
+				ioc_set_batching(q, ioc);
+				blk_set_queue_full(q, rw);
+			} else {
+				if (may_queue != ELV_MQUEUE_MUST
+						&& !ioc_batching(q, ioc)) {
+					/*
+					 * The queue is full and the allocating
+					 * process is not a "batcher", and not
+					 * exempted by the IO scheduler
+					 */
+					goto out;
+				}
+			}
+		}
+		blk_set_queue_congested(q, rw);
+	}
+
+	/*
+	 * Only allow batching queuers to allocate up to 50% over the defined
+	 * limit of requests, otherwise we could have thousands of requests
+	 * allocated with any setting of ->nr_requests
+	 */
+	if (rl->count[rw] >= (3 * q->nr_requests / 2))
+		goto out;
+
+	rl->count[rw]++;
+	rl->starved[rw] = 0;
+
+	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+	if (priv)
+		rl->elvpriv++;
+
+	spin_unlock_irq(q->queue_lock);
+
+	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
+	if (unlikely(!rq)) {
+		/*
+		 * Allocation failed presumably due to memory. Undo anything
+		 * we might have messed up.
+		 *
+		 * Allocating task should really be put onto the front of the
+		 * wait queue, but this is pretty rare.
+		 */
+		spin_lock_irq(q->queue_lock);
+		freed_request(q, rw, priv);
+
+		/*
+		 * in the very unlikely event that allocation failed and no
+		 * requests for this direction was pending, mark us starved
+		 * so that freeing of a request in the other direction will
+		 * notice us. another possible fix would be to split the
+		 * rq mempool into READ and WRITE
+		 */
+rq_starved:
+		if (unlikely(rl->count[rw] == 0))
+			rl->starved[rw] = 1;
+
+		goto out;
+	}
+
+	/*
+	 * ioc may be NULL here, and ioc_batching will be false. That's
+	 * OK, if the queue is under the request limit then requests need
+	 * not count toward the nr_batch_requests limit. There will always
+	 * be some limit enforced by BLK_BATCH_TIME.
+	 */
+	if (ioc_batching(q, ioc))
+		ioc->nr_batch_requests--;
+
+	rq_init(q, rq);
+
+	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+out:
+	return rq;
+}
783 | |||
784 | /* | ||
785 | * No available requests for this queue, unplug the device and wait for some | ||
786 | * requests to become available. | ||
787 | * | ||
788 | * Called with q->queue_lock held, and returns with it unlocked. | ||
789 | */ | ||
790 | static struct request *get_request_wait(struct request_queue *q, int rw_flags, | ||
791 | struct bio *bio) | ||
792 | { | ||
793 | const int rw = rw_flags & 0x01; | ||
794 | struct request *rq; | ||
795 | |||
796 | rq = get_request(q, rw_flags, bio, GFP_NOIO); | ||
797 | while (!rq) { | ||
798 | DEFINE_WAIT(wait); | ||
799 | struct request_list *rl = &q->rq; | ||
800 | |||
801 | prepare_to_wait_exclusive(&rl->wait[rw], &wait, | ||
802 | TASK_UNINTERRUPTIBLE); | ||
803 | |||
804 | rq = get_request(q, rw_flags, bio, GFP_NOIO); | ||
805 | |||
806 | if (!rq) { | ||
807 | struct io_context *ioc; | ||
808 | |||
809 | blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ); | ||
810 | |||
811 | __generic_unplug_device(q); | ||
812 | spin_unlock_irq(q->queue_lock); | ||
813 | io_schedule(); | ||
814 | |||
815 | /* | ||
816 | * After sleeping, we become a "batching" process and | ||
817 | * will be able to allocate at least one request, and | ||
818 | * up to a big batch of them for a small period time. | ||
819 | * See ioc_batching, ioc_set_batching | ||
820 | */ | ||
821 | ioc = current_io_context(GFP_NOIO, q->node); | ||
822 | ioc_set_batching(q, ioc); | ||
823 | |||
824 | spin_lock_irq(q->queue_lock); | ||
825 | } | ||
826 | finish_wait(&rl->wait[rw], &wait); | ||
827 | } | ||
828 | |||
829 | return rq; | ||
830 | } | ||
831 | |||
832 | struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) | ||
833 | { | ||
834 | struct request *rq; | ||
835 | |||
836 | BUG_ON(rw != READ && rw != WRITE); | ||
837 | |||
838 | spin_lock_irq(q->queue_lock); | ||
839 | if (gfp_mask & __GFP_WAIT) { | ||
840 | rq = get_request_wait(q, rw, NULL); | ||
841 | } else { | ||
842 | rq = get_request(q, rw, NULL, gfp_mask); | ||
843 | if (!rq) | ||
844 | spin_unlock_irq(q->queue_lock); | ||
845 | } | ||
846 | /* q->queue_lock is unlocked at this point */ | ||
847 | |||
848 | return rq; | ||
849 | } | ||
850 | EXPORT_SYMBOL(blk_get_request); | ||
851 | |||
852 | /** | ||
853 | * blk_start_queueing - initiate dispatch of requests to device | ||
854 | * @q: request queue to kick into gear | ||
855 | * | ||
856 | * This is basically a helper to remove the need to know whether a queue | ||
857 | * is plugged or not if someone just wants to initiate dispatch of requests | ||
858 | * for this queue. | ||
859 | * | ||
860 | * The queue lock must be held with interrupts disabled. | ||
861 | */ | ||
862 | void blk_start_queueing(struct request_queue *q) | ||
863 | { | ||
864 | if (!blk_queue_plugged(q)) | ||
865 | q->request_fn(q); | ||
866 | else | ||
867 | __generic_unplug_device(q); | ||
868 | } | ||
869 | EXPORT_SYMBOL(blk_start_queueing); | ||
870 | |||
871 | /** | ||
872 | * blk_requeue_request - put a request back on queue | ||
873 | * @q: request queue where request should be inserted | ||
874 | * @rq: request to be inserted | ||
875 | * | ||
876 | * Description: | ||
877 | * Drivers often keep queueing requests until the hardware cannot accept | ||
878 | * more, when that condition happens we need to put the request back | ||
879 | * on the queue. Must be called with queue lock held. | ||
880 | */ | ||
881 | void blk_requeue_request(struct request_queue *q, struct request *rq) | ||
882 | { | ||
883 | blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); | ||
884 | |||
885 | if (blk_rq_tagged(rq)) | ||
886 | blk_queue_end_tag(q, rq); | ||
887 | |||
888 | elv_requeue_request(q, rq); | ||
889 | } | ||
890 | EXPORT_SYMBOL(blk_requeue_request); | ||
891 | |||
892 | /** | ||
893 | * blk_insert_request - insert a special request in to a request queue | ||
894 | * @q: request queue where request should be inserted | ||
895 | * @rq: request to be inserted | ||
896 | * @at_head: insert request at head or tail of queue | ||
897 | * @data: private data | ||
898 | * | ||
899 | * Description: | ||
900 | * Many block devices need to execute commands asynchronously, so they don't | ||
901 | * block the whole kernel from preemption during request execution. This is | ||
902 | * accomplished normally by inserting aritficial requests tagged as | ||
903 | * REQ_SPECIAL in to the corresponding request queue, and letting them be | ||
904 | * scheduled for actual execution by the request queue. | ||
905 | * | ||
906 | * We have the option of inserting the head or the tail of the queue. | ||
907 | * Typically we use the tail for new ioctls and so forth. We use the head | ||
908 | * of the queue for things like a QUEUE_FULL message from a device, or a | ||
909 | * host that is unable to accept a particular command. | ||
910 | */ | ||
911 | void blk_insert_request(struct request_queue *q, struct request *rq, | ||
912 | int at_head, void *data) | ||
913 | { | ||
914 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; | ||
915 | unsigned long flags; | ||
916 | |||
917 | /* | ||
918 | * tell I/O scheduler that this isn't a regular read/write (ie it | ||
919 | * must not attempt merges on this) and that it acts as a soft | ||
920 | * barrier | ||
921 | */ | ||
922 | rq->cmd_type = REQ_TYPE_SPECIAL; | ||
923 | rq->cmd_flags |= REQ_SOFTBARRIER; | ||
924 | |||
925 | rq->special = data; | ||
926 | |||
927 | spin_lock_irqsave(q->queue_lock, flags); | ||
928 | |||
929 | /* | ||
930 | * If command is tagged, release the tag | ||
931 | */ | ||
932 | if (blk_rq_tagged(rq)) | ||
933 | blk_queue_end_tag(q, rq); | ||
934 | |||
935 | drive_stat_acct(rq, 1); | ||
936 | __elv_add_request(q, rq, where, 0); | ||
937 | blk_start_queueing(q); | ||
938 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
939 | } | ||
940 | EXPORT_SYMBOL(blk_insert_request); | ||
941 | |||
942 | /* | ||
943 | * add-request adds a request to the linked list. | ||
944 | * queue lock is held and interrupts disabled, as we muck with the | ||
945 | * request queue list. | ||
946 | */ | ||
947 | static inline void add_request(struct request_queue *q, struct request *req) | ||
948 | { | ||
949 | drive_stat_acct(req, 1); | ||
950 | |||
951 | /* | ||
952 | * elevator indicated where it wants this request to be | ||
953 | * inserted at elevator_merge time | ||
954 | */ | ||
955 | __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0); | ||
956 | } | ||
957 | |||
958 | /* | ||
959 | * disk_round_stats() - Round off the performance stats on a struct | ||
960 | * disk_stats. | ||
961 | * | ||
962 | * The average IO queue length and utilisation statistics are maintained | ||
963 | * by observing the current state of the queue length and the amount of | ||
964 | * time it has been in that state. | ||
965 | * | ||
966 | * Normally, that accounting is done on IO completion, but that can result | ||
967 | * in more than a second's worth of IO being accounted for within any one | ||
968 | * second, leading to >100% utilisation. To deal with that, we call this | ||
969 | * function to do a round-off before returning the results when reading | ||
970 | * /proc/diskstats. This accounts immediately for all queue usage up to | ||
971 | * the current jiffies and restarts the counters again. | ||
972 | */ | ||
973 | void disk_round_stats(struct gendisk *disk) | ||
974 | { | ||
975 | unsigned long now = jiffies; | ||
976 | |||
977 | if (now == disk->stamp) | ||
978 | return; | ||
979 | |||
980 | if (disk->in_flight) { | ||
981 | __disk_stat_add(disk, time_in_queue, | ||
982 | disk->in_flight * (now - disk->stamp)); | ||
983 | __disk_stat_add(disk, io_ticks, (now - disk->stamp)); | ||
984 | } | ||
985 | disk->stamp = now; | ||
986 | } | ||
987 | EXPORT_SYMBOL_GPL(disk_round_stats); | ||
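As a worked example (illustrative numbers only): if two requests have been in flight for the 10 jiffies since disk->stamp, this call adds 2 * 10 = 20 to time_in_queue (so the average queue depth over that window is 20/10 = 2) and 10 to io_ticks, since the disk was busy for the whole window. A reader of /proc/diskstats then sees at most 100% utilisation for the interval, rather than completions for more than a second of I/O landing inside one second.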
988 | |||
989 | /* | ||
990 | * queue lock must be held | ||
991 | */ | ||
992 | void __blk_put_request(struct request_queue *q, struct request *req) | ||
993 | { | ||
994 | if (unlikely(!q)) | ||
995 | return; | ||
996 | if (unlikely(--req->ref_count)) | ||
997 | return; | ||
998 | |||
999 | elv_completed_request(q, req); | ||
1000 | |||
1001 | /* | ||
1002 | * Request may not have originated from ll_rw_blk. If not, | ||
1003 | * it didn't come out of our reserved rq pools | ||
1004 | */ | ||
1005 | if (req->cmd_flags & REQ_ALLOCED) { | ||
1006 | int rw = rq_data_dir(req); | ||
1007 | int priv = req->cmd_flags & REQ_ELVPRIV; | ||
1008 | |||
1009 | BUG_ON(!list_empty(&req->queuelist)); | ||
1010 | BUG_ON(!hlist_unhashed(&req->hash)); | ||
1011 | |||
1012 | blk_free_request(q, req); | ||
1013 | freed_request(q, rw, priv); | ||
1014 | } | ||
1015 | } | ||
1016 | EXPORT_SYMBOL_GPL(__blk_put_request); | ||
1017 | |||
1018 | void blk_put_request(struct request *req) | ||
1019 | { | ||
1020 | unsigned long flags; | ||
1021 | struct request_queue *q = req->q; | ||
1022 | |||
1023 | /* | ||
1024 | * Gee, IDE calls in w/ NULL q. Fix IDE and remove the | ||
1025 | * following if (q) test. | ||
1026 | */ | ||
1027 | if (q) { | ||
1028 | spin_lock_irqsave(q->queue_lock, flags); | ||
1029 | __blk_put_request(q, req); | ||
1030 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
1031 | } | ||
1032 | } | ||
1033 | EXPORT_SYMBOL(blk_put_request); | ||
1034 | |||
1035 | void init_request_from_bio(struct request *req, struct bio *bio) | ||
1036 | { | ||
1037 | req->cmd_type = REQ_TYPE_FS; | ||
1038 | |||
1039 | /* | ||
1040 | * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST) | ||
1041 | */ | ||
1042 | if (bio_rw_ahead(bio) || bio_failfast(bio)) | ||
1043 | req->cmd_flags |= REQ_FAILFAST; | ||
1044 | |||
1045 | /* | ||
1046 | * REQ_BARRIER implies no merging, but let's make it explicit | ||
1047 | */ | ||
1048 | if (unlikely(bio_barrier(bio))) | ||
1049 | req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE); | ||
1050 | |||
1051 | if (bio_sync(bio)) | ||
1052 | req->cmd_flags |= REQ_RW_SYNC; | ||
1053 | if (bio_rw_meta(bio)) | ||
1054 | req->cmd_flags |= REQ_RW_META; | ||
1055 | |||
1056 | req->errors = 0; | ||
1057 | req->hard_sector = req->sector = bio->bi_sector; | ||
1058 | req->ioprio = bio_prio(bio); | ||
1059 | req->start_time = jiffies; | ||
1060 | blk_rq_bio_prep(req->q, req, bio); | ||
1061 | } | ||
1062 | |||
1063 | static int __make_request(struct request_queue *q, struct bio *bio) | ||
1064 | { | ||
1065 | struct request *req; | ||
1066 | int el_ret, nr_sectors, barrier, err; | ||
1067 | const unsigned short prio = bio_prio(bio); | ||
1068 | const int sync = bio_sync(bio); | ||
1069 | int rw_flags; | ||
1070 | |||
1071 | nr_sectors = bio_sectors(bio); | ||
1072 | |||
1073 | /* | ||
1074 | * low level driver can indicate that it wants pages above a | ||
1075 | * certain limit bounced to low memory (ie for highmem, or even | ||
1076 | * ISA dma in theory) | ||
1077 | */ | ||
1078 | blk_queue_bounce(q, &bio); | ||
1079 | |||
1080 | barrier = bio_barrier(bio); | ||
1081 | if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) { | ||
1082 | err = -EOPNOTSUPP; | ||
1083 | goto end_io; | ||
1084 | } | ||
1085 | |||
1086 | spin_lock_irq(q->queue_lock); | ||
1087 | |||
1088 | if (unlikely(barrier) || elv_queue_empty(q)) | ||
1089 | goto get_rq; | ||
1090 | |||
1091 | el_ret = elv_merge(q, &req, bio); | ||
1092 | switch (el_ret) { | ||
1093 | case ELEVATOR_BACK_MERGE: | ||
1094 | BUG_ON(!rq_mergeable(req)); | ||
1095 | |||
1096 | if (!ll_back_merge_fn(q, req, bio)) | ||
1097 | break; | ||
1098 | |||
1099 | blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); | ||
1100 | |||
1101 | req->biotail->bi_next = bio; | ||
1102 | req->biotail = bio; | ||
1103 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; | ||
1104 | req->ioprio = ioprio_best(req->ioprio, prio); | ||
1105 | drive_stat_acct(req, 0); | ||
1106 | if (!attempt_back_merge(q, req)) | ||
1107 | elv_merged_request(q, req, el_ret); | ||
1108 | goto out; | ||
1109 | |||
1110 | case ELEVATOR_FRONT_MERGE: | ||
1111 | BUG_ON(!rq_mergeable(req)); | ||
1112 | |||
1113 | if (!ll_front_merge_fn(q, req, bio)) | ||
1114 | break; | ||
1115 | |||
1116 | blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); | ||
1117 | |||
1118 | bio->bi_next = req->bio; | ||
1119 | req->bio = bio; | ||
1120 | |||
1121 | /* | ||
1122 | * may not be valid. If the low level driver said | ||
1123 | * it didn't need a bounce buffer then it had better | ||
1124 | * not touch req->buffer either... | ||
1125 | */ | ||
1126 | req->buffer = bio_data(bio); | ||
1127 | req->current_nr_sectors = bio_cur_sectors(bio); | ||
1128 | req->hard_cur_sectors = req->current_nr_sectors; | ||
1129 | req->sector = req->hard_sector = bio->bi_sector; | ||
1130 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; | ||
1131 | req->ioprio = ioprio_best(req->ioprio, prio); | ||
1132 | drive_stat_acct(req, 0); | ||
1133 | if (!attempt_front_merge(q, req)) | ||
1134 | elv_merged_request(q, req, el_ret); | ||
1135 | goto out; | ||
1136 | |||
1137 | /* ELEVATOR_NO_MERGE: elevator says don't/can't merge. */ | ||
1138 | default: | ||
1139 | ; | ||
1140 | } | ||
1141 | |||
1142 | get_rq: | ||
1143 | /* | ||
1144 | * This sync check and mask will be re-done in init_request_from_bio(), | ||
1145 | * but we need to set it earlier to expose the sync flag to the | ||
1146 | * rq allocator and io schedulers. | ||
1147 | */ | ||
1148 | rw_flags = bio_data_dir(bio); | ||
1149 | if (sync) | ||
1150 | rw_flags |= REQ_RW_SYNC; | ||
1151 | |||
1152 | /* | ||
1153 | * Grab a free request. This may sleep but cannot fail. | ||
1154 | * Returns with the queue unlocked. | ||
1155 | */ | ||
1156 | req = get_request_wait(q, rw_flags, bio); | ||
1157 | |||
1158 | /* | ||
1159 | * After dropping the lock and possibly sleeping here, our request | ||
1160 | * may now be mergeable after it had proven unmergeable (above). | ||
1161 | * We don't worry about that case for efficiency. It won't happen | ||
1162 | * often, and the elevators are able to handle it. | ||
1163 | */ | ||
1164 | init_request_from_bio(req, bio); | ||
1165 | |||
1166 | spin_lock_irq(q->queue_lock); | ||
1167 | if (elv_queue_empty(q)) | ||
1168 | blk_plug_device(q); | ||
1169 | add_request(q, req); | ||
1170 | out: | ||
1171 | if (sync) | ||
1172 | __generic_unplug_device(q); | ||
1173 | |||
1174 | spin_unlock_irq(q->queue_lock); | ||
1175 | return 0; | ||
1176 | |||
1177 | end_io: | ||
1178 | bio_endio(bio, err); | ||
1179 | return 0; | ||
1180 | } | ||
1181 | |||
1182 | /* | ||
1183 | * If bio->bi_bdev is a partition, remap the location | ||
1184 | */ | ||
1185 | static inline void blk_partition_remap(struct bio *bio) | ||
1186 | { | ||
1187 | struct block_device *bdev = bio->bi_bdev; | ||
1188 | |||
1189 | if (bio_sectors(bio) && bdev != bdev->bd_contains) { | ||
1190 | struct hd_struct *p = bdev->bd_part; | ||
1191 | const int rw = bio_data_dir(bio); | ||
1192 | |||
1193 | p->sectors[rw] += bio_sectors(bio); | ||
1194 | p->ios[rw]++; | ||
1195 | |||
1196 | bio->bi_sector += p->start_sect; | ||
1197 | bio->bi_bdev = bdev->bd_contains; | ||
1198 | |||
1199 | blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio, | ||
1200 | bdev->bd_dev, bio->bi_sector, | ||
1201 | bio->bi_sector - p->start_sect); | ||
1202 | } | ||
1203 | } | ||
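For example, with a partition whose start_sect is 63, a bio aimed at sector 100 of that partition leaves here pointing at absolute sector 163 of the whole disk (bd_contains), with the per-partition sector and I/O counters bumped accordingly.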
1204 | |||
1205 | static void handle_bad_sector(struct bio *bio) | ||
1206 | { | ||
1207 | char b[BDEVNAME_SIZE]; | ||
1208 | |||
1209 | printk(KERN_INFO "attempt to access beyond end of device\n"); | ||
1210 | printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", | ||
1211 | bdevname(bio->bi_bdev, b), | ||
1212 | bio->bi_rw, | ||
1213 | (unsigned long long)bio->bi_sector + bio_sectors(bio), | ||
1214 | (long long)(bio->bi_bdev->bd_inode->i_size >> 9)); | ||
1215 | |||
1216 | set_bit(BIO_EOF, &bio->bi_flags); | ||
1217 | } | ||
1218 | |||
1219 | #ifdef CONFIG_FAIL_MAKE_REQUEST | ||
1220 | |||
1221 | static DECLARE_FAULT_ATTR(fail_make_request); | ||
1222 | |||
1223 | static int __init setup_fail_make_request(char *str) | ||
1224 | { | ||
1225 | return setup_fault_attr(&fail_make_request, str); | ||
1226 | } | ||
1227 | __setup("fail_make_request=", setup_fail_make_request); | ||
1228 | |||
1229 | static int should_fail_request(struct bio *bio) | ||
1230 | { | ||
1231 | if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) || | ||
1232 | (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail)) | ||
1233 | return should_fail(&fail_make_request, bio->bi_size); | ||
1234 | |||
1235 | return 0; | ||
1236 | } | ||
1237 | |||
1238 | static int __init fail_make_request_debugfs(void) | ||
1239 | { | ||
1240 | return init_fault_attr_dentries(&fail_make_request, | ||
1241 | "fail_make_request"); | ||
1242 | } | ||
1243 | |||
1244 | late_initcall(fail_make_request_debugfs); | ||
1245 | |||
1246 | #else /* CONFIG_FAIL_MAKE_REQUEST */ | ||
1247 | |||
1248 | static inline int should_fail_request(struct bio *bio) | ||
1249 | { | ||
1250 | return 0; | ||
1251 | } | ||
1252 | |||
1253 | #endif /* CONFIG_FAIL_MAKE_REQUEST */ | ||
1254 | |||
1255 | /* | ||
1256 | * Check whether this bio extends beyond the end of the device. | ||
1257 | */ | ||
1258 | static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) | ||
1259 | { | ||
1260 | sector_t maxsector; | ||
1261 | |||
1262 | if (!nr_sectors) | ||
1263 | return 0; | ||
1264 | |||
1265 | /* Test device or partition size, when known. */ | ||
1266 | maxsector = bio->bi_bdev->bd_inode->i_size >> 9; | ||
1267 | if (maxsector) { | ||
1268 | sector_t sector = bio->bi_sector; | ||
1269 | |||
1270 | if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { | ||
1271 | /* | ||
1272 | * This may well happen - the kernel calls bread() | ||
1273 | * without checking the size of the device, e.g., when | ||
1274 | * mounting a device. | ||
1275 | */ | ||
1276 | handle_bad_sector(bio); | ||
1277 | return 1; | ||
1278 | } | ||
1279 | } | ||
1280 | |||
1281 | return 0; | ||
1282 | } | ||
1283 | |||
1284 | /** | ||
1285 | * generic_make_request: hand a buffer to its device driver for I/O | ||
1286 | * @bio: The bio describing the location in memory and on the device. | ||
1287 | * | ||
1288 | * generic_make_request() is used to make I/O requests of block | ||
1289 | * devices. It is passed a &struct bio, which describes the I/O that needs | ||
1290 | * to be done. | ||
1291 | * | ||
1292 | * generic_make_request() does not return any status. The | ||
1293 | * success/failure status of the request, along with notification of | ||
1294 | * completion, is delivered asynchronously through the bio->bi_end_io | ||
1295 | * function described (one day) elsewhere. | ||
1296 | * | ||
1297 | * The caller of generic_make_request must make sure that bi_io_vec | ||
1298 | * is set to describe the memory buffer, that bi_bdev and bi_sector are | ||
1299 | * set to describe the device address, and that | ||
1300 | * bi_end_io and optionally bi_private are set to describe how | ||
1301 | * completion notification should be signaled. | ||
1302 | * | ||
1303 | * generic_make_request and the drivers it calls may use bi_next if this | ||
1304 | * bio happens to be merged with someone else, and may change bi_bdev and | ||
1305 | * bi_sector for remaps as it sees fit. So the values of these fields | ||
1306 | * should NOT be depended on after the call to generic_make_request. | ||
1307 | */ | ||
1308 | static inline void __generic_make_request(struct bio *bio) | ||
1309 | { | ||
1310 | struct request_queue *q; | ||
1311 | sector_t old_sector; | ||
1312 | int ret, nr_sectors = bio_sectors(bio); | ||
1313 | dev_t old_dev; | ||
1314 | int err = -EIO; | ||
1315 | |||
1316 | might_sleep(); | ||
1317 | |||
1318 | if (bio_check_eod(bio, nr_sectors)) | ||
1319 | goto end_io; | ||
1320 | |||
1321 | /* | ||
1322 | * Resolve the mapping until finished. (drivers are | ||
1323 | * still free to implement/resolve their own stacking | ||
1324 | * by explicitly returning 0) | ||
1325 | * | ||
1326 | * NOTE: we don't repeat the blk_size check for each new device. | ||
1327 | * Stacking drivers are expected to know what they are doing. | ||
1328 | */ | ||
1329 | old_sector = -1; | ||
1330 | old_dev = 0; | ||
1331 | do { | ||
1332 | char b[BDEVNAME_SIZE]; | ||
1333 | |||
1334 | q = bdev_get_queue(bio->bi_bdev); | ||
1335 | if (!q) { | ||
1336 | printk(KERN_ERR | ||
1337 | "generic_make_request: Trying to access " | ||
1338 | "nonexistent block-device %s (%Lu)\n", | ||
1339 | bdevname(bio->bi_bdev, b), | ||
1340 | (long long) bio->bi_sector); | ||
1341 | end_io: | ||
1342 | bio_endio(bio, err); | ||
1343 | break; | ||
1344 | } | ||
1345 | |||
1346 | if (unlikely(nr_sectors > q->max_hw_sectors)) { | ||
1347 | printk(KERN_ERR "bio too big device %s (%u > %u)\n", | ||
1348 | bdevname(bio->bi_bdev, b), | ||
1349 | bio_sectors(bio), | ||
1350 | q->max_hw_sectors); | ||
1351 | goto end_io; | ||
1352 | } | ||
1353 | |||
1354 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) | ||
1355 | goto end_io; | ||
1356 | |||
1357 | if (should_fail_request(bio)) | ||
1358 | goto end_io; | ||
1359 | |||
1360 | /* | ||
1361 | * If this device has partitions, remap block n | ||
1362 | * of partition p to block n+start(p) of the disk. | ||
1363 | */ | ||
1364 | blk_partition_remap(bio); | ||
1365 | |||
1366 | if (old_sector != -1) | ||
1367 | blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, | ||
1368 | old_sector); | ||
1369 | |||
1370 | blk_add_trace_bio(q, bio, BLK_TA_QUEUE); | ||
1371 | |||
1372 | old_sector = bio->bi_sector; | ||
1373 | old_dev = bio->bi_bdev->bd_dev; | ||
1374 | |||
1375 | if (bio_check_eod(bio, nr_sectors)) | ||
1376 | goto end_io; | ||
1377 | if (bio_empty_barrier(bio) && !q->prepare_flush_fn) { | ||
1378 | err = -EOPNOTSUPP; | ||
1379 | goto end_io; | ||
1380 | } | ||
1381 | |||
1382 | ret = q->make_request_fn(q, bio); | ||
1383 | } while (ret); | ||
1384 | } | ||
1385 | |||
1386 | /* | ||
1387 | * We only want one ->make_request_fn to be active at a time, | ||
1388 | * else stack usage with stacked devices could be a problem. | ||
1389 | * So use current->bio_{list,tail} to keep a list of requests | ||
1390 | * submitted by a make_request_fn function. | ||
1391 | * current->bio_tail is also used as a flag to say if | ||
1392 | * generic_make_request is currently active in this task or not. | ||
1393 | * If it is NULL, then no make_request is active. If it is non-NULL, | ||
1394 | * then a make_request is active, and new requests should be added | ||
1395 | * at the tail | ||
1396 | */ | ||
1397 | void generic_make_request(struct bio *bio) | ||
1398 | { | ||
1399 | if (current->bio_tail) { | ||
1400 | /* make_request is active */ | ||
1401 | *(current->bio_tail) = bio; | ||
1402 | bio->bi_next = NULL; | ||
1403 | current->bio_tail = &bio->bi_next; | ||
1404 | return; | ||
1405 | } | ||
1406 | /* following loop may be a bit non-obvious, and so deserves some | ||
1407 | * explanation. | ||
1408 | * Before entering the loop, bio->bi_next is NULL (as all callers | ||
1409 | * ensure that) so we have a list with a single bio. | ||
1410 | * We pretend that we have just taken it off a longer list, so | ||
1411 | * we assign bio_list to the next (which is NULL) and bio_tail | ||
1412 | * to &bio_list, thus initialising the bio_list of new bios to be | ||
1413 | * added. __generic_make_request may indeed add some more bios | ||
1414 | * through a recursive call to generic_make_request. If it | ||
1415 | * did, we find a non-NULL value in bio_list and re-enter the loop | ||
1416 | * from the top. In this case we really did just take the bio | ||
1417 | * off the top of the list (no pretending) and so fix up bio_list and | ||
1418 | * bio_tail or bi_next, and call into __generic_make_request again. | ||
1419 | * | ||
1420 | * The loop was structured like this to make only one call to | ||
1421 | * __generic_make_request (which is important as it is large and | ||
1422 | * inlined) and to keep the structure simple. | ||
1423 | */ | ||
1424 | BUG_ON(bio->bi_next); | ||
1425 | do { | ||
1426 | current->bio_list = bio->bi_next; | ||
1427 | if (bio->bi_next == NULL) | ||
1428 | current->bio_tail = ¤t->bio_list; | ||
1429 | else | ||
1430 | bio->bi_next = NULL; | ||
1431 | __generic_make_request(bio); | ||
1432 | bio = current->bio_list; | ||
1433 | } while (bio); | ||
1434 | current->bio_tail = NULL; /* deactivate */ | ||
1435 | } | ||
1436 | EXPORT_SYMBOL(generic_make_request); | ||
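The effect of the bio_list machinery is easiest to see from a stacking driver. A minimal sketch of a remapping make_request_fn, assuming a hypothetical struct remap_dev stashed in q->queuedata:

	static int remap_make_request(struct request_queue *q, struct bio *bio)
	{
		struct remap_dev *rd = q->queuedata;	/* hypothetical private data */

		bio->bi_bdev = rd->target_bdev;
		bio->bi_sector += rd->start_offset;

		/*
		 * If we were called from generic_make_request(), this only
		 * appends bio to current->bio_list; the outer loop submits
		 * it iteratively, so stacking never deepens the call stack.
		 */
		generic_make_request(bio);
		return 0;	/* 0 tells __generic_make_request() we are done */
	}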
1437 | |||
1438 | /** | ||
1439 | * submit_bio: submit a bio to the block device layer for I/O | ||
1440 | * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) | ||
1441 | * @bio: The &struct bio which describes the I/O | ||
1442 | * | ||
1443 | * submit_bio() is very similar in purpose to generic_make_request(), and | ||
1444 | * uses that function to do most of the work. Both are fairly rough | ||
1445 | * interfaces; @bio must be set up beforehand and ready for I/O. | ||
1446 | * | ||
1447 | */ | ||
1448 | void submit_bio(int rw, struct bio *bio) | ||
1449 | { | ||
1450 | int count = bio_sectors(bio); | ||
1451 | |||
1452 | bio->bi_rw |= rw; | ||
1453 | |||
1454 | /* | ||
1455 | * If it's a regular read/write or a barrier with data attached, | ||
1456 | * go through the normal accounting stuff before submission. | ||
1457 | */ | ||
1458 | if (!bio_empty_barrier(bio)) { | ||
1459 | |||
1460 | BIO_BUG_ON(!bio->bi_size); | ||
1461 | BIO_BUG_ON(!bio->bi_io_vec); | ||
1462 | |||
1463 | if (rw & WRITE) { | ||
1464 | count_vm_events(PGPGOUT, count); | ||
1465 | } else { | ||
1466 | task_io_account_read(bio->bi_size); | ||
1467 | count_vm_events(PGPGIN, count); | ||
1468 | } | ||
1469 | |||
1470 | if (unlikely(block_dump)) { | ||
1471 | char b[BDEVNAME_SIZE]; | ||
1472 | printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n", | ||
1473 | current->comm, task_pid_nr(current), | ||
1474 | (rw & WRITE) ? "WRITE" : "READ", | ||
1475 | (unsigned long long)bio->bi_sector, | ||
1476 | bdevname(bio->bi_bdev, b)); | ||
1477 | } | ||
1478 | } | ||
1479 | |||
1480 | generic_make_request(bio); | ||
1481 | } | ||
1482 | EXPORT_SYMBOL(submit_bio); | ||
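A minimal synchronous read through this interface might look as follows; my_end_io() and read_one_page() are illustrative names, while the bio calls match this tree's API (bi_end_io takes the bio and an error code):

	static void my_end_io(struct bio *bio, int error)
	{
		complete(bio->bi_private);	/* wake the submitter */
	}

	static int read_one_page(struct block_device *bdev, sector_t sector,
				 struct page *page)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct bio *bio = bio_alloc(GFP_KERNEL, 1);
		int err;

		if (!bio)
			return -ENOMEM;

		bio->bi_bdev = bdev;
		bio->bi_sector = sector;
		bio->bi_end_io = my_end_io;
		bio->bi_private = &done;
		if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
			bio_put(bio);
			return -EIO;
		}

		submit_bio(READ, bio);
		wait_for_completion(&done);
		err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
		bio_put(bio);
		return err;
	}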
1483 | |||
1484 | /** | ||
1485 | * __end_that_request_first - end I/O on a request | ||
1486 | * @req: the request being processed | ||
1487 | * @error: 0 for success, < 0 for error | ||
1488 | * @nr_bytes: number of bytes to complete | ||
1489 | * | ||
1490 | * Description: | ||
1491 | * Ends I/O on a number of bytes attached to @req, and sets it up | ||
1492 | * for the next range of segments (if any) in the cluster. | ||
1493 | * | ||
1494 | * Return: | ||
1495 | * 0 - we are done with this request, call end_that_request_last() | ||
1496 | * 1 - still buffers pending for this request | ||
1497 | **/ | ||
1498 | static int __end_that_request_first(struct request *req, int error, | ||
1499 | int nr_bytes) | ||
1500 | { | ||
1501 | int total_bytes, bio_nbytes, next_idx = 0; | ||
1502 | struct bio *bio; | ||
1503 | |||
1504 | blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE); | ||
1505 | |||
1506 | /* | ||
1507 | * for a REQ_BLOCK_PC request, we want to carry any | ||
1508 | * sense data with us all the way through | ||
1509 | */ | ||
1510 | if (!blk_pc_request(req)) | ||
1511 | req->errors = 0; | ||
1512 | |||
1513 | if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { | ||
1514 | printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", | ||
1515 | req->rq_disk ? req->rq_disk->disk_name : "?", | ||
1516 | (unsigned long long)req->sector); | ||
1517 | } | ||
1518 | |||
1519 | if (blk_fs_request(req) && req->rq_disk) { | ||
1520 | const int rw = rq_data_dir(req); | ||
1521 | |||
1522 | disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9); | ||
1523 | } | ||
1524 | |||
1525 | total_bytes = bio_nbytes = 0; | ||
1526 | while ((bio = req->bio) != NULL) { | ||
1527 | int nbytes; | ||
1528 | |||
1529 | /* | ||
1530 | * For an empty barrier request, the low level driver must | ||
1531 | * store a potential error location in ->sector. We pass | ||
1532 | * that back up in ->bi_sector. | ||
1533 | */ | ||
1534 | if (blk_empty_barrier(req)) | ||
1535 | bio->bi_sector = req->sector; | ||
1536 | |||
1537 | if (nr_bytes >= bio->bi_size) { | ||
1538 | req->bio = bio->bi_next; | ||
1539 | nbytes = bio->bi_size; | ||
1540 | req_bio_endio(req, bio, nbytes, error); | ||
1541 | next_idx = 0; | ||
1542 | bio_nbytes = 0; | ||
1543 | } else { | ||
1544 | int idx = bio->bi_idx + next_idx; | ||
1545 | |||
1546 | if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { | ||
1547 | blk_dump_rq_flags(req, "__end_that"); | ||
1548 | printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", | ||
1549 | __FUNCTION__, bio->bi_idx, | ||
1550 | bio->bi_vcnt); | ||
1551 | break; | ||
1552 | } | ||
1553 | |||
1554 | nbytes = bio_iovec_idx(bio, idx)->bv_len; | ||
1555 | BIO_BUG_ON(nbytes > bio->bi_size); | ||
1556 | |||
1557 | /* | ||
1558 | * not a complete bvec done | ||
1559 | */ | ||
1560 | if (unlikely(nbytes > nr_bytes)) { | ||
1561 | bio_nbytes += nr_bytes; | ||
1562 | total_bytes += nr_bytes; | ||
1563 | break; | ||
1564 | } | ||
1565 | |||
1566 | /* | ||
1567 | * advance to the next vector | ||
1568 | */ | ||
1569 | next_idx++; | ||
1570 | bio_nbytes += nbytes; | ||
1571 | } | ||
1572 | |||
1573 | total_bytes += nbytes; | ||
1574 | nr_bytes -= nbytes; | ||
1575 | |||
1576 | bio = req->bio; | ||
1577 | if (bio) { | ||
1578 | /* | ||
1579 | * end more in this run, or just return 'not-done' | ||
1580 | */ | ||
1581 | if (unlikely(nr_bytes <= 0)) | ||
1582 | break; | ||
1583 | } | ||
1584 | } | ||
1585 | |||
1586 | /* | ||
1587 | * completely done | ||
1588 | */ | ||
1589 | if (!req->bio) | ||
1590 | return 0; | ||
1591 | |||
1592 | /* | ||
1593 | * if the request wasn't completed, update state | ||
1594 | */ | ||
1595 | if (bio_nbytes) { | ||
1596 | req_bio_endio(req, bio, bio_nbytes, error); | ||
1597 | bio->bi_idx += next_idx; | ||
1598 | bio_iovec(bio)->bv_offset += nr_bytes; | ||
1599 | bio_iovec(bio)->bv_len -= nr_bytes; | ||
1600 | } | ||
1601 | |||
1602 | blk_recalc_rq_sectors(req, total_bytes >> 9); | ||
1603 | blk_recalc_rq_segments(req); | ||
1604 | return 1; | ||
1605 | } | ||
1606 | |||
1607 | /* | ||
1608 | * splice the completion data to a local structure and hand off to | ||
1609 | * process_completion_queue() to complete the requests | ||
1610 | */ | ||
1611 | static void blk_done_softirq(struct softirq_action *h) | ||
1612 | { | ||
1613 | struct list_head *cpu_list, local_list; | ||
1614 | |||
1615 | local_irq_disable(); | ||
1616 | cpu_list = &__get_cpu_var(blk_cpu_done); | ||
1617 | list_replace_init(cpu_list, &local_list); | ||
1618 | local_irq_enable(); | ||
1619 | |||
1620 | while (!list_empty(&local_list)) { | ||
1621 | struct request *rq; | ||
1622 | |||
1623 | rq = list_entry(local_list.next, struct request, donelist); | ||
1624 | list_del_init(&rq->donelist); | ||
1625 | rq->q->softirq_done_fn(rq); | ||
1626 | } | ||
1627 | } | ||
1628 | |||
1629 | static int __cpuinit blk_cpu_notify(struct notifier_block *self, | ||
1630 | unsigned long action, void *hcpu) | ||
1631 | { | ||
1632 | /* | ||
1633 | * If a CPU goes away, splice its entries to the current CPU | ||
1634 | * and trigger a run of the softirq | ||
1635 | */ | ||
1636 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { | ||
1637 | int cpu = (unsigned long) hcpu; | ||
1638 | |||
1639 | local_irq_disable(); | ||
1640 | list_splice_init(&per_cpu(blk_cpu_done, cpu), | ||
1641 | &__get_cpu_var(blk_cpu_done)); | ||
1642 | raise_softirq_irqoff(BLOCK_SOFTIRQ); | ||
1643 | local_irq_enable(); | ||
1644 | } | ||
1645 | |||
1646 | return NOTIFY_OK; | ||
1647 | } | ||
1648 | |||
1649 | |||
1650 | static struct notifier_block blk_cpu_notifier __cpuinitdata = { | ||
1651 | .notifier_call = blk_cpu_notify, | ||
1652 | }; | ||
1653 | |||
1654 | /** | ||
1655 | * blk_complete_request - end I/O on a request | ||
1656 | * @req: the request being processed | ||
1657 | * | ||
1658 | * Description: | ||
1659 | * Ends all I/O on a request. It does not handle partial completions, | ||
1660 | * unless the driver actually implements this in its completion callback | ||
1661 | * through requeueing. The actual completion happens out-of-order, | ||
1662 | * through a softirq handler. The user must have registered a completion | ||
1663 | * callback through blk_queue_softirq_done(). | ||
1664 | **/ | ||
1665 | |||
1666 | void blk_complete_request(struct request *req) | ||
1667 | { | ||
1668 | struct list_head *cpu_list; | ||
1669 | unsigned long flags; | ||
1670 | |||
1671 | BUG_ON(!req->q->softirq_done_fn); | ||
1672 | |||
1673 | local_irq_save(flags); | ||
1674 | |||
1675 | cpu_list = &__get_cpu_var(blk_cpu_done); | ||
1676 | list_add_tail(&req->donelist, cpu_list); | ||
1677 | raise_softirq_irqoff(BLOCK_SOFTIRQ); | ||
1678 | |||
1679 | local_irq_restore(flags); | ||
1680 | } | ||
1681 | EXPORT_SYMBOL(blk_complete_request); | ||
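The usual split is that the IRQ handler only queues the request, and the heavier completion work runs from BLOCK_SOFTIRQ. A sketch with hypothetical mydev_* helpers around the real block-layer calls:

	static void mydev_softirq_done(struct request *rq)
	{
		/* softirq context; blk_end_request() takes the queue lock itself */
		blk_end_request(rq, rq->errors ? -EIO : 0, blk_rq_bytes(rq));
	}

	static irqreturn_t mydev_irq(int irq, void *data)
	{
		struct request *rq = mydev_fetch_completed(data);	/* hypothetical */

		blk_complete_request(rq);	/* just queues rq for the softirq */
		return IRQ_HANDLED;
	}

	/* at probe time: blk_queue_softirq_done(q, mydev_softirq_done); */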
1682 | |||
1683 | /* | ||
1684 | * queue lock must be held | ||
1685 | */ | ||
1686 | static void end_that_request_last(struct request *req, int error) | ||
1687 | { | ||
1688 | struct gendisk *disk = req->rq_disk; | ||
1689 | |||
1690 | if (blk_rq_tagged(req)) | ||
1691 | blk_queue_end_tag(req->q, req); | ||
1692 | |||
1693 | if (blk_queued_rq(req)) | ||
1694 | blkdev_dequeue_request(req); | ||
1695 | |||
1696 | if (unlikely(laptop_mode) && blk_fs_request(req)) | ||
1697 | laptop_io_completion(); | ||
1698 | |||
1699 | /* | ||
1700 | * Account IO completion. bar_rq isn't accounted as a normal | ||
1701 | * IO on queueing nor completion. Accounting the containing | ||
1702 | * request is enough. | ||
1703 | */ | ||
1704 | if (disk && blk_fs_request(req) && req != &req->q->bar_rq) { | ||
1705 | unsigned long duration = jiffies - req->start_time; | ||
1706 | const int rw = rq_data_dir(req); | ||
1707 | |||
1708 | __disk_stat_inc(disk, ios[rw]); | ||
1709 | __disk_stat_add(disk, ticks[rw], duration); | ||
1710 | disk_round_stats(disk); | ||
1711 | disk->in_flight--; | ||
1712 | } | ||
1713 | |||
1714 | if (req->end_io) | ||
1715 | req->end_io(req, error); | ||
1716 | else { | ||
1717 | if (blk_bidi_rq(req)) | ||
1718 | __blk_put_request(req->next_rq->q, req->next_rq); | ||
1719 | |||
1720 | __blk_put_request(req->q, req); | ||
1721 | } | ||
1722 | } | ||
1723 | |||
1724 | static inline void __end_request(struct request *rq, int uptodate, | ||
1725 | unsigned int nr_bytes) | ||
1726 | { | ||
1727 | int error = 0; | ||
1728 | |||
1729 | if (uptodate <= 0) | ||
1730 | error = uptodate ? uptodate : -EIO; | ||
1731 | |||
1732 | __blk_end_request(rq, error, nr_bytes); | ||
1733 | } | ||
1734 | |||
1735 | /** | ||
1736 | * blk_rq_bytes - Returns bytes left to complete in the entire request | ||
1737 | **/ | ||
1738 | unsigned int blk_rq_bytes(struct request *rq) | ||
1739 | { | ||
1740 | if (blk_fs_request(rq)) | ||
1741 | return rq->hard_nr_sectors << 9; | ||
1742 | |||
1743 | return rq->data_len; | ||
1744 | } | ||
1745 | EXPORT_SYMBOL_GPL(blk_rq_bytes); | ||
1746 | |||
1747 | /** | ||
1748 | * blk_rq_cur_bytes - Returns bytes left to complete in the current segment | ||
1749 | **/ | ||
1750 | unsigned int blk_rq_cur_bytes(struct request *rq) | ||
1751 | { | ||
1752 | if (blk_fs_request(rq)) | ||
1753 | return rq->current_nr_sectors << 9; | ||
1754 | |||
1755 | if (rq->bio) | ||
1756 | return rq->bio->bi_size; | ||
1757 | |||
1758 | return rq->data_len; | ||
1759 | } | ||
1760 | EXPORT_SYMBOL_GPL(blk_rq_cur_bytes); | ||
1761 | |||
1762 | /** | ||
1763 | * end_queued_request - end all I/O on a queued request | ||
1764 | * @rq: the request being processed | ||
1765 | * @uptodate: error value or 0/1 uptodate flag | ||
1766 | * | ||
1767 | * Description: | ||
1768 | * Ends all I/O on a request, and removes it from the block layer queues. | ||
1769 | * Not suitable for normal IO completion, unless the driver still has | ||
1770 | * the request attached to the block layer. | ||
1771 | * | ||
1772 | **/ | ||
1773 | void end_queued_request(struct request *rq, int uptodate) | ||
1774 | { | ||
1775 | __end_request(rq, uptodate, blk_rq_bytes(rq)); | ||
1776 | } | ||
1777 | EXPORT_SYMBOL(end_queued_request); | ||
1778 | |||
1779 | /** | ||
1780 | * end_dequeued_request - end all I/O on a dequeued request | ||
1781 | * @rq: the request being processed | ||
1782 | * @uptodate: error value or 0/1 uptodate flag | ||
1783 | * | ||
1784 | * Description: | ||
1785 | * Ends all I/O on a request. The request must already have been | ||
1786 | * dequeued using blkdev_dequeue_request(), as is normally the case | ||
1787 | * for most drivers. | ||
1788 | * | ||
1789 | **/ | ||
1790 | void end_dequeued_request(struct request *rq, int uptodate) | ||
1791 | { | ||
1792 | __end_request(rq, uptodate, blk_rq_bytes(rq)); | ||
1793 | } | ||
1794 | EXPORT_SYMBOL(end_dequeued_request); | ||
1795 | |||
1796 | |||
1797 | /** | ||
1798 | * end_request - end I/O on the current segment of the request | ||
1799 | * @req: the request being processed | ||
1800 | * @uptodate: error value or 0/1 uptodate flag | ||
1801 | * | ||
1802 | * Description: | ||
1803 | * Ends I/O on the current segment of a request. If that is the only | ||
1804 | * remaining segment, the request is also completed and freed. | ||
1805 | * | ||
1806 | * This is a remnant of how older block drivers handled IO completions. | ||
1807 | * Modern drivers typically end IO on the full request in one go, unless | ||
1808 | * they have a residual value to account for. For that case this function | ||
1809 | * isn't really useful, unless the residual just happens to be the | ||
1810 | * full current segment. In other words, don't use this function in new | ||
1811 | * code. Use blk_end_request() instead, or __blk_end_request() if the | ||
1812 | * queue lock is already held; both also handle | ||
1813 | * partial completions. | ||
1814 | * | ||
1815 | **/ | ||
1816 | void end_request(struct request *req, int uptodate) | ||
1817 | { | ||
1818 | __end_request(req, uptodate, req->hard_cur_sectors << 9); | ||
1819 | } | ||
1820 | EXPORT_SYMBOL(end_request); | ||
1821 | |||
1822 | /** | ||
1823 | * blk_end_io - Generic end_io function to complete a request. | ||
1824 | * @rq: the request being processed | ||
1825 | * @error: 0 for success, < 0 for error | ||
1826 | * @nr_bytes: number of bytes to complete @rq | ||
1827 | * @bidi_bytes: number of bytes to complete @rq->next_rq | ||
1828 | * @drv_callback: function called between completion of bios in the request | ||
1829 | * and completion of the request. | ||
1830 | * If the callback returns nonzero, this helper returns without | ||
1831 | * completion of the request. | ||
1832 | * | ||
1833 | * Description: | ||
1834 | * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. | ||
1835 | * If @rq has data left over, sets it up for the next range of segments. | ||
1836 | * | ||
1837 | * Return: | ||
1838 | * 0 - we are done with this request | ||
1839 | * 1 - this request is not freed yet, it still has pending buffers. | ||
1840 | **/ | ||
1841 | static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, | ||
1842 | unsigned int bidi_bytes, | ||
1843 | int (drv_callback)(struct request *)) | ||
1844 | { | ||
1845 | struct request_queue *q = rq->q; | ||
1846 | unsigned long flags = 0UL; | ||
1847 | |||
1848 | if (blk_fs_request(rq) || blk_pc_request(rq)) { | ||
1849 | if (__end_that_request_first(rq, error, nr_bytes)) | ||
1850 | return 1; | ||
1851 | |||
1852 | /* Bidi request must be completed as a whole */ | ||
1853 | if (blk_bidi_rq(rq) && | ||
1854 | __end_that_request_first(rq->next_rq, error, bidi_bytes)) | ||
1855 | return 1; | ||
1856 | } | ||
1857 | |||
1858 | /* Special feature for tricky drivers */ | ||
1859 | if (drv_callback && drv_callback(rq)) | ||
1860 | return 1; | ||
1861 | |||
1862 | add_disk_randomness(rq->rq_disk); | ||
1863 | |||
1864 | spin_lock_irqsave(q->queue_lock, flags); | ||
1865 | end_that_request_last(rq, error); | ||
1866 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
1867 | |||
1868 | return 0; | ||
1869 | } | ||
1870 | |||
1871 | /** | ||
1872 | * blk_end_request - Helper function for drivers to complete the request. | ||
1873 | * @rq: the request being processed | ||
1874 | * @error: 0 for success, < 0 for error | ||
1875 | * @nr_bytes: number of bytes to complete | ||
1876 | * | ||
1877 | * Description: | ||
1878 | * Ends I/O on a number of bytes attached to @rq. | ||
1879 | * If @rq has data left over, sets it up for the next range of segments. | ||
1880 | * | ||
1881 | * Return: | ||
1882 | * 0 - we are done with this request | ||
1883 | * 1 - still buffers pending for this request | ||
1884 | **/ | ||
1885 | int blk_end_request(struct request *rq, int error, unsigned int nr_bytes) | ||
1886 | { | ||
1887 | return blk_end_io(rq, error, nr_bytes, 0, NULL); | ||
1888 | } | ||
1889 | EXPORT_SYMBOL_GPL(blk_end_request); | ||
1890 | |||
1891 | /** | ||
1892 | * __blk_end_request - Helper function for drivers to complete the request. | ||
1893 | * @rq: the request being processed | ||
1894 | * @error: 0 for success, < 0 for error | ||
1895 | * @nr_bytes: number of bytes to complete | ||
1896 | * | ||
1897 | * Description: | ||
1898 | * Must be called with queue lock held unlike blk_end_request(). | ||
1899 | * | ||
1900 | * Return: | ||
1901 | * 0 - we are done with this request | ||
1902 | * 1 - still buffers pending for this request | ||
1903 | **/ | ||
1904 | int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) | ||
1905 | { | ||
1906 | if (blk_fs_request(rq) || blk_pc_request(rq)) { | ||
1907 | if (__end_that_request_first(rq, error, nr_bytes)) | ||
1908 | return 1; | ||
1909 | } | ||
1910 | |||
1911 | add_disk_randomness(rq->rq_disk); | ||
1912 | |||
1913 | end_that_request_last(rq, error); | ||
1914 | |||
1915 | return 0; | ||
1916 | } | ||
1917 | EXPORT_SYMBOL_GPL(__blk_end_request); | ||
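The locked variant fits naturally inside a request_fn, where the queue lock is already held. The completion half of the request_fn sketch shown earlier might look like this for a toy synchronous (ramdisk-style) device; mydev_do_sync_io() is hypothetical:

	static void mydev_request_fn(struct request_queue *q)
	{
		struct request *rq;
		int err;

		while ((rq = elv_next_request(q)) != NULL) {
			blkdev_dequeue_request(rq);
			err = mydev_do_sync_io(rq);	/* hypothetical, memory-backed */
			__blk_end_request(rq, err, blk_rq_bytes(rq));
		}
	}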
1918 | |||
1919 | /** | ||
1920 | * blk_end_bidi_request - Helper function for drivers to complete bidi request. | ||
1921 | * @rq: the bidi request being processed | ||
1922 | * @error: 0 for success, < 0 for error | ||
1923 | * @nr_bytes: number of bytes to complete @rq | ||
1924 | * @bidi_bytes: number of bytes to complete @rq->next_rq | ||
1925 | * | ||
1926 | * Description: | ||
1927 | * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. | ||
1928 | * | ||
1929 | * Return: | ||
1930 | * 0 - we are done with this request | ||
1931 | * 1 - still buffers pending for this request | ||
1932 | **/ | ||
1933 | int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, | ||
1934 | unsigned int bidi_bytes) | ||
1935 | { | ||
1936 | return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL); | ||
1937 | } | ||
1938 | EXPORT_SYMBOL_GPL(blk_end_bidi_request); | ||
1939 | |||
1940 | /** | ||
1941 | * blk_end_request_callback - Special helper function for tricky drivers | ||
1942 | * @rq: the request being processed | ||
1943 | * @error: 0 for success, < 0 for error | ||
1944 | * @nr_bytes: number of bytes to complete | ||
1945 | * @drv_callback: function called between completion of bios in the request | ||
1946 | * and completion of the request. | ||
1947 | * If the callback returns nonzero, this helper returns without | ||
1948 | * completion of the request. | ||
1949 | * | ||
1950 | * Description: | ||
1951 | * Ends I/O on a number of bytes attached to @rq. | ||
1952 | * If @rq has data left over, sets it up for the next range of segments. | ||
1953 | * | ||
1954 | * This special helper function is used only for existing tricky drivers. | ||
1955 | * (e.g. cdrom_newpc_intr() of ide-cd) | ||
1956 | * This interface will be removed when such drivers are rewritten. | ||
1957 | * Don't use this interface in other places anymore. | ||
1958 | * | ||
1959 | * Return: | ||
1960 | * 0 - we are done with this request | ||
1961 | * 1 - this request is not freed yet: | ||
1962 | * it still has pending buffers, or | ||
1963 | * the driver doesn't want to finish this request yet. | ||
1964 | **/ | ||
1965 | int blk_end_request_callback(struct request *rq, int error, | ||
1966 | unsigned int nr_bytes, | ||
1967 | int (drv_callback)(struct request *)) | ||
1968 | { | ||
1969 | return blk_end_io(rq, error, nr_bytes, 0, drv_callback); | ||
1970 | } | ||
1971 | EXPORT_SYMBOL_GPL(blk_end_request_callback); | ||
1972 | |||
1973 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | ||
1974 | struct bio *bio) | ||
1975 | { | ||
1976 | /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ | ||
1977 | rq->cmd_flags |= (bio->bi_rw & 3); | ||
1978 | |||
1979 | rq->nr_phys_segments = bio_phys_segments(q, bio); | ||
1980 | rq->nr_hw_segments = bio_hw_segments(q, bio); | ||
1981 | rq->current_nr_sectors = bio_cur_sectors(bio); | ||
1982 | rq->hard_cur_sectors = rq->current_nr_sectors; | ||
1983 | rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); | ||
1984 | rq->buffer = bio_data(bio); | ||
1985 | rq->data_len = bio->bi_size; | ||
1986 | |||
1987 | rq->bio = rq->biotail = bio; | ||
1988 | |||
1989 | if (bio->bi_bdev) | ||
1990 | rq->rq_disk = bio->bi_bdev->bd_disk; | ||
1991 | } | ||
1992 | |||
1993 | int kblockd_schedule_work(struct work_struct *work) | ||
1994 | { | ||
1995 | return queue_work(kblockd_workqueue, work); | ||
1996 | } | ||
1997 | EXPORT_SYMBOL(kblockd_schedule_work); | ||
1998 | |||
1999 | void kblockd_flush_work(struct work_struct *work) | ||
2000 | { | ||
2001 | cancel_work_sync(work); | ||
2002 | } | ||
2003 | EXPORT_SYMBOL(kblockd_flush_work); | ||
2004 | |||
2005 | int __init blk_dev_init(void) | ||
2006 | { | ||
2007 | int i; | ||
2008 | |||
2009 | kblockd_workqueue = create_workqueue("kblockd"); | ||
2010 | if (!kblockd_workqueue) | ||
2011 | panic("Failed to create kblockd\n"); | ||
2012 | |||
2013 | request_cachep = kmem_cache_create("blkdev_requests", | ||
2014 | sizeof(struct request), 0, SLAB_PANIC, NULL); | ||
2015 | |||
2016 | blk_requestq_cachep = kmem_cache_create("blkdev_queue", | ||
2017 | sizeof(struct request_queue), 0, SLAB_PANIC, NULL); | ||
2018 | |||
2019 | for_each_possible_cpu(i) | ||
2020 | INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); | ||
2021 | |||
2022 | open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL); | ||
2023 | register_hotcpu_notifier(&blk_cpu_notifier); | ||
2024 | |||
2025 | return 0; | ||
2026 | } | ||
2027 | |||
diff --git a/block/blk-exec.c b/block/blk-exec.c new file mode 100644 index 000000000000..391dd6224890 --- /dev/null +++ b/block/blk-exec.c | |||
@@ -0,0 +1,104 @@ | |||
1 | /* | ||
2 | * Functions related to executing requests on a queue | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/bio.h> | ||
7 | #include <linux/blkdev.h> | ||
8 | |||
9 | #include "blk.h" | ||
10 | |||
11 | /* | ||
12 | * for max sense size | ||
13 | */ | ||
14 | #include <scsi/scsi_cmnd.h> | ||
15 | |||
16 | /** | ||
17 | * blk_end_sync_rq - executes a completion event on a request | ||
18 | * @rq: request to complete | ||
19 | * @error: end io status of the request | ||
20 | */ | ||
21 | void blk_end_sync_rq(struct request *rq, int error) | ||
22 | { | ||
23 | struct completion *waiting = rq->end_io_data; | ||
24 | |||
25 | rq->end_io_data = NULL; | ||
26 | __blk_put_request(rq->q, rq); | ||
27 | |||
28 | /* | ||
29 | * complete last: if this request lives on the caller's stack, the process | ||
30 | * (and thus the rq pointer) could be invalid right after this complete() | ||
31 | */ | ||
32 | complete(waiting); | ||
33 | } | ||
34 | EXPORT_SYMBOL(blk_end_sync_rq); | ||
35 | |||
36 | /** | ||
37 | * blk_execute_rq_nowait - insert a request into queue for execution | ||
38 | * @q: queue to insert the request in | ||
39 | * @bd_disk: matching gendisk | ||
40 | * @rq: request to insert | ||
41 | * @at_head: insert request at head or tail of queue | ||
42 | * @done: I/O completion handler | ||
43 | * | ||
44 | * Description: | ||
45 | * Insert a fully prepared request at the back of the io scheduler queue | ||
46 | * for execution. Don't wait for completion. | ||
47 | */ | ||
48 | void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | ||
49 | struct request *rq, int at_head, | ||
50 | rq_end_io_fn *done) | ||
51 | { | ||
52 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; | ||
53 | |||
54 | rq->rq_disk = bd_disk; | ||
55 | rq->cmd_flags |= REQ_NOMERGE; | ||
56 | rq->end_io = done; | ||
57 | WARN_ON(irqs_disabled()); | ||
58 | spin_lock_irq(q->queue_lock); | ||
59 | __elv_add_request(q, rq, where, 1); | ||
60 | __generic_unplug_device(q); | ||
61 | spin_unlock_irq(q->queue_lock); | ||
62 | } | ||
63 | EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); | ||
64 | |||
65 | /** | ||
66 | * blk_execute_rq - insert a request into queue for execution | ||
67 | * @q: queue to insert the request in | ||
68 | * @bd_disk: matching gendisk | ||
69 | * @rq: request to insert | ||
70 | * @at_head: insert request at head or tail of queue | ||
71 | * | ||
72 | * Description: | ||
73 | * Insert a fully prepared request at the back of the io scheduler queue | ||
74 | * for execution and wait for completion. | ||
75 | */ | ||
76 | int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk, | ||
77 | struct request *rq, int at_head) | ||
78 | { | ||
79 | DECLARE_COMPLETION_ONSTACK(wait); | ||
80 | char sense[SCSI_SENSE_BUFFERSIZE]; | ||
81 | int err = 0; | ||
82 | |||
83 | /* | ||
84 | * we need an extra reference to the request, so we can look at | ||
85 | * it after io completion | ||
86 | */ | ||
87 | rq->ref_count++; | ||
88 | |||
89 | if (!rq->sense) { | ||
90 | memset(sense, 0, sizeof(sense)); | ||
91 | rq->sense = sense; | ||
92 | rq->sense_len = 0; | ||
93 | } | ||
94 | |||
95 | rq->end_io_data = &wait; | ||
96 | blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq); | ||
97 | wait_for_completion(&wait); | ||
98 | |||
99 | if (rq->errors) | ||
100 | err = -EIO; | ||
101 | |||
102 | return err; | ||
103 | } | ||
104 | EXPORT_SYMBOL(blk_execute_rq); | ||
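Putting the pieces together, a caller issues a synchronous packet command roughly like this; the cdb/cdb_len parameters and the mydev naming are illustrative (and cdb_len is assumed to fit rq->cmd), while the request fields and calls are real:

	static int mydev_send_cmd(struct request_queue *q, struct gendisk *disk,
				  unsigned char *cdb, unsigned int cdb_len)
	{
		struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
		int err;

		rq->cmd_type = REQ_TYPE_BLOCK_PC;
		rq->timeout = 60 * HZ;
		memcpy(rq->cmd, cdb, cdb_len);
		rq->cmd_len = cdb_len;

		/* inserts at the tail, then sleeps until blk_end_sync_rq() fires */
		err = blk_execute_rq(q, disk, rq, 0);
		blk_put_request(rq);
		return err;
	}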
diff --git a/block/blk-ioc.c b/block/blk-ioc.c new file mode 100644 index 000000000000..80245dc30c75 --- /dev/null +++ b/block/blk-ioc.c | |||
@@ -0,0 +1,185 @@ | |||
1 | /* | ||
2 | * Functions related to io context handling | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/init.h> | ||
7 | #include <linux/bio.h> | ||
8 | #include <linux/blkdev.h> | ||
9 | #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ | ||
10 | |||
11 | #include "blk.h" | ||
12 | |||
13 | /* | ||
14 | * For io context allocations | ||
15 | */ | ||
16 | static struct kmem_cache *iocontext_cachep; | ||
17 | |||
18 | static void cfq_dtor(struct io_context *ioc) | ||
19 | { | ||
20 | struct cfq_io_context *cic[1]; | ||
21 | int r; | ||
22 | |||
23 | /* | ||
24 | * We don't have a specific key to lookup with, so use the gang | ||
25 | * lookup to just retrieve the first item stored. The cfq exit | ||
26 | * function will iterate the full tree, so any member will do. | ||
27 | */ | ||
28 | r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1); | ||
29 | if (r > 0) | ||
30 | cic[0]->dtor(ioc); | ||
31 | } | ||
32 | |||
33 | /* | ||
34 | * IO Context helper functions. put_io_context() returns 1 if there are no | ||
35 | * more users of this io context, 0 otherwise. | ||
36 | */ | ||
37 | int put_io_context(struct io_context *ioc) | ||
38 | { | ||
39 | if (ioc == NULL) | ||
40 | return 1; | ||
41 | |||
42 | BUG_ON(atomic_read(&ioc->refcount) == 0); | ||
43 | |||
44 | if (atomic_dec_and_test(&ioc->refcount)) { | ||
45 | rcu_read_lock(); | ||
46 | if (ioc->aic && ioc->aic->dtor) | ||
47 | ioc->aic->dtor(ioc->aic); | ||
48 | rcu_read_unlock(); | ||
49 | cfq_dtor(ioc); | ||
50 | |||
51 | kmem_cache_free(iocontext_cachep, ioc); | ||
52 | return 1; | ||
53 | } | ||
54 | return 0; | ||
55 | } | ||
56 | EXPORT_SYMBOL(put_io_context); | ||
57 | |||
58 | static void cfq_exit(struct io_context *ioc) | ||
59 | { | ||
60 | struct cfq_io_context *cic[1]; | ||
61 | int r; | ||
62 | |||
63 | rcu_read_lock(); | ||
64 | /* | ||
65 | * See comment for cfq_dtor() | ||
66 | */ | ||
67 | r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1); | ||
68 | rcu_read_unlock(); | ||
69 | |||
70 | if (r > 0) | ||
71 | cic[0]->exit(ioc); | ||
72 | } | ||
73 | |||
74 | /* Called by the exiting task */ | ||
75 | void exit_io_context(void) | ||
76 | { | ||
77 | struct io_context *ioc; | ||
78 | |||
79 | task_lock(current); | ||
80 | ioc = current->io_context; | ||
81 | current->io_context = NULL; | ||
82 | task_unlock(current); | ||
83 | |||
84 | if (atomic_dec_and_test(&ioc->nr_tasks)) { | ||
85 | if (ioc->aic && ioc->aic->exit) | ||
86 | ioc->aic->exit(ioc->aic); | ||
87 | cfq_exit(ioc); | ||
88 | |||
89 | put_io_context(ioc); | ||
90 | } | ||
91 | } | ||
92 | |||
93 | struct io_context *alloc_io_context(gfp_t gfp_flags, int node) | ||
94 | { | ||
95 | struct io_context *ret; | ||
96 | |||
97 | ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); | ||
98 | if (ret) { | ||
99 | atomic_set(&ret->refcount, 1); | ||
100 | atomic_set(&ret->nr_tasks, 1); | ||
101 | spin_lock_init(&ret->lock); | ||
102 | ret->ioprio_changed = 0; | ||
103 | ret->ioprio = 0; | ||
104 | ret->last_waited = jiffies; /* doesn't matter... */ | ||
105 | ret->nr_batch_requests = 0; /* because this is 0 */ | ||
106 | ret->aic = NULL; | ||
107 | INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH); | ||
108 | ret->ioc_data = NULL; | ||
109 | } | ||
110 | |||
111 | return ret; | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * If the current task has no IO context then create one and initialise it. | ||
116 | * Otherwise, return its existing IO context. | ||
117 | * | ||
118 | * This returned IO context doesn't have a specifically elevated refcount, | ||
119 | * but since the current task itself holds a reference, the context can be | ||
120 | * used in general code, so long as it stays within `current` context. | ||
121 | */ | ||
122 | struct io_context *current_io_context(gfp_t gfp_flags, int node) | ||
123 | { | ||
124 | struct task_struct *tsk = current; | ||
125 | struct io_context *ret; | ||
126 | |||
127 | ret = tsk->io_context; | ||
128 | if (likely(ret)) | ||
129 | return ret; | ||
130 | |||
131 | ret = alloc_io_context(gfp_flags, node); | ||
132 | if (ret) { | ||
133 | /* make sure set_task_ioprio() sees the settings above */ | ||
134 | smp_wmb(); | ||
135 | tsk->io_context = ret; | ||
136 | } | ||
137 | |||
138 | return ret; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * If the current task has no IO context then create one and initialise it. | ||
143 | * If it does have a context, take a ref on it. | ||
144 | * | ||
145 | * This is always called in the context of the task which submitted the I/O. | ||
146 | */ | ||
147 | struct io_context *get_io_context(gfp_t gfp_flags, int node) | ||
148 | { | ||
149 | struct io_context *ret = NULL; | ||
150 | |||
151 | /* | ||
152 | * Check for unlikely race with exiting task. ioc ref count is | ||
153 | * zero when ioc is being detached. | ||
154 | */ | ||
155 | do { | ||
156 | ret = current_io_context(gfp_flags, node); | ||
157 | if (unlikely(!ret)) | ||
158 | break; | ||
159 | } while (!atomic_inc_not_zero(&ret->refcount)); | ||
160 | |||
161 | return ret; | ||
162 | } | ||
163 | EXPORT_SYMBOL(get_io_context); | ||
164 | |||
165 | void copy_io_context(struct io_context **pdst, struct io_context **psrc) | ||
166 | { | ||
167 | struct io_context *src = *psrc; | ||
168 | struct io_context *dst = *pdst; | ||
169 | |||
170 | if (src) { | ||
171 | BUG_ON(atomic_read(&src->refcount) == 0); | ||
172 | atomic_inc(&src->refcount); | ||
173 | put_io_context(dst); | ||
174 | *pdst = src; | ||
175 | } | ||
176 | } | ||
177 | EXPORT_SYMBOL(copy_io_context); | ||
178 | |||
179 | int __init blk_ioc_init(void) | ||
180 | { | ||
181 | iocontext_cachep = kmem_cache_create("blkdev_ioc", | ||
182 | sizeof(struct io_context), 0, SLAB_PANIC, NULL); | ||
183 | return 0; | ||
184 | } | ||
185 | subsys_initcall(blk_ioc_init); | ||
diff --git a/block/blk-map.c b/block/blk-map.c new file mode 100644 index 000000000000..955d75c1a58f --- /dev/null +++ b/block/blk-map.c | |||
@@ -0,0 +1,262 @@ | |||
1 | /* | ||
2 | * Functions related to mapping data to requests | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/bio.h> | ||
7 | #include <linux/blkdev.h> | ||
8 | |||
9 | #include "blk.h" | ||
10 | |||
11 | int blk_rq_append_bio(struct request_queue *q, struct request *rq, | ||
12 | struct bio *bio) | ||
13 | { | ||
14 | if (!rq->bio) | ||
15 | blk_rq_bio_prep(q, rq, bio); | ||
16 | else if (!ll_back_merge_fn(q, rq, bio)) | ||
17 | return -EINVAL; | ||
18 | else { | ||
19 | rq->biotail->bi_next = bio; | ||
20 | rq->biotail = bio; | ||
21 | |||
22 | rq->data_len += bio->bi_size; | ||
23 | } | ||
24 | return 0; | ||
25 | } | ||
26 | EXPORT_SYMBOL(blk_rq_append_bio); | ||
27 | |||
28 | static int __blk_rq_unmap_user(struct bio *bio) | ||
29 | { | ||
30 | int ret = 0; | ||
31 | |||
32 | if (bio) { | ||
33 | if (bio_flagged(bio, BIO_USER_MAPPED)) | ||
34 | bio_unmap_user(bio); | ||
35 | else | ||
36 | ret = bio_uncopy_user(bio); | ||
37 | } | ||
38 | |||
39 | return ret; | ||
40 | } | ||
41 | |||
42 | static int __blk_rq_map_user(struct request_queue *q, struct request *rq, | ||
43 | void __user *ubuf, unsigned int len) | ||
44 | { | ||
45 | unsigned long uaddr; | ||
46 | struct bio *bio, *orig_bio; | ||
47 | int reading, ret; | ||
48 | |||
49 | reading = rq_data_dir(rq) == READ; | ||
50 | |||
51 | /* | ||
52 | * if the alignment requirement is satisfied, map in user pages for | ||
53 | * direct DMA. Otherwise, set up kernel bounce buffers | ||
54 | */ | ||
55 | uaddr = (unsigned long) ubuf; | ||
56 | if (!(uaddr & queue_dma_alignment(q)) && | ||
57 | !(len & queue_dma_alignment(q))) | ||
58 | bio = bio_map_user(q, NULL, uaddr, len, reading); | ||
59 | else | ||
60 | bio = bio_copy_user(q, uaddr, len, reading); | ||
61 | |||
62 | if (IS_ERR(bio)) | ||
63 | return PTR_ERR(bio); | ||
64 | |||
65 | orig_bio = bio; | ||
66 | blk_queue_bounce(q, &bio); | ||
67 | |||
68 | /* | ||
69 | * We link the bounce buffer in and could have to traverse it | ||
70 | * later so we have to get a ref to prevent it from being freed | ||
71 | */ | ||
72 | bio_get(bio); | ||
73 | |||
74 | ret = blk_rq_append_bio(q, rq, bio); | ||
75 | if (!ret) | ||
76 | return bio->bi_size; | ||
77 | |||
78 | /* if it was bounced we must call the end io function */ | ||
79 | bio_endio(bio, 0); | ||
80 | __blk_rq_unmap_user(orig_bio); | ||
81 | bio_put(bio); | ||
82 | return ret; | ||
83 | } | ||
84 | |||
85 | /** | ||
86 | * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage | ||
87 | * @q: request queue where request should be inserted | ||
88 | * @rq: request structure to fill | ||
89 | * @ubuf: the user buffer | ||
90 | * @len: length of user data | ||
91 | * | ||
92 | * Description: | ||
93 | * Data will be mapped directly for zero copy io, if possible. Otherwise | ||
94 | * a kernel bounce buffer is used. | ||
95 | * | ||
96 | * A matching blk_rq_unmap_user() must be issued at the end of io, while | ||
97 | * still in process context. | ||
98 | * | ||
99 | * Note: The mapped bio may need to be bounced through blk_queue_bounce() | ||
100 | * before being submitted to the device, as pages mapped may be out of | ||
101 | * reach. It's the caller's responsibility to make sure this happens. The | ||
102 | * original bio must be passed back in to blk_rq_unmap_user() for proper | ||
103 | * unmapping. | ||
104 | */ | ||
105 | int blk_rq_map_user(struct request_queue *q, struct request *rq, | ||
106 | void __user *ubuf, unsigned long len) | ||
107 | { | ||
108 | unsigned long bytes_read = 0; | ||
109 | struct bio *bio = NULL; | ||
110 | int ret; | ||
111 | |||
112 | if (len > (q->max_hw_sectors << 9)) | ||
113 | return -EINVAL; | ||
114 | if (!len || !ubuf) | ||
115 | return -EINVAL; | ||
116 | |||
117 | while (bytes_read != len) { | ||
118 | unsigned long map_len, end, start; | ||
119 | |||
120 | map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE); | ||
121 | end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1) | ||
122 | >> PAGE_SHIFT; | ||
123 | start = (unsigned long)ubuf >> PAGE_SHIFT; | ||
124 | |||
125 | /* | ||
126 | * A bad offset could cause us to require BIO_MAX_PAGES + 1 | ||
127 | * pages. If this happens we just lower the requested | ||
128 | * mapping length by a page so that it fits | ||
129 | */ | ||
130 | if (end - start > BIO_MAX_PAGES) | ||
131 | map_len -= PAGE_SIZE; | ||
132 | |||
133 | ret = __blk_rq_map_user(q, rq, ubuf, map_len); | ||
134 | if (ret < 0) | ||
135 | goto unmap_rq; | ||
136 | if (!bio) | ||
137 | bio = rq->bio; | ||
138 | bytes_read += ret; | ||
139 | ubuf += ret; | ||
140 | } | ||
141 | |||
142 | rq->buffer = rq->data = NULL; | ||
143 | return 0; | ||
144 | unmap_rq: | ||
145 | blk_rq_unmap_user(bio); | ||
146 | return ret; | ||
147 | } | ||
148 | EXPORT_SYMBOL(blk_rq_map_user); | ||
149 | |||
150 | /** | ||
151 | * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage | ||
152 | * @q: request queue where request should be inserted | ||
153 | * @rq: request to map data to | ||
154 | * @iov: pointer to the iovec | ||
155 | * @iov_count: number of elements in the iovec | ||
156 | * @len: I/O byte count | ||
157 | * | ||
158 | * Description: | ||
159 | * Data will be mapped directly for zero copy io, if possible. Otherwise | ||
160 | * a kernel bounce buffer is used. | ||
161 | * | ||
162 | * A matching blk_rq_unmap_user() must be issued at the end of io, while | ||
163 | * still in process context. | ||
164 | * | ||
165 | * Note: The mapped bio may need to be bounced through blk_queue_bounce() | ||
166 | * before being submitted to the device, as pages mapped may be out of | ||
167 | * reach. It's the caller's responsibility to make sure this happens. The | ||
168 | * original bio must be passed back in to blk_rq_unmap_user() for proper | ||
169 | * unmapping. | ||
170 | */ | ||
171 | int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, | ||
172 | struct sg_iovec *iov, int iov_count, unsigned int len) | ||
173 | { | ||
174 | struct bio *bio; | ||
175 | |||
176 | if (!iov || iov_count <= 0) | ||
177 | return -EINVAL; | ||
178 | |||
179 | /* we don't allow misaligned data like bio_map_user() does. If the | ||
180 | * user is using sg, they're expected to know the alignment constraints | ||
181 | * and respect them accordingly */ | ||
182 | bio = bio_map_user_iov(q, NULL, iov, iov_count, | ||
183 | rq_data_dir(rq) == READ); | ||
184 | if (IS_ERR(bio)) | ||
185 | return PTR_ERR(bio); | ||
186 | |||
187 | if (bio->bi_size != len) { | ||
188 | bio_endio(bio, 0); | ||
189 | bio_unmap_user(bio); | ||
190 | return -EINVAL; | ||
191 | } | ||
192 | |||
193 | bio_get(bio); | ||
194 | blk_rq_bio_prep(q, rq, bio); | ||
195 | rq->buffer = rq->data = NULL; | ||
196 | return 0; | ||
197 | } | ||
198 | EXPORT_SYMBOL(blk_rq_map_user_iov); | ||
199 | |||
200 | /** | ||
201 | * blk_rq_unmap_user - unmap a request with user data | ||
202 | * @bio: start of bio list | ||
203 | * | ||
204 | * Description: | ||
205 | * Unmap a rq previously mapped by blk_rq_map_user(). The caller must | ||
206 | * supply the original rq->bio from the blk_rq_map_user() return, since | ||
207 | * the io completion may have changed rq->bio. | ||
208 | */ | ||
209 | int blk_rq_unmap_user(struct bio *bio) | ||
210 | { | ||
211 | struct bio *mapped_bio; | ||
212 | int ret = 0, ret2; | ||
213 | |||
214 | while (bio) { | ||
215 | mapped_bio = bio; | ||
216 | if (unlikely(bio_flagged(bio, BIO_BOUNCED))) | ||
217 | mapped_bio = bio->bi_private; | ||
218 | |||
219 | ret2 = __blk_rq_unmap_user(mapped_bio); | ||
220 | if (ret2 && !ret) | ||
221 | ret = ret2; | ||
222 | |||
223 | mapped_bio = bio; | ||
224 | bio = bio->bi_next; | ||
225 | bio_put(mapped_bio); | ||
226 | } | ||
227 | |||
228 | return ret; | ||
229 | } | ||
230 | EXPORT_SYMBOL(blk_rq_unmap_user); | ||
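
As a minimal usage sketch for the two helpers above, assuming a
process-context caller; the function name, the READ direction and the
error policy are illustrative, not part of the patch:

	#include <linux/blkdev.h>

	/* read `len' bytes of passthrough data into a user buffer */
	static int example_pc_read(struct request_queue *q,
				   struct gendisk *disk,
				   void __user *ubuf, unsigned long len)
	{
		struct request *rq;
		struct bio *bio;
		int ret;

		rq = blk_get_request(q, READ, __GFP_WAIT);
		if (!rq)
			return -ENOMEM;
		rq->cmd_type = REQ_TYPE_BLOCK_PC;

		ret = blk_rq_map_user(q, rq, ubuf, len);
		if (ret)
			goto out_put;

		/*
		 * Save rq->bio now: completion may advance rq->bio, and
		 * blk_rq_unmap_user() wants the original head of the list.
		 */
		bio = rq->bio;
		blk_execute_rq(q, disk, rq, 0);

		ret = blk_rq_unmap_user(bio);
	out_put:
		blk_put_request(rq);
		return ret;
	}
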
231 | |||
232 | /** | ||
233 | * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage | ||
234 | * @q: request queue where request should be inserted | ||
235 | * @rq: request to fill | ||
236 | * @kbuf: the kernel buffer | ||
237 | * @len: length of kernel data | ||
238 | * @gfp_mask: memory allocation flags | ||
239 | */ | ||
240 | int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, | ||
241 | unsigned int len, gfp_t gfp_mask) | ||
242 | { | ||
243 | struct bio *bio; | ||
244 | |||
245 | if (len > (q->max_hw_sectors << 9)) | ||
246 | return -EINVAL; | ||
247 | if (!len || !kbuf) | ||
248 | return -EINVAL; | ||
249 | |||
250 | bio = bio_map_kern(q, kbuf, len, gfp_mask); | ||
251 | if (IS_ERR(bio)) | ||
252 | return PTR_ERR(bio); | ||
253 | |||
254 | if (rq_data_dir(rq) == WRITE) | ||
255 | bio->bi_rw |= (1 << BIO_RW); | ||
256 | |||
257 | blk_rq_bio_prep(q, rq, bio); | ||
258 | blk_queue_bounce(q, &rq->bio); | ||
259 | rq->buffer = rq->data = NULL; | ||
260 | return 0; | ||
261 | } | ||
262 | EXPORT_SYMBOL(blk_rq_map_kern); | ||
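
A corresponding sketch for kernel buffers; unlike the user-mapping
case, no explicit unmap call is needed because the bio is torn down
at completion. The function name and READ direction are assumptions:

	#include <linux/blkdev.h>

	static int example_kern_cmd(struct request_queue *q,
				    struct gendisk *disk,
				    void *buf, unsigned int len)
	{
		struct request *rq;
		int ret;

		rq = blk_get_request(q, READ, __GFP_WAIT);
		if (!rq)
			return -ENOMEM;
		rq->cmd_type = REQ_TYPE_BLOCK_PC;

		/* GFP_KERNEL is fine here, we are in process context */
		ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
		if (!ret)
			ret = blk_execute_rq(q, disk, rq, 0);

		blk_put_request(rq);
		return ret;
	}
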
diff --git a/block/blk-merge.c b/block/blk-merge.c new file mode 100644 index 000000000000..845ef8131108 --- /dev/null +++ b/block/blk-merge.c | |||
@@ -0,0 +1,485 @@ | |||
1 | /* | ||
2 | * Functions related to segment and merge handling | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/bio.h> | ||
7 | #include <linux/blkdev.h> | ||
8 | #include <linux/scatterlist.h> | ||
9 | |||
10 | #include "blk.h" | ||
11 | |||
12 | void blk_recalc_rq_sectors(struct request *rq, int nsect) | ||
13 | { | ||
14 | if (blk_fs_request(rq)) { | ||
15 | rq->hard_sector += nsect; | ||
16 | rq->hard_nr_sectors -= nsect; | ||
17 | |||
18 | /* | ||
19 | * Move the I/O submission pointers ahead if required. | ||
20 | */ | ||
21 | if ((rq->nr_sectors >= rq->hard_nr_sectors) && | ||
22 | (rq->sector <= rq->hard_sector)) { | ||
23 | rq->sector = rq->hard_sector; | ||
24 | rq->nr_sectors = rq->hard_nr_sectors; | ||
25 | rq->hard_cur_sectors = bio_cur_sectors(rq->bio); | ||
26 | rq->current_nr_sectors = rq->hard_cur_sectors; | ||
27 | rq->buffer = bio_data(rq->bio); | ||
28 | } | ||
29 | |||
30 | /* | ||
31 | * if total number of sectors is less than the first segment | ||
32 | * size, something has gone terribly wrong | ||
33 | */ | ||
34 | if (rq->nr_sectors < rq->current_nr_sectors) { | ||
35 | printk(KERN_ERR "blk: request botched\n"); | ||
36 | rq->nr_sectors = rq->current_nr_sectors; | ||
37 | } | ||
38 | } | ||
39 | } | ||
40 | |||
41 | void blk_recalc_rq_segments(struct request *rq) | ||
42 | { | ||
43 | int nr_phys_segs; | ||
44 | int nr_hw_segs; | ||
45 | unsigned int phys_size; | ||
46 | unsigned int hw_size; | ||
47 | struct bio_vec *bv, *bvprv = NULL; | ||
48 | int seg_size; | ||
49 | int hw_seg_size; | ||
50 | int cluster; | ||
51 | struct req_iterator iter; | ||
52 | int high, highprv = 1; | ||
53 | struct request_queue *q = rq->q; | ||
54 | |||
55 | if (!rq->bio) | ||
56 | return; | ||
57 | |||
58 | cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER); | ||
59 | hw_seg_size = seg_size = 0; | ||
60 | phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0; | ||
61 | rq_for_each_segment(bv, rq, iter) { | ||
62 | /* | ||
63 | * the trick here is making sure that a high page is never | ||
64 | * considered part of another segment, since that might | ||
65 | * change with the bounce page. | ||
66 | */ | ||
67 | high = page_to_pfn(bv->bv_page) > q->bounce_pfn; | ||
68 | if (high || highprv) | ||
69 | goto new_hw_segment; | ||
70 | if (cluster) { | ||
71 | if (seg_size + bv->bv_len > q->max_segment_size) | ||
72 | goto new_segment; | ||
73 | if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv)) | ||
74 | goto new_segment; | ||
75 | if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv)) | ||
76 | goto new_segment; | ||
77 | if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) | ||
78 | goto new_hw_segment; | ||
79 | |||
80 | seg_size += bv->bv_len; | ||
81 | hw_seg_size += bv->bv_len; | ||
82 | bvprv = bv; | ||
83 | continue; | ||
84 | } | ||
85 | new_segment: | ||
86 | if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) && | ||
87 | !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) | ||
88 | hw_seg_size += bv->bv_len; | ||
89 | else { | ||
90 | new_hw_segment: | ||
91 | if (nr_hw_segs == 1 && | ||
92 | hw_seg_size > rq->bio->bi_hw_front_size) | ||
93 | rq->bio->bi_hw_front_size = hw_seg_size; | ||
94 | hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len; | ||
95 | nr_hw_segs++; | ||
96 | } | ||
97 | |||
98 | nr_phys_segs++; | ||
99 | bvprv = bv; | ||
100 | seg_size = bv->bv_len; | ||
101 | highprv = high; | ||
102 | } | ||
103 | |||
104 | if (nr_hw_segs == 1 && | ||
105 | hw_seg_size > rq->bio->bi_hw_front_size) | ||
106 | rq->bio->bi_hw_front_size = hw_seg_size; | ||
107 | if (hw_seg_size > rq->biotail->bi_hw_back_size) | ||
108 | rq->biotail->bi_hw_back_size = hw_seg_size; | ||
109 | rq->nr_phys_segments = nr_phys_segs; | ||
110 | rq->nr_hw_segments = nr_hw_segs; | ||
111 | } | ||
112 | |||
113 | void blk_recount_segments(struct request_queue *q, struct bio *bio) | ||
114 | { | ||
115 | struct request rq; | ||
116 | struct bio *nxt = bio->bi_next; | ||
117 | rq.q = q; | ||
118 | rq.bio = rq.biotail = bio; | ||
119 | bio->bi_next = NULL; | ||
120 | blk_recalc_rq_segments(&rq); | ||
121 | bio->bi_next = nxt; | ||
122 | bio->bi_phys_segments = rq.nr_phys_segments; | ||
123 | bio->bi_hw_segments = rq.nr_hw_segments; | ||
124 | bio->bi_flags |= (1 << BIO_SEG_VALID); | ||
125 | } | ||
126 | EXPORT_SYMBOL(blk_recount_segments); | ||
127 | |||
128 | static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, | ||
129 | struct bio *nxt) | ||
130 | { | ||
131 | if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER))) | ||
132 | return 0; | ||
133 | |||
134 | if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt))) | ||
135 | return 0; | ||
136 | if (bio->bi_size + nxt->bi_size > q->max_segment_size) | ||
137 | return 0; | ||
138 | |||
139 | /* | ||
140 | * bio and nxt are contiguous in memory, check if the queue allows | ||
141 | * these two to be merged into one | ||
142 | */ | ||
143 | if (BIO_SEG_BOUNDARY(q, bio, nxt)) | ||
144 | return 1; | ||
145 | |||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio, | ||
150 | struct bio *nxt) | ||
151 | { | ||
152 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) | ||
153 | blk_recount_segments(q, bio); | ||
154 | if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID))) | ||
155 | blk_recount_segments(q, nxt); | ||
156 | if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) || | ||
157 | BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size)) | ||
158 | return 0; | ||
159 | if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size) | ||
160 | return 0; | ||
161 | |||
162 | return 1; | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * map a request to scatterlist, return number of sg entries set up. Caller | ||
167 | * must make sure sg can hold rq->nr_phys_segments entries | ||
168 | */ | ||
169 | int blk_rq_map_sg(struct request_queue *q, struct request *rq, | ||
170 | struct scatterlist *sglist) | ||
171 | { | ||
172 | struct bio_vec *bvec, *bvprv; | ||
173 | struct req_iterator iter; | ||
174 | struct scatterlist *sg; | ||
175 | int nsegs, cluster; | ||
176 | |||
177 | nsegs = 0; | ||
178 | cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER); | ||
179 | |||
180 | /* | ||
181 | * for each bio in rq | ||
182 | */ | ||
183 | bvprv = NULL; | ||
184 | sg = NULL; | ||
185 | rq_for_each_segment(bvec, rq, iter) { | ||
186 | int nbytes = bvec->bv_len; | ||
187 | |||
188 | if (bvprv && cluster) { | ||
189 | if (sg->length + nbytes > q->max_segment_size) | ||
190 | goto new_segment; | ||
191 | |||
192 | if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) | ||
193 | goto new_segment; | ||
194 | if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) | ||
195 | goto new_segment; | ||
196 | |||
197 | sg->length += nbytes; | ||
198 | } else { | ||
199 | new_segment: | ||
200 | if (!sg) | ||
201 | sg = sglist; | ||
202 | else { | ||
203 | /* | ||
204 | * If the driver previously mapped a shorter | ||
205 | * list, we could see a termination bit | ||
206 | * prematurely unless it fully inits the sg | ||
207 | * table on each mapping. We KNOW that there | ||
208 | * must be more entries here or the driver | ||
209 | * would be buggy, so force clear the | ||
210 | * termination bit to avoid doing a full | ||
211 | * sg_init_table() in drivers for each command. | ||
212 | */ | ||
213 | sg->page_link &= ~0x02; | ||
214 | sg = sg_next(sg); | ||
215 | } | ||
216 | |||
217 | sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset); | ||
218 | nsegs++; | ||
219 | } | ||
220 | bvprv = bvec; | ||
221 | } /* segments in rq */ | ||
222 | |||
223 | if (q->dma_drain_size) { | ||
224 | sg->page_link &= ~0x02; | ||
225 | sg = sg_next(sg); | ||
226 | sg_set_page(sg, virt_to_page(q->dma_drain_buffer), | ||
227 | q->dma_drain_size, | ||
228 | ((unsigned long)q->dma_drain_buffer) & | ||
229 | (PAGE_SIZE - 1)); | ||
230 | nsegs++; | ||
231 | } | ||
232 | |||
233 | if (sg) | ||
234 | sg_mark_end(sg); | ||
235 | |||
236 | return nsegs; | ||
237 | } | ||
238 | EXPORT_SYMBOL(blk_rq_map_sg); | ||
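
A hedged sketch of the typical consumer: a driver's request_fn
building a DMA scatterlist. The example_dev structure, segment count
and hardware hand-off are all hypothetical. Note the sg table only
needs sg_init_table() once at probe time, since blk_rq_map_sg()
clears stale termination bits itself, as described in the code above:

	#include <linux/blkdev.h>
	#include <linux/dma-mapping.h>

	#define EXAMPLE_MAX_SEGS 128	/* hypothetical controller limit */

	struct example_dev {
		struct device *dma_dev;
		struct scatterlist sgl[EXAMPLE_MAX_SEGS];
	};

	static void example_request_fn(struct request_queue *q)
	{
		struct example_dev *ed = q->queuedata;
		struct request *rq;

		while ((rq = elv_next_request(q)) != NULL) {
			enum dma_data_direction dir =
				rq_data_dir(rq) == WRITE ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE;
			int nsegs = blk_rq_map_sg(q, rq, ed->sgl);

			nsegs = dma_map_sg(ed->dma_dev, ed->sgl, nsegs, dir);
			blkdev_dequeue_request(rq);
			/* ... program the controller with nsegs entries ... */
		}
	}
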
239 | |||
240 | static inline int ll_new_mergeable(struct request_queue *q, | ||
241 | struct request *req, | ||
242 | struct bio *bio) | ||
243 | { | ||
244 | int nr_phys_segs = bio_phys_segments(q, bio); | ||
245 | |||
246 | if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { | ||
247 | req->cmd_flags |= REQ_NOMERGE; | ||
248 | if (req == q->last_merge) | ||
249 | q->last_merge = NULL; | ||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | /* | ||
254 | * A hw segment is just getting larger, bump just the phys | ||
255 | * counter. | ||
256 | */ | ||
257 | req->nr_phys_segments += nr_phys_segs; | ||
258 | return 1; | ||
259 | } | ||
260 | |||
261 | static inline int ll_new_hw_segment(struct request_queue *q, | ||
262 | struct request *req, | ||
263 | struct bio *bio) | ||
264 | { | ||
265 | int nr_hw_segs = bio_hw_segments(q, bio); | ||
266 | int nr_phys_segs = bio_phys_segments(q, bio); | ||
267 | |||
268 | if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments | ||
269 | || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { | ||
270 | req->cmd_flags |= REQ_NOMERGE; | ||
271 | if (req == q->last_merge) | ||
272 | q->last_merge = NULL; | ||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | /* | ||
277 | * This will form the start of a new hw segment. Bump both | ||
278 | * counters. | ||
279 | */ | ||
280 | req->nr_hw_segments += nr_hw_segs; | ||
281 | req->nr_phys_segments += nr_phys_segs; | ||
282 | return 1; | ||
283 | } | ||
284 | |||
285 | int ll_back_merge_fn(struct request_queue *q, struct request *req, | ||
286 | struct bio *bio) | ||
287 | { | ||
288 | unsigned short max_sectors; | ||
289 | int len; | ||
290 | |||
291 | if (unlikely(blk_pc_request(req))) | ||
292 | max_sectors = q->max_hw_sectors; | ||
293 | else | ||
294 | max_sectors = q->max_sectors; | ||
295 | |||
296 | if (req->nr_sectors + bio_sectors(bio) > max_sectors) { | ||
297 | req->cmd_flags |= REQ_NOMERGE; | ||
298 | if (req == q->last_merge) | ||
299 | q->last_merge = NULL; | ||
300 | return 0; | ||
301 | } | ||
302 | if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID))) | ||
303 | blk_recount_segments(q, req->biotail); | ||
304 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) | ||
305 | blk_recount_segments(q, bio); | ||
306 | len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size; | ||
307 | if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) | ||
308 | && !BIOVEC_VIRT_OVERSIZE(len)) { | ||
309 | int mergeable = ll_new_mergeable(q, req, bio); | ||
310 | |||
311 | if (mergeable) { | ||
312 | if (req->nr_hw_segments == 1) | ||
313 | req->bio->bi_hw_front_size = len; | ||
314 | if (bio->bi_hw_segments == 1) | ||
315 | bio->bi_hw_back_size = len; | ||
316 | } | ||
317 | return mergeable; | ||
318 | } | ||
319 | |||
320 | return ll_new_hw_segment(q, req, bio); | ||
321 | } | ||
322 | |||
323 | int ll_front_merge_fn(struct request_queue *q, struct request *req, | ||
324 | struct bio *bio) | ||
325 | { | ||
326 | unsigned short max_sectors; | ||
327 | int len; | ||
328 | |||
329 | if (unlikely(blk_pc_request(req))) | ||
330 | max_sectors = q->max_hw_sectors; | ||
331 | else | ||
332 | max_sectors = q->max_sectors; | ||
333 | |||
334 | |||
335 | if (req->nr_sectors + bio_sectors(bio) > max_sectors) { | ||
336 | req->cmd_flags |= REQ_NOMERGE; | ||
337 | if (req == q->last_merge) | ||
338 | q->last_merge = NULL; | ||
339 | return 0; | ||
340 | } | ||
341 | len = bio->bi_hw_back_size + req->bio->bi_hw_front_size; | ||
342 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) | ||
343 | blk_recount_segments(q, bio); | ||
344 | if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID))) | ||
345 | blk_recount_segments(q, req->bio); | ||
346 | if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) && | ||
347 | !BIOVEC_VIRT_OVERSIZE(len)) { | ||
348 | int mergeable = ll_new_mergeable(q, req, bio); | ||
349 | |||
350 | if (mergeable) { | ||
351 | if (bio->bi_hw_segments == 1) | ||
352 | bio->bi_hw_front_size = len; | ||
353 | if (req->nr_hw_segments == 1) | ||
354 | req->biotail->bi_hw_back_size = len; | ||
355 | } | ||
356 | return mergeable; | ||
357 | } | ||
358 | |||
359 | return ll_new_hw_segment(q, req, bio); | ||
360 | } | ||
361 | |||
362 | static int ll_merge_requests_fn(struct request_queue *q, struct request *req, | ||
363 | struct request *next) | ||
364 | { | ||
365 | int total_phys_segments; | ||
366 | int total_hw_segments; | ||
367 | |||
368 | /* | ||
369 | * First check whether either of the requests is a re-queued | ||
370 | * request; we can't merge them if so. | ||
371 | */ | ||
372 | if (req->special || next->special) | ||
373 | return 0; | ||
374 | |||
375 | /* | ||
376 | * Will it become too large? | ||
377 | */ | ||
378 | if ((req->nr_sectors + next->nr_sectors) > q->max_sectors) | ||
379 | return 0; | ||
380 | |||
381 | total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; | ||
382 | if (blk_phys_contig_segment(q, req->biotail, next->bio)) | ||
383 | total_phys_segments--; | ||
384 | |||
385 | if (total_phys_segments > q->max_phys_segments) | ||
386 | return 0; | ||
387 | |||
388 | total_hw_segments = req->nr_hw_segments + next->nr_hw_segments; | ||
389 | if (blk_hw_contig_segment(q, req->biotail, next->bio)) { | ||
390 | int len = req->biotail->bi_hw_back_size + | ||
391 | next->bio->bi_hw_front_size; | ||
392 | /* | ||
393 | * propagate the combined length to the end of the requests | ||
394 | */ | ||
395 | if (req->nr_hw_segments == 1) | ||
396 | req->bio->bi_hw_front_size = len; | ||
397 | if (next->nr_hw_segments == 1) | ||
398 | next->biotail->bi_hw_back_size = len; | ||
399 | total_hw_segments--; | ||
400 | } | ||
401 | |||
402 | if (total_hw_segments > q->max_hw_segments) | ||
403 | return 0; | ||
404 | |||
405 | /* Merge is OK... */ | ||
406 | req->nr_phys_segments = total_phys_segments; | ||
407 | req->nr_hw_segments = total_hw_segments; | ||
408 | return 1; | ||
409 | } | ||
410 | |||
411 | /* | ||
412 | * Has to be called with the request spinlock acquired | ||
413 | */ | ||
414 | static int attempt_merge(struct request_queue *q, struct request *req, | ||
415 | struct request *next) | ||
416 | { | ||
417 | if (!rq_mergeable(req) || !rq_mergeable(next)) | ||
418 | return 0; | ||
419 | |||
420 | /* | ||
421 | * not contiguous | ||
422 | */ | ||
423 | if (req->sector + req->nr_sectors != next->sector) | ||
424 | return 0; | ||
425 | |||
426 | if (rq_data_dir(req) != rq_data_dir(next) | ||
427 | || req->rq_disk != next->rq_disk | ||
428 | || next->special) | ||
429 | return 0; | ||
430 | |||
431 | /* | ||
432 | * If we are allowed to merge, then append bio list | ||
433 | * from next to rq and release next. merge_requests_fn | ||
434 | * will have updated segment counts, update sector | ||
435 | * counts here. | ||
436 | */ | ||
437 | if (!ll_merge_requests_fn(q, req, next)) | ||
438 | return 0; | ||
439 | |||
440 | /* | ||
441 | * At this point we have either done a back merge | ||
442 | * or front merge. We need the smaller start_time of | ||
443 | * the merged requests to be the current request | ||
444 | * for accounting purposes. | ||
445 | */ | ||
446 | if (time_after(req->start_time, next->start_time)) | ||
447 | req->start_time = next->start_time; | ||
448 | |||
449 | req->biotail->bi_next = next->bio; | ||
450 | req->biotail = next->biotail; | ||
451 | |||
452 | req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors; | ||
453 | |||
454 | elv_merge_requests(q, req, next); | ||
455 | |||
456 | if (req->rq_disk) { | ||
457 | disk_round_stats(req->rq_disk); | ||
458 | req->rq_disk->in_flight--; | ||
459 | } | ||
460 | |||
461 | req->ioprio = ioprio_best(req->ioprio, next->ioprio); | ||
462 | |||
463 | __blk_put_request(q, next); | ||
464 | return 1; | ||
465 | } | ||
466 | |||
467 | int attempt_back_merge(struct request_queue *q, struct request *rq) | ||
468 | { | ||
469 | struct request *next = elv_latter_request(q, rq); | ||
470 | |||
471 | if (next) | ||
472 | return attempt_merge(q, rq, next); | ||
473 | |||
474 | return 0; | ||
475 | } | ||
476 | |||
477 | int attempt_front_merge(struct request_queue *q, struct request *rq) | ||
478 | { | ||
479 | struct request *prev = elv_former_request(q, rq); | ||
480 | |||
481 | if (prev) | ||
482 | return attempt_merge(q, prev, rq); | ||
483 | |||
484 | return 0; | ||
485 | } | ||
diff --git a/block/blk-settings.c b/block/blk-settings.c new file mode 100644 index 000000000000..c8d0c5724098 --- /dev/null +++ b/block/blk-settings.c | |||
@@ -0,0 +1,395 @@ | |||
1 | /* | ||
2 | * Functions related to setting various queue properties from drivers | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/init.h> | ||
7 | #include <linux/bio.h> | ||
8 | #include <linux/blkdev.h> | ||
9 | #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ | ||
10 | |||
11 | #include "blk.h" | ||
12 | |||
13 | unsigned long blk_max_low_pfn; | ||
14 | EXPORT_SYMBOL(blk_max_low_pfn); | ||
15 | |||
16 | unsigned long blk_max_pfn; | ||
17 | EXPORT_SYMBOL(blk_max_pfn); | ||
18 | |||
19 | /** | ||
20 | * blk_queue_prep_rq - set a prepare_request function for queue | ||
21 | * @q: queue | ||
22 | * @pfn: prepare_request function | ||
23 | * | ||
24 | * It's possible for a queue to register a prepare_request callback which | ||
25 | * is invoked before the request is handed to the request_fn. The goal of | ||
26 | * the function is to prepare a request for I/O, it can be used to build a | ||
27 | * cdb from the request data for instance. | ||
28 | * | ||
29 | */ | ||
30 | void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) | ||
31 | { | ||
32 | q->prep_rq_fn = pfn; | ||
33 | } | ||
34 | EXPORT_SYMBOL(blk_queue_prep_rq); | ||
35 | |||
36 | /** | ||
37 | * blk_queue_merge_bvec - set a merge_bvec function for queue | ||
38 | * @q: queue | ||
39 | * @mbfn: merge_bvec_fn | ||
40 | * | ||
41 | * Usually queues have static limitations on the max sectors or segments that | ||
42 | * we can put in a request. Stacking drivers may have some settings that | ||
43 | * are dynamic, and thus we have to query the queue whether it is ok to | ||
44 | * add a new bio_vec to a bio at a given offset or not. If the block device | ||
45 | * has such limitations, it needs to register a merge_bvec_fn to control | ||
46 | * the size of bio's sent to it. Note that a block device *must* allow a | ||
47 | * single page to be added to an empty bio. The block device driver may want | ||
48 | * to use the bio_split() function to deal with these bio's. By default | ||
49 | * no merge_bvec_fn is defined for a queue, and only the fixed limits are | ||
50 | * honored. | ||
51 | */ | ||
52 | void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) | ||
53 | { | ||
54 | q->merge_bvec_fn = mbfn; | ||
55 | } | ||
56 | EXPORT_SYMBOL(blk_queue_merge_bvec); | ||
57 | |||
58 | void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) | ||
59 | { | ||
60 | q->softirq_done_fn = fn; | ||
61 | } | ||
62 | EXPORT_SYMBOL(blk_queue_softirq_done); | ||
63 | |||
64 | /** | ||
65 | * blk_queue_make_request - define an alternate make_request function for a device | ||
66 | * @q: the request queue for the device to be affected | ||
67 | * @mfn: the alternate make_request function | ||
68 | * | ||
69 | * Description: | ||
70 | * The normal way for &struct bios to be passed to a device | ||
71 | * driver is for them to be collected into requests on a request | ||
72 | * queue, and then to allow the device driver to select requests | ||
73 | * off that queue when it is ready. This works well for many block | ||
74 | * devices. However some block devices (typically virtual devices | ||
75 | * such as md or lvm) do not benefit from the processing on the | ||
76 | * request queue, and are served best by having the requests passed | ||
77 | * directly to them. This can be achieved by providing a function | ||
78 | * to blk_queue_make_request(). | ||
79 | * | ||
80 | * Caveat: | ||
81 | * The driver that does this *must* be able to deal appropriately | ||
82 | * with buffers in "highmemory". This can be accomplished by either calling | ||
83 | * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling | ||
84 | * blk_queue_bounce() to create a buffer in normal memory. | ||
85 | **/ | ||
86 | void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) | ||
87 | { | ||
88 | /* | ||
89 | * set defaults | ||
90 | */ | ||
91 | q->nr_requests = BLKDEV_MAX_RQ; | ||
92 | blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); | ||
93 | blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); | ||
94 | q->make_request_fn = mfn; | ||
95 | q->backing_dev_info.ra_pages = | ||
96 | (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; | ||
97 | q->backing_dev_info.state = 0; | ||
98 | q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; | ||
99 | blk_queue_max_sectors(q, SAFE_MAX_SECTORS); | ||
100 | blk_queue_hardsect_size(q, 512); | ||
101 | blk_queue_dma_alignment(q, 511); | ||
102 | blk_queue_congestion_threshold(q); | ||
103 | q->nr_batching = BLK_BATCH_REQ; | ||
104 | |||
105 | q->unplug_thresh = 4; /* hmm */ | ||
106 | q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */ | ||
107 | if (q->unplug_delay == 0) | ||
108 | q->unplug_delay = 1; | ||
109 | |||
110 | INIT_WORK(&q->unplug_work, blk_unplug_work); | ||
111 | |||
112 | q->unplug_timer.function = blk_unplug_timeout; | ||
113 | q->unplug_timer.data = (unsigned long)q; | ||
114 | |||
115 | /* | ||
116 | * by default assume old behaviour and bounce for any highmem page | ||
117 | */ | ||
118 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); | ||
119 | } | ||
120 | EXPORT_SYMBOL(blk_queue_make_request); | ||
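
For illustration, a stripped-down stacking driver in the md/lvm mold
might register its own make_request function like this; the
example_dev remapping is hypothetical, not taken from the patch:

	#include <linux/blkdev.h>

	struct example_dev {
		struct block_device *backing;
		sector_t start;
	};

	static int example_make_request(struct request_queue *q,
					struct bio *bio)
	{
		struct example_dev *ed = q->queuedata;

		/* redirect the bio at the backing device and resubmit */
		bio->bi_bdev = ed->backing;
		bio->bi_sector += ed->start;
		generic_make_request(bio);
		return 0;
	}

	/* at setup time:
	 *	q = blk_alloc_queue(GFP_KERNEL);
	 *	q->queuedata = ed;
	 *	blk_queue_make_request(q, example_make_request);
	 */
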
121 | |||
122 | /** | ||
123 | * blk_queue_bounce_limit - set bounce buffer limit for queue | ||
124 | * @q: the request queue for the device | ||
125 | * @dma_addr: bus address limit | ||
126 | * | ||
127 | * Description: | ||
128 | * Different hardware can have different requirements as to what pages | ||
129 | * it can do I/O directly to. A low level driver can call | ||
130 | * blk_queue_bounce_limit to have lower memory pages allocated as bounce | ||
131 | * buffers for doing I/O to pages residing above @page. | ||
132 | **/ | ||
133 | void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) | ||
134 | { | ||
135 | unsigned long b_pfn = dma_addr >> PAGE_SHIFT; | ||
136 | int dma = 0; | ||
137 | |||
138 | q->bounce_gfp = GFP_NOIO; | ||
139 | #if BITS_PER_LONG == 64 | ||
140 | /* Assume anything <= 4GB can be handled by IOMMU. | ||
141 | Actually some IOMMUs can handle everything, but I don't | ||
142 | know of a way to test this here. */ | ||
143 | if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) | ||
144 | dma = 1; | ||
145 | q->bounce_pfn = max_low_pfn; | ||
146 | #else | ||
147 | if (b_pfn < blk_max_low_pfn) | ||
148 | dma = 1; | ||
149 | q->bounce_pfn = b_pfn; | ||
150 | #endif | ||
151 | if (dma) { | ||
152 | init_emergency_isa_pool(); | ||
153 | q->bounce_gfp = GFP_NOIO | GFP_DMA; | ||
154 | q->bounce_pfn = b_pfn; | ||
155 | } | ||
156 | } | ||
157 | EXPORT_SYMBOL(blk_queue_bounce_limit); | ||
158 | |||
159 | /** | ||
160 | * blk_queue_max_sectors - set max sectors for a request for this queue | ||
161 | * @q: the request queue for the device | ||
162 | * @max_sectors: max sectors in the usual 512b unit | ||
163 | * | ||
164 | * Description: | ||
165 | * Enables a low level driver to set an upper limit on the size of | ||
166 | * received requests. | ||
167 | **/ | ||
168 | void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors) | ||
169 | { | ||
170 | if ((max_sectors << 9) < PAGE_CACHE_SIZE) { | ||
171 | max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); | ||
172 | printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, | ||
173 | max_sectors); | ||
174 | } | ||
175 | |||
176 | if (BLK_DEF_MAX_SECTORS > max_sectors) | ||
177 | q->max_hw_sectors = q->max_sectors = max_sectors; | ||
178 | else { | ||
179 | q->max_sectors = BLK_DEF_MAX_SECTORS; | ||
180 | q->max_hw_sectors = max_sectors; | ||
181 | } | ||
182 | } | ||
183 | EXPORT_SYMBOL(blk_queue_max_sectors); | ||
184 | |||
185 | /** | ||
186 | * blk_queue_max_phys_segments - set max phys segments for a request for this queue | ||
187 | * @q: the request queue for the device | ||
188 | * @max_segments: max number of segments | ||
189 | * | ||
190 | * Description: | ||
191 | * Enables a low level driver to set an upper limit on the number of | ||
192 | * physical data segments in a request. This would be the largest sized | ||
193 | * scatter list the driver could handle. | ||
194 | **/ | ||
195 | void blk_queue_max_phys_segments(struct request_queue *q, | ||
196 | unsigned short max_segments) | ||
197 | { | ||
198 | if (!max_segments) { | ||
199 | max_segments = 1; | ||
200 | printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, | ||
201 | max_segments); | ||
202 | } | ||
203 | |||
204 | q->max_phys_segments = max_segments; | ||
205 | } | ||
206 | EXPORT_SYMBOL(blk_queue_max_phys_segments); | ||
207 | |||
208 | /** | ||
209 | * blk_queue_max_hw_segments - set max hw segments for a request for this queue | ||
210 | * @q: the request queue for the device | ||
211 | * @max_segments: max number of segments | ||
212 | * | ||
213 | * Description: | ||
214 | * Enables a low level driver to set an upper limit on the number of | ||
215 | * hw data segments in a request. This would be the largest number of | ||
216 | * address/length pairs the host adapter can actually give at once | ||
217 | * to the device. | ||
218 | **/ | ||
219 | void blk_queue_max_hw_segments(struct request_queue *q, | ||
220 | unsigned short max_segments) | ||
221 | { | ||
222 | if (!max_segments) { | ||
223 | max_segments = 1; | ||
224 | printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, | ||
225 | max_segments); | ||
226 | } | ||
227 | |||
228 | q->max_hw_segments = max_segments; | ||
229 | } | ||
230 | EXPORT_SYMBOL(blk_queue_max_hw_segments); | ||
231 | |||
232 | /** | ||
233 | * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg | ||
234 | * @q: the request queue for the device | ||
235 | * @max_size: max size of segment in bytes | ||
236 | * | ||
237 | * Description: | ||
238 | * Enables a low level driver to set an upper limit on the size of a | ||
239 | * coalesced segment | ||
240 | **/ | ||
241 | void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) | ||
242 | { | ||
243 | if (max_size < PAGE_CACHE_SIZE) { | ||
244 | max_size = PAGE_CACHE_SIZE; | ||
245 | printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, | ||
246 | max_size); | ||
247 | } | ||
248 | |||
249 | q->max_segment_size = max_size; | ||
250 | } | ||
251 | EXPORT_SYMBOL(blk_queue_max_segment_size); | ||
252 | |||
253 | /** | ||
254 | * blk_queue_hardsect_size - set hardware sector size for the queue | ||
255 | * @q: the request queue for the device | ||
256 | * @size: the hardware sector size, in bytes | ||
257 | * | ||
258 | * Description: | ||
259 | * This should typically be set to the lowest possible sector size | ||
260 | * that the hardware can operate on (possibly without reverting to | ||
261 | * even internal read-modify-write operations). Usually the default | ||
262 | * of 512 covers most hardware. | ||
263 | **/ | ||
264 | void blk_queue_hardsect_size(struct request_queue *q, unsigned short size) | ||
265 | { | ||
266 | q->hardsect_size = size; | ||
267 | } | ||
268 | EXPORT_SYMBOL(blk_queue_hardsect_size); | ||
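
Taken together, a driver's probe path typically issues one call per
limit. A sketch with made-up hardware capabilities (DMA below 4GB,
64 segments of up to 64KB each, at most 256KB per command):

	#include <linux/blkdev.h>

	static void example_set_limits(struct request_queue *q)
	{
		blk_queue_bounce_limit(q, 0xffffffffULL);
		blk_queue_max_sectors(q, 512);		/* 512 * 512b = 256KB */
		blk_queue_max_phys_segments(q, 64);
		blk_queue_max_hw_segments(q, 64);
		blk_queue_max_segment_size(q, 65536);
		blk_queue_hardsect_size(q, 512);
	}
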
269 | |||
270 | /* | ||
271 | * Returns the minimum that is _not_ zero, unless both are zero. | ||
272 | */ | ||
273 | #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r))) | ||
274 | |||
275 | /** | ||
276 | * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers | ||
277 | * @t: the stacking driver (top) | ||
278 | * @b: the underlying device (bottom) | ||
279 | **/ | ||
280 | void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) | ||
281 | { | ||
282 | /* zero is "infinity" */ | ||
283 | t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); | ||
284 | t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); | ||
285 | |||
286 | t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments); | ||
287 | t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments); | ||
288 | t->max_segment_size = min(t->max_segment_size, b->max_segment_size); | ||
289 | t->hardsect_size = max(t->hardsect_size, b->hardsect_size); | ||
290 | if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) | ||
291 | clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags); | ||
292 | } | ||
293 | EXPORT_SYMBOL(blk_queue_stack_limits); | ||
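
A stacking driver would call this once per component device it sits
on top of; a sketch, with a hypothetical function name:

	#include <linux/blkdev.h>

	static void example_add_component(struct request_queue *t,
					  struct block_device *bdev)
	{
		/* clamp the top queue to what the bottom one can take */
		blk_queue_stack_limits(t, bdev_get_queue(bdev));
	}
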
294 | |||
295 | /** | ||
296 | * blk_queue_dma_drain - Set up a drain buffer for excess dma. | ||
297 | * | ||
298 | * @q: the request queue for the device | ||
299 | * @buf: physically contiguous buffer | ||
300 | * @size: size of the buffer in bytes | ||
301 | * | ||
302 | * Some devices have excess DMA problems and can't simply discard (or | ||
303 | * zero fill) the unwanted piece of the transfer. They have to have a | ||
304 | * real area of memory to transfer it into. The use case for this is | ||
305 | * ATAPI devices in DMA mode. If the packet command causes a transfer | ||
306 | * bigger than the transfer size some HBAs will lock up if there | ||
307 | * aren't DMA elements to contain the excess transfer. What this API | ||
308 | * does is adjust the queue so that the buf is always appended | ||
309 | * silently to the scatterlist. | ||
310 | * | ||
311 | * Note: This routine adjusts max_hw_segments to make room for | ||
312 | * appending the drain buffer. If you call | ||
313 | * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after | ||
314 | * calling this routine, you must set the limit to one fewer than your | ||
315 | * device can support otherwise there won't be room for the drain | ||
316 | * buffer. | ||
317 | */ | ||
318 | int blk_queue_dma_drain(struct request_queue *q, void *buf, | ||
319 | unsigned int size) | ||
320 | { | ||
321 | if (q->max_hw_segments < 2 || q->max_phys_segments < 2) | ||
322 | return -EINVAL; | ||
323 | /* make room for appending the drain */ | ||
324 | --q->max_hw_segments; | ||
325 | --q->max_phys_segments; | ||
326 | q->dma_drain_buffer = buf; | ||
327 | q->dma_drain_size = size; | ||
328 | |||
329 | return 0; | ||
330 | } | ||
331 | EXPORT_SYMBOL_GPL(blk_queue_dma_drain); | ||
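
Setup is a single call at probe time once a suitably sized,
physically contiguous buffer exists. A sketch with a hypothetical
drain size:

	#include <linux/blkdev.h>
	#include <linux/slab.h>

	#define EXAMPLE_DRAIN_SIZE 1024	/* hypothetical, device dependent */

	static int example_setup_drain(struct request_queue *q)
	{
		/* kmalloc memory is physically contiguous */
		void *buf = kmalloc(EXAMPLE_DRAIN_SIZE, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		/* note: this takes one hw and one phys segment off the limits */
		return blk_queue_dma_drain(q, buf, EXAMPLE_DRAIN_SIZE);
	}
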
332 | |||
333 | /** | ||
334 | * blk_queue_segment_boundary - set boundary rules for segment merging | ||
335 | * @q: the request queue for the device | ||
336 | * @mask: the memory boundary mask | ||
337 | **/ | ||
338 | void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) | ||
339 | { | ||
340 | if (mask < PAGE_CACHE_SIZE - 1) { | ||
341 | mask = PAGE_CACHE_SIZE - 1; | ||
342 | printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__, | ||
343 | mask); | ||
344 | } | ||
345 | |||
346 | q->seg_boundary_mask = mask; | ||
347 | } | ||
348 | EXPORT_SYMBOL(blk_queue_segment_boundary); | ||
349 | |||
350 | /** | ||
351 | * blk_queue_dma_alignment - set dma length and memory alignment | ||
352 | * @q: the request queue for the device | ||
353 | * @mask: alignment mask | ||
354 | * | ||
355 | * Description: | ||
356 | * Set required memory and length alignment for direct dma transactions. | ||
357 | * This is used when building direct io requests for the queue. | ||
358 | * | ||
359 | **/ | ||
360 | void blk_queue_dma_alignment(struct request_queue *q, int mask) | ||
361 | { | ||
362 | q->dma_alignment = mask; | ||
363 | } | ||
364 | EXPORT_SYMBOL(blk_queue_dma_alignment); | ||
365 | |||
366 | /** | ||
367 | * blk_queue_update_dma_alignment - update dma length and memory alignment | ||
368 | * @q: the request queue for the device | ||
369 | * @mask: alignment mask | ||
370 | * | ||
371 | * description: | ||
372 | * update required memory and length alignment for direct dma transactions. | ||
373 | * If the requested alignment is larger than the current alignment, then | ||
374 | * the current queue alignment is updated to the new value, otherwise it | ||
375 | * is left alone. The design of this is to allow multiple objects | ||
376 | * (driver, device, transport etc) to set their respective | ||
377 | * alignments without having them interfere. | ||
378 | * | ||
379 | **/ | ||
380 | void blk_queue_update_dma_alignment(struct request_queue *q, int mask) | ||
381 | { | ||
382 | BUG_ON(mask > PAGE_SIZE); | ||
383 | |||
384 | if (mask > q->dma_alignment) | ||
385 | q->dma_alignment = mask; | ||
386 | } | ||
387 | EXPORT_SYMBOL(blk_queue_update_dma_alignment); | ||
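
For example, if a hypothetical host requires 4-byte alignment while
the attached device requires 512-byte alignment, the stricter mask
wins regardless of call order:

	static void example_set_alignment(struct request_queue *q)
	{
		blk_queue_update_dma_alignment(q, 3);	/* host: 4-byte */
		blk_queue_update_dma_alignment(q, 511);	/* device: 512-byte */
		/* q->dma_alignment is now 511 */
	}
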
388 | |||
389 | int __init blk_settings_init(void) | ||
390 | { | ||
391 | blk_max_low_pfn = max_low_pfn - 1; | ||
392 | blk_max_pfn = max_pfn - 1; | ||
393 | return 0; | ||
394 | } | ||
395 | subsys_initcall(blk_settings_init); | ||
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c new file mode 100644 index 000000000000..54d0db116153 --- /dev/null +++ b/block/blk-sysfs.c | |||
@@ -0,0 +1,310 @@ | |||
1 | /* | ||
2 | * Functions related to sysfs handling | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/bio.h> | ||
7 | #include <linux/blkdev.h> | ||
8 | #include <linux/blktrace_api.h> | ||
9 | |||
10 | #include "blk.h" | ||
11 | |||
12 | struct queue_sysfs_entry { | ||
13 | struct attribute attr; | ||
14 | ssize_t (*show)(struct request_queue *, char *); | ||
15 | ssize_t (*store)(struct request_queue *, const char *, size_t); | ||
16 | }; | ||
17 | |||
18 | static ssize_t | ||
19 | queue_var_show(unsigned int var, char *page) | ||
20 | { | ||
21 | return sprintf(page, "%u\n", var); | ||
22 | } | ||
23 | |||
24 | static ssize_t | ||
25 | queue_var_store(unsigned long *var, const char *page, size_t count) | ||
26 | { | ||
27 | char *p = (char *) page; | ||
28 | |||
29 | *var = simple_strtoul(p, &p, 10); | ||
30 | return count; | ||
31 | } | ||
32 | |||
33 | static ssize_t queue_requests_show(struct request_queue *q, char *page) | ||
34 | { | ||
35 | return queue_var_show(q->nr_requests, (page)); | ||
36 | } | ||
37 | |||
38 | static ssize_t | ||
39 | queue_requests_store(struct request_queue *q, const char *page, size_t count) | ||
40 | { | ||
41 | struct request_list *rl = &q->rq; | ||
42 | unsigned long nr; | ||
43 | int ret = queue_var_store(&nr, page, count); | ||
44 | if (nr < BLKDEV_MIN_RQ) | ||
45 | nr = BLKDEV_MIN_RQ; | ||
46 | |||
47 | spin_lock_irq(q->queue_lock); | ||
48 | q->nr_requests = nr; | ||
49 | blk_queue_congestion_threshold(q); | ||
50 | |||
51 | if (rl->count[READ] >= queue_congestion_on_threshold(q)) | ||
52 | blk_set_queue_congested(q, READ); | ||
53 | else if (rl->count[READ] < queue_congestion_off_threshold(q)) | ||
54 | blk_clear_queue_congested(q, READ); | ||
55 | |||
56 | if (rl->count[WRITE] >= queue_congestion_on_threshold(q)) | ||
57 | blk_set_queue_congested(q, WRITE); | ||
58 | else if (rl->count[WRITE] < queue_congestion_off_threshold(q)) | ||
59 | blk_clear_queue_congested(q, WRITE); | ||
60 | |||
61 | if (rl->count[READ] >= q->nr_requests) { | ||
62 | blk_set_queue_full(q, READ); | ||
63 | } else if (rl->count[READ]+1 <= q->nr_requests) { | ||
64 | blk_clear_queue_full(q, READ); | ||
65 | wake_up(&rl->wait[READ]); | ||
66 | } | ||
67 | |||
68 | if (rl->count[WRITE] >= q->nr_requests) { | ||
69 | blk_set_queue_full(q, WRITE); | ||
70 | } else if (rl->count[WRITE]+1 <= q->nr_requests) { | ||
71 | blk_clear_queue_full(q, WRITE); | ||
72 | wake_up(&rl->wait[WRITE]); | ||
73 | } | ||
74 | spin_unlock_irq(q->queue_lock); | ||
75 | return ret; | ||
76 | } | ||
77 | |||
78 | static ssize_t queue_ra_show(struct request_queue *q, char *page) | ||
79 | { | ||
80 | int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10); | ||
81 | |||
82 | return queue_var_show(ra_kb, (page)); | ||
83 | } | ||
84 | |||
85 | static ssize_t | ||
86 | queue_ra_store(struct request_queue *q, const char *page, size_t count) | ||
87 | { | ||
88 | unsigned long ra_kb; | ||
89 | ssize_t ret = queue_var_store(&ra_kb, page, count); | ||
90 | |||
91 | spin_lock_irq(q->queue_lock); | ||
92 | q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); | ||
93 | spin_unlock_irq(q->queue_lock); | ||
94 | |||
95 | return ret; | ||
96 | } | ||
97 | |||
98 | static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) | ||
99 | { | ||
100 | int max_sectors_kb = q->max_sectors >> 1; | ||
101 | |||
102 | return queue_var_show(max_sectors_kb, (page)); | ||
103 | } | ||
104 | |||
105 | static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page) | ||
106 | { | ||
107 | return queue_var_show(q->hardsect_size, page); | ||
108 | } | ||
109 | |||
110 | static ssize_t | ||
111 | queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) | ||
112 | { | ||
113 | unsigned long max_sectors_kb, | ||
114 | max_hw_sectors_kb = q->max_hw_sectors >> 1, | ||
115 | page_kb = 1 << (PAGE_CACHE_SHIFT - 10); | ||
116 | ssize_t ret = queue_var_store(&max_sectors_kb, page, count); | ||
117 | |||
118 | if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) | ||
119 | return -EINVAL; | ||
120 | /* | ||
121 | * Take the queue lock to update the readahead and max_sectors | ||
122 | * values synchronously: | ||
123 | */ | ||
124 | spin_lock_irq(q->queue_lock); | ||
125 | q->max_sectors = max_sectors_kb << 1; | ||
126 | spin_unlock_irq(q->queue_lock); | ||
127 | |||
128 | return ret; | ||
129 | } | ||
130 | |||
131 | static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) | ||
132 | { | ||
133 | int max_hw_sectors_kb = q->max_hw_sectors >> 1; | ||
134 | |||
135 | return queue_var_show(max_hw_sectors_kb, (page)); | ||
136 | } | ||
137 | |||
138 | |||
139 | static struct queue_sysfs_entry queue_requests_entry = { | ||
140 | .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, | ||
141 | .show = queue_requests_show, | ||
142 | .store = queue_requests_store, | ||
143 | }; | ||
144 | |||
145 | static struct queue_sysfs_entry queue_ra_entry = { | ||
146 | .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR }, | ||
147 | .show = queue_ra_show, | ||
148 | .store = queue_ra_store, | ||
149 | }; | ||
150 | |||
151 | static struct queue_sysfs_entry queue_max_sectors_entry = { | ||
152 | .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR }, | ||
153 | .show = queue_max_sectors_show, | ||
154 | .store = queue_max_sectors_store, | ||
155 | }; | ||
156 | |||
157 | static struct queue_sysfs_entry queue_max_hw_sectors_entry = { | ||
158 | .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO }, | ||
159 | .show = queue_max_hw_sectors_show, | ||
160 | }; | ||
161 | |||
162 | static struct queue_sysfs_entry queue_iosched_entry = { | ||
163 | .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR }, | ||
164 | .show = elv_iosched_show, | ||
165 | .store = elv_iosched_store, | ||
166 | }; | ||
167 | |||
168 | static struct queue_sysfs_entry queue_hw_sector_size_entry = { | ||
169 | .attr = {.name = "hw_sector_size", .mode = S_IRUGO }, | ||
170 | .show = queue_hw_sector_size_show, | ||
171 | }; | ||
172 | |||
173 | static struct attribute *default_attrs[] = { | ||
174 | &queue_requests_entry.attr, | ||
175 | &queue_ra_entry.attr, | ||
176 | &queue_max_hw_sectors_entry.attr, | ||
177 | &queue_max_sectors_entry.attr, | ||
178 | &queue_iosched_entry.attr, | ||
179 | &queue_hw_sector_size_entry.attr, | ||
180 | NULL, | ||
181 | }; | ||
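
These attributes surface under /sys/block/<disk>/queue/. A small
userspace sketch that reads one of them; the device name sda is an
assumption:

	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		FILE *f = fopen("/sys/block/sda/queue/nr_requests", "r");

		if (f && fgets(buf, sizeof(buf), f))
			printf("nr_requests: %s", buf);
		if (f)
			fclose(f);
		return 0;
	}
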
182 | |||
183 | #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr) | ||
184 | |||
185 | static ssize_t | ||
186 | queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
187 | { | ||
188 | struct queue_sysfs_entry *entry = to_queue(attr); | ||
189 | struct request_queue *q = | ||
190 | container_of(kobj, struct request_queue, kobj); | ||
191 | ssize_t res; | ||
192 | |||
193 | if (!entry->show) | ||
194 | return -EIO; | ||
195 | mutex_lock(&q->sysfs_lock); | ||
196 | if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { | ||
197 | mutex_unlock(&q->sysfs_lock); | ||
198 | return -ENOENT; | ||
199 | } | ||
200 | res = entry->show(q, page); | ||
201 | mutex_unlock(&q->sysfs_lock); | ||
202 | return res; | ||
203 | } | ||
204 | |||
205 | static ssize_t | ||
206 | queue_attr_store(struct kobject *kobj, struct attribute *attr, | ||
207 | const char *page, size_t length) | ||
208 | { | ||
209 | struct queue_sysfs_entry *entry = to_queue(attr); | ||
210 | struct request_queue *q; | ||
211 | ssize_t res; | ||
212 | |||
213 | if (!entry->store) | ||
214 | return -EIO; | ||
215 | |||
216 | q = container_of(kobj, struct request_queue, kobj); | ||
217 | mutex_lock(&q->sysfs_lock); | ||
218 | if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { | ||
219 | mutex_unlock(&q->sysfs_lock); | ||
220 | return -ENOENT; | ||
221 | } | ||
222 | res = entry->store(q, page, length); | ||
223 | mutex_unlock(&q->sysfs_lock); | ||
224 | return res; | ||
225 | } | ||
226 | |||
227 | /** | ||
228 | * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed | ||
229 | * @kobj: the kobj belonging of the request queue to be released | ||
230 | * | ||
231 | * Description: | ||
232 | * blk_cleanup_queue is the pair to blk_init_queue() or | ||
233 | * blk_queue_make_request(). It should be called when a request queue is | ||
234 | * being released; typically when a block device is being de-registered. | ||
235 | * Currently, its primary task is to free all the &struct request | ||
236 | * structures that were allocated to the queue and the queue itself. | ||
237 | * | ||
238 | * Caveat: | ||
239 | * Hopefully the low level driver will have finished any | ||
240 | * outstanding requests first... | ||
241 | **/ | ||
242 | static void blk_release_queue(struct kobject *kobj) | ||
243 | { | ||
244 | struct request_queue *q = | ||
245 | container_of(kobj, struct request_queue, kobj); | ||
246 | struct request_list *rl = &q->rq; | ||
247 | |||
248 | blk_sync_queue(q); | ||
249 | |||
250 | if (rl->rq_pool) | ||
251 | mempool_destroy(rl->rq_pool); | ||
252 | |||
253 | if (q->queue_tags) | ||
254 | __blk_queue_free_tags(q); | ||
255 | |||
256 | blk_trace_shutdown(q); | ||
257 | |||
258 | bdi_destroy(&q->backing_dev_info); | ||
259 | kmem_cache_free(blk_requestq_cachep, q); | ||
260 | } | ||
261 | |||
262 | static struct sysfs_ops queue_sysfs_ops = { | ||
263 | .show = queue_attr_show, | ||
264 | .store = queue_attr_store, | ||
265 | }; | ||
266 | |||
267 | struct kobj_type blk_queue_ktype = { | ||
268 | .sysfs_ops = &queue_sysfs_ops, | ||
269 | .default_attrs = default_attrs, | ||
270 | .release = blk_release_queue, | ||
271 | }; | ||
272 | |||
273 | int blk_register_queue(struct gendisk *disk) | ||
274 | { | ||
275 | int ret; | ||
276 | |||
277 | struct request_queue *q = disk->queue; | ||
278 | |||
279 | if (!q || !q->request_fn) | ||
280 | return -ENXIO; | ||
281 | |||
282 | ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj), | ||
283 | "%s", "queue"); | ||
284 | if (ret < 0) | ||
285 | return ret; | ||
286 | |||
287 | kobject_uevent(&q->kobj, KOBJ_ADD); | ||
288 | |||
289 | ret = elv_register_queue(q); | ||
290 | if (ret) { | ||
291 | kobject_uevent(&q->kobj, KOBJ_REMOVE); | ||
292 | kobject_del(&q->kobj); | ||
293 | return ret; | ||
294 | } | ||
295 | |||
296 | return 0; | ||
297 | } | ||
298 | |||
299 | void blk_unregister_queue(struct gendisk *disk) | ||
300 | { | ||
301 | struct request_queue *q = disk->queue; | ||
302 | |||
303 | if (q && q->request_fn) { | ||
304 | elv_unregister_queue(q); | ||
305 | |||
306 | kobject_uevent(&q->kobj, KOBJ_REMOVE); | ||
307 | kobject_del(&q->kobj); | ||
308 | kobject_put(&disk->dev.kobj); | ||
309 | } | ||
310 | } | ||
diff --git a/block/blk-tag.c b/block/blk-tag.c new file mode 100644 index 000000000000..a8c37d4bbb32 --- /dev/null +++ b/block/blk-tag.c | |||
@@ -0,0 +1,390 @@ | |||
1 | /* | ||
2 | * Functions related to tagged command queuing | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/bio.h> | ||
7 | #include <linux/blkdev.h> | ||
8 | |||
9 | /** | ||
10 | * blk_queue_find_tag - find a request by its tag and queue | ||
11 | * @q: The request queue for the device | ||
12 | * @tag: The tag of the request | ||
13 | * | ||
14 | * Notes: | ||
15 | * Should be used when a device returns a tag and you want to match | ||
16 | * it with a request. | ||
17 | * | ||
18 | * no locks need be held. | ||
19 | **/ | ||
20 | struct request *blk_queue_find_tag(struct request_queue *q, int tag) | ||
21 | { | ||
22 | return blk_map_queue_find_tag(q->queue_tags, tag); | ||
23 | } | ||
24 | EXPORT_SYMBOL(blk_queue_find_tag); | ||
25 | |||
26 | /** | ||
27 | * __blk_free_tags - release a given set of tag maintenance info | ||
28 | * @bqt: the tag map to free | ||
29 | * | ||
30 | * Tries to free the specified @bqt. Returns true if it was | ||
31 | * actually freed and false if there are still references using it. | ||
32 | */ | ||
33 | static int __blk_free_tags(struct blk_queue_tag *bqt) | ||
34 | { | ||
35 | int retval; | ||
36 | |||
37 | retval = atomic_dec_and_test(&bqt->refcnt); | ||
38 | if (retval) { | ||
39 | BUG_ON(bqt->busy); | ||
40 | |||
41 | kfree(bqt->tag_index); | ||
42 | bqt->tag_index = NULL; | ||
43 | |||
44 | kfree(bqt->tag_map); | ||
45 | bqt->tag_map = NULL; | ||
46 | |||
47 | kfree(bqt); | ||
48 | } | ||
49 | |||
50 | return retval; | ||
51 | } | ||
52 | |||
53 | /** | ||
54 | * __blk_queue_free_tags - release tag maintenance info | ||
55 | * @q: the request queue for the device | ||
56 | * | ||
57 | * Notes: | ||
58 | * blk_cleanup_queue() will take care of calling this function, if tagging | ||
59 | * has been used. So there's no need to call this directly. | ||
60 | **/ | ||
61 | void __blk_queue_free_tags(struct request_queue *q) | ||
62 | { | ||
63 | struct blk_queue_tag *bqt = q->queue_tags; | ||
64 | |||
65 | if (!bqt) | ||
66 | return; | ||
67 | |||
68 | __blk_free_tags(bqt); | ||
69 | |||
70 | q->queue_tags = NULL; | ||
71 | q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED); | ||
72 | } | ||
73 | |||
74 | /** | ||
75 | * blk_free_tags - release a given set of tag maintenance info | ||
76 | * @bqt: the tag map to free | ||
77 | * | ||
78 | * For an externally managed @bqt, frees the map. Callers of this | ||
79 | * function must guarantee to have released all the queues that | ||
80 | * might have been using this tag map. | ||
81 | */ | ||
82 | void blk_free_tags(struct blk_queue_tag *bqt) | ||
83 | { | ||
84 | if (unlikely(!__blk_free_tags(bqt))) | ||
85 | BUG(); | ||
86 | } | ||
87 | EXPORT_SYMBOL(blk_free_tags); | ||
88 | |||
89 | /** | ||
90 | * blk_queue_free_tags - release tag maintenance info | ||
91 | * @q: the request queue for the device | ||
92 | * | ||
93 | * Notes: | ||
94 | * This is used to disable tagged queuing on a device, yet leave the | ||
95 | * queue functional. | ||
96 | **/ | ||
97 | void blk_queue_free_tags(struct request_queue *q) | ||
98 | { | ||
99 | clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); | ||
100 | } | ||
101 | EXPORT_SYMBOL(blk_queue_free_tags); | ||
102 | |||
103 | static int | ||
104 | init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth) | ||
105 | { | ||
106 | struct request **tag_index; | ||
107 | unsigned long *tag_map; | ||
108 | int nr_ulongs; | ||
109 | |||
110 | if (q && depth > q->nr_requests * 2) { | ||
111 | depth = q->nr_requests * 2; | ||
112 | printk(KERN_ERR "%s: adjusted depth to %d\n", | ||
113 | __FUNCTION__, depth); | ||
114 | } | ||
115 | |||
116 | tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC); | ||
117 | if (!tag_index) | ||
118 | goto fail; | ||
119 | |||
120 | nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG; | ||
121 | tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); | ||
122 | if (!tag_map) | ||
123 | goto fail; | ||
124 | |||
125 | tags->real_max_depth = depth; | ||
126 | tags->max_depth = depth; | ||
127 | tags->tag_index = tag_index; | ||
128 | tags->tag_map = tag_map; | ||
129 | |||
130 | return 0; | ||
131 | fail: | ||
132 | kfree(tag_index); | ||
133 | return -ENOMEM; | ||
134 | } | ||
135 | |||
136 | static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q, | ||
137 | int depth) | ||
138 | { | ||
139 | struct blk_queue_tag *tags; | ||
140 | |||
141 | tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC); | ||
142 | if (!tags) | ||
143 | goto fail; | ||
144 | |||
145 | if (init_tag_map(q, tags, depth)) | ||
146 | goto fail; | ||
147 | |||
148 | tags->busy = 0; | ||
149 | atomic_set(&tags->refcnt, 1); | ||
150 | return tags; | ||
151 | fail: | ||
152 | kfree(tags); | ||
153 | return NULL; | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * blk_init_tags - initialize the tag info for an external tag map | ||
158 | * @depth: the maximum queue depth supported | ||
160 | **/ | ||
161 | struct blk_queue_tag *blk_init_tags(int depth) | ||
162 | { | ||
163 | return __blk_queue_init_tags(NULL, depth); | ||
164 | } | ||
165 | EXPORT_SYMBOL(blk_init_tags); | ||
166 | |||
167 | /** | ||
168 | * blk_queue_init_tags - initialize the queue tag info | ||
169 | * @q: the request queue for the device | ||
170 | * @depth: the maximum queue depth supported | ||
171 | * @tags: the externally allocated tag map to use, or NULL | ||
172 | **/ | ||
173 | int blk_queue_init_tags(struct request_queue *q, int depth, | ||
174 | struct blk_queue_tag *tags) | ||
175 | { | ||
176 | int rc; | ||
177 | |||
178 | BUG_ON(tags && q->queue_tags && tags != q->queue_tags); | ||
179 | |||
180 | if (!tags && !q->queue_tags) { | ||
181 | tags = __blk_queue_init_tags(q, depth); | ||
182 | |||
183 | if (!tags) | ||
184 | goto fail; | ||
185 | } else if (q->queue_tags) { | ||
186 | rc = blk_queue_resize_tags(q, depth); | ||
187 | if (rc) | ||
188 | return rc; | ||
189 | set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); | ||
190 | return 0; | ||
191 | } else | ||
192 | atomic_inc(&tags->refcnt); | ||
193 | |||
194 | /* | ||
195 | * assign it, all done | ||
196 | */ | ||
197 | q->queue_tags = tags; | ||
198 | q->queue_flags |= (1 << QUEUE_FLAG_QUEUED); | ||
199 | INIT_LIST_HEAD(&q->tag_busy_list); | ||
200 | return 0; | ||
201 | fail: | ||
202 | kfree(tags); | ||
203 | return -ENOMEM; | ||
204 | } | ||
205 | EXPORT_SYMBOL(blk_queue_init_tags); | ||
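
A sketch of the two initialization styles; the depth and the
two-queue arrangement are hypothetical. A driver-private map is
requested by passing NULL tags, while an adapter-wide map from
blk_init_tags() can be handed to several queues:

	#include <linux/blkdev.h>

	static int example_share_tags(struct request_queue *q0,
				      struct request_queue *q1, int depth)
	{
		/* one tag map shared by two queues, e.g. two LUNs
		 * behind an adapter that allocates tags adapter-wide */
		struct blk_queue_tag *shared = blk_init_tags(depth);

		if (!shared)
			return -ENOMEM;
		if (blk_queue_init_tags(q0, depth, shared))
			return -ENOMEM;
		return blk_queue_init_tags(q1, depth, shared);
	}

	/* a private, per-queue map is simply:
	 *	blk_queue_init_tags(q, depth, NULL);
	 */
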
206 | |||
207 | /** | ||
208 | * blk_queue_resize_tags - change the queueing depth | ||
209 | * @q: the request queue for the device | ||
210 | * @new_depth: the new max command queueing depth | ||
211 | * | ||
212 | * Notes: | ||
213 | * Must be called with the queue lock held. | ||
214 | **/ | ||
215 | int blk_queue_resize_tags(struct request_queue *q, int new_depth) | ||
216 | { | ||
217 | struct blk_queue_tag *bqt = q->queue_tags; | ||
218 | struct request **tag_index; | ||
219 | unsigned long *tag_map; | ||
220 | int max_depth, nr_ulongs; | ||
221 | |||
222 | if (!bqt) | ||
223 | return -ENXIO; | ||
224 | |||
225 | /* | ||
226 | * if we already have a large enough real_max_depth, just | ||
227 | * adjust max_depth. *NOTE* as requests with tag value | ||
228 | * between new_depth and real_max_depth can be in-flight, the | ||
229 | * tag map can not be shrunk blindly here. | ||
230 | */ | ||
231 | if (new_depth <= bqt->real_max_depth) { | ||
232 | bqt->max_depth = new_depth; | ||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | /* | ||
237 | * Currently cannot replace a shared tag map with a new | ||
238 | * one, so error out if this is the case | ||
239 | */ | ||
240 | if (atomic_read(&bqt->refcnt) != 1) | ||
241 | return -EBUSY; | ||
242 | |||
243 | /* | ||
244 | * save the old state info, so we can copy it back | ||
245 | */ | ||
246 | tag_index = bqt->tag_index; | ||
247 | tag_map = bqt->tag_map; | ||
248 | max_depth = bqt->real_max_depth; | ||
249 | |||
250 | if (init_tag_map(q, bqt, new_depth)) | ||
251 | return -ENOMEM; | ||
252 | |||
253 | memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *)); | ||
254 | nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG; | ||
255 | memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long)); | ||
256 | |||
257 | kfree(tag_index); | ||
258 | kfree(tag_map); | ||
259 | return 0; | ||
260 | } | ||
261 | EXPORT_SYMBOL(blk_queue_resize_tags); | ||
262 | |||
263 | /** | ||
264 | * blk_queue_end_tag - end tag operations for a request | ||
265 | * @q: the request queue for the device | ||
266 | * @rq: the request that has completed | ||
267 | * | ||
268 | * Description: | ||
269 | * Typically called when end_that_request_first() returns 0, meaning | ||
270 | * all transfers have been done for a request. It's important to call | ||
271 | * this function before end_that_request_last(), as that will put the | ||
272 | * request back on the free list thus corrupting the internal tag list. | ||
273 | * | ||
274 | * Notes: | ||
275 | * queue lock must be held. | ||
276 | **/ | ||
277 | void blk_queue_end_tag(struct request_queue *q, struct request *rq) | ||
278 | { | ||
279 | struct blk_queue_tag *bqt = q->queue_tags; | ||
280 | int tag = rq->tag; | ||
281 | |||
282 | BUG_ON(tag == -1); | ||
283 | |||
284 | if (unlikely(tag >= bqt->real_max_depth)) | ||
285 | /* | ||
286 | * This can happen after tag depth has been reduced. | ||
287 | * FIXME: how about a warning or info message here? | ||
288 | */ | ||
289 | return; | ||
290 | |||
291 | list_del_init(&rq->queuelist); | ||
292 | rq->cmd_flags &= ~REQ_QUEUED; | ||
293 | rq->tag = -1; | ||
294 | |||
295 | if (unlikely(bqt->tag_index[tag] == NULL)) | ||
296 | printk(KERN_ERR "%s: tag %d is missing\n", | ||
297 | __FUNCTION__, tag); | ||
298 | |||
299 | bqt->tag_index[tag] = NULL; | ||
300 | |||
301 | if (unlikely(!test_bit(tag, bqt->tag_map))) { | ||
302 | printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n", | ||
303 | __FUNCTION__, tag); | ||
304 | return; | ||
305 | } | ||
306 | /* | ||
307 | * The tag_map bit acts as a lock for tag_index[bit], so we need | ||
308 | * unlock memory barrier semantics. | ||
309 | */ | ||
310 | clear_bit_unlock(tag, bqt->tag_map); | ||
311 | bqt->busy--; | ||
312 | } | ||
313 | EXPORT_SYMBOL(blk_queue_end_tag); | ||
314 | |||
315 | /** | ||
316 | * blk_queue_start_tag - find a free tag and assign it | ||
317 | * @q: the request queue for the device | ||
318 | * @rq: the block request that needs tagging | ||
319 | * | ||
320 | * Description: | ||
321 | * This can either be used as a stand-alone helper, or possibly be | ||
322 | * assigned as the queue &prep_rq_fn (in which case &struct request | ||
323 | * automagically gets a tag assigned). Note that this function | ||
324 | * assumes that any type of request can be queued! if this is not | ||
325 | * true for your device, you must check the request type before | ||
326 | * calling this function. The request will also be removed from | ||
327 | * the request queue, so it is the driver's responsibility to re-add | ||
328 | * it if it should need to be restarted for some reason. | ||
329 | * | ||
330 | * Notes: | ||
331 | * queue lock must be held. | ||
332 | **/ | ||
333 | int blk_queue_start_tag(struct request_queue *q, struct request *rq) | ||
334 | { | ||
335 | struct blk_queue_tag *bqt = q->queue_tags; | ||
336 | int tag; | ||
337 | |||
338 | if (unlikely((rq->cmd_flags & REQ_QUEUED))) { | ||
339 | printk(KERN_ERR | ||
340 | "%s: request %p for device [%s] already tagged %d", | ||
341 | __FUNCTION__, rq, | ||
342 | rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag); | ||
343 | BUG(); | ||
344 | } | ||
345 | |||
346 | /* | ||
347 | * Protect against shared tag maps, as we may not have exclusive | ||
348 | * access to the tag map. | ||
349 | */ | ||
350 | do { | ||
351 | tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth); | ||
352 | if (tag >= bqt->max_depth) | ||
353 | return 1; | ||
354 | |||
355 | } while (test_and_set_bit_lock(tag, bqt->tag_map)); | ||
356 | /* | ||
357 | * We need lock ordering semantics given by test_and_set_bit_lock. | ||
358 | * See blk_queue_end_tag for details. | ||
359 | */ | ||
360 | |||
361 | rq->cmd_flags |= REQ_QUEUED; | ||
362 | rq->tag = tag; | ||
363 | bqt->tag_index[tag] = rq; | ||
364 | blkdev_dequeue_request(rq); | ||
365 | list_add(&rq->queuelist, &q->tag_busy_list); | ||
366 | bqt->busy++; | ||
367 | return 0; | ||
368 | } | ||
369 | EXPORT_SYMBOL(blk_queue_start_tag); | ||
370 | |||
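
In driver terms the contract reads: take a tag when issuing, hand it back on completion, and treat a non-zero return ("map full") as a signal to stop issuing and requeue. A sketch of that usage shape, reusing the userspace model above; issue() and on_hw_completion() are hypothetical driver hooks:

    /* start_tag()/end_tag() are the userspace model sketched earlier */
    extern int start_tag(void *rq);
    extern void end_tag(int tag);

    static int issue(void *rq)
    {
        int tag = start_tag(rq);

        if (tag < 0)
            return 1;        /* no free tag: requeue, retry after a completion */
        /* ... program the hardware with (rq, tag) ... */
        return 0;
    }

    static void on_hw_completion(int tag)
    {
        end_tag(tag);        /* tag and slot are reusable from here on */
    }
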
371 | /** | ||
372 | * blk_queue_invalidate_tags - invalidate all pending tags | ||
373 | * @q: the request queue for the device | ||
374 | * | ||
375 | * Description: | ||
376 | * Hardware conditions may dictate a need to stop all pending requests. | ||
377 | * In this case, we will safely clear the block side of the tag queue and | ||
378 | * re-add all requests to the request queue in the right order. | ||
379 | * | ||
380 | * Notes: | ||
381 | * queue lock must be held. | ||
382 | **/ | ||
383 | void blk_queue_invalidate_tags(struct request_queue *q) | ||
384 | { | ||
385 | struct list_head *tmp, *n; | ||
386 | |||
387 | list_for_each_safe(tmp, n, &q->tag_busy_list) | ||
388 | blk_requeue_request(q, list_entry_rq(tmp)); | ||
389 | } | ||
390 | EXPORT_SYMBOL(blk_queue_invalidate_tags); | ||
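
The loop body has to use the _safe list walk because blk_requeue_request() unlinks the very entry being visited; a plain iterator would chase a next pointer that has already been relinked onto the request queue. The same shape in a minimal userspace form:

    #include <stddef.h>

    struct node {
        struct node *next;
    };

    /* requeue() relinks n onto another list, invalidating n->next */
    static void requeue_all(struct node **busy, void (*requeue)(struct node *))
    {
        struct node *n = *busy;

        while (n) {
            struct node *next = n->next;   /* saved before n is relinked */

            requeue(n);
            n = next;
        }
        *busy = NULL;
    }
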
diff --git a/block/blk.h b/block/blk.h new file mode 100644 index 000000000000..ec898dd0c65c --- /dev/null +++ b/block/blk.h | |||
@@ -0,0 +1,53 @@ | |||
1 | #ifndef BLK_INTERNAL_H | ||
2 | #define BLK_INTERNAL_H | ||
3 | |||
4 | /* Amount of time in which a process may batch requests */ | ||
5 | #define BLK_BATCH_TIME (HZ/50UL) | ||
6 | |||
7 | /* Number of requests a "batching" process may submit */ | ||
8 | #define BLK_BATCH_REQ 32 | ||
9 | |||
10 | extern struct kmem_cache *blk_requestq_cachep; | ||
11 | extern struct kobj_type blk_queue_ktype; | ||
12 | |||
13 | void rq_init(struct request_queue *q, struct request *rq); | ||
14 | void init_request_from_bio(struct request *req, struct bio *bio); | ||
15 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | ||
16 | struct bio *bio); | ||
17 | void __blk_queue_free_tags(struct request_queue *q); | ||
18 | |||
19 | void blk_unplug_work(struct work_struct *work); | ||
20 | void blk_unplug_timeout(unsigned long data); | ||
21 | |||
22 | struct io_context *current_io_context(gfp_t gfp_flags, int node); | ||
23 | |||
24 | int ll_back_merge_fn(struct request_queue *q, struct request *req, | ||
25 | struct bio *bio); | ||
26 | int ll_front_merge_fn(struct request_queue *q, struct request *req, | ||
27 | struct bio *bio); | ||
28 | int attempt_back_merge(struct request_queue *q, struct request *rq); | ||
29 | int attempt_front_merge(struct request_queue *q, struct request *rq); | ||
30 | void blk_recalc_rq_segments(struct request *rq); | ||
31 | void blk_recalc_rq_sectors(struct request *rq, int nsect); | ||
32 | |||
33 | void blk_queue_congestion_threshold(struct request_queue *q); | ||
34 | |||
35 | /* | ||
36 | * Return the threshold (number of used requests) at which the queue is | ||
37 | * considered to be congested. It includes a little hysteresis to keep the | ||
38 | * context switch rate down. | ||
39 | */ | ||
40 | static inline int queue_congestion_on_threshold(struct request_queue *q) | ||
41 | { | ||
42 | return q->nr_congestion_on; | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * The threshold at which a queue is considered to be uncongested | ||
47 | */ | ||
48 | static inline int queue_congestion_off_threshold(struct request_queue *q) | ||
49 | { | ||
50 | return q->nr_congestion_off; | ||
51 | } | ||
52 | |||
53 | #endif | ||
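
The two thresholds declared here only make sense as a pair: congestion is signalled once usage climbs past the on threshold, but is not cleared until it falls below the lower off threshold, so a queue hovering at the boundary does not flap on every request. A sketch of that behaviour (the fractions are illustrative, not the kernel's exact arithmetic):

    static void set_congestion_thresholds(int nr_requests, int *on, int *off)
    {
        *on = nr_requests - nr_requests / 8;   /* e.g. 112 of 128 */
        *off = *on - nr_requests / 16;         /* e.g. 104; the gap is the hysteresis */
    }

    static int update_congested(int used, int was_congested, int on, int off)
    {
        if (used >= on)
            return 1;                          /* clearly congested */
        if (used < off)
            return 0;                          /* clearly not */
        return was_congested;                  /* in the band: keep the old state */
    }
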
diff --git a/block/blktrace.c b/block/blktrace.c index d00ac3993c18..568588cd16b2 100644 --- a/block/blktrace.c +++ b/block/blktrace.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/time.h> | 25 | #include <linux/time.h> |
26 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
27 | 27 | ||
28 | static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, }; | ||
29 | static unsigned int blktrace_seq __read_mostly = 1; | 28 | static unsigned int blktrace_seq __read_mostly = 1; |
30 | 29 | ||
31 | /* | 30 | /* |
@@ -41,7 +40,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action, | |||
41 | const int cpu = smp_processor_id(); | 40 | const int cpu = smp_processor_id(); |
42 | 41 | ||
43 | t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; | 42 | t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; |
44 | t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu); | 43 | t->time = ktime_to_ns(ktime_get()); |
45 | t->device = bt->dev; | 44 | t->device = bt->dev; |
46 | t->action = action; | 45 | t->action = action; |
47 | t->pid = pid; | 46 | t->pid = pid; |
@@ -159,7 +158,7 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, | |||
159 | 158 | ||
160 | t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; | 159 | t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; |
161 | t->sequence = ++(*sequence); | 160 | t->sequence = ++(*sequence); |
162 | t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu); | 161 | t->time = ktime_to_ns(ktime_get()); |
163 | t->sector = sector; | 162 | t->sector = sector; |
164 | t->bytes = bytes; | 163 | t->bytes = bytes; |
165 | t->action = what; | 164 | t->action = what; |
@@ -179,7 +178,7 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, | |||
179 | EXPORT_SYMBOL_GPL(__blk_add_trace); | 178 | EXPORT_SYMBOL_GPL(__blk_add_trace); |
180 | 179 | ||
181 | static struct dentry *blk_tree_root; | 180 | static struct dentry *blk_tree_root; |
182 | static struct mutex blk_tree_mutex; | 181 | static DEFINE_MUTEX(blk_tree_mutex); |
183 | static unsigned int root_users; | 182 | static unsigned int root_users; |
184 | 183 | ||
185 | static inline void blk_remove_root(void) | 184 | static inline void blk_remove_root(void) |
@@ -202,6 +201,7 @@ static void blk_remove_tree(struct dentry *dir) | |||
202 | static struct dentry *blk_create_tree(const char *blk_name) | 201 | static struct dentry *blk_create_tree(const char *blk_name) |
203 | { | 202 | { |
204 | struct dentry *dir = NULL; | 203 | struct dentry *dir = NULL; |
204 | int created = 0; | ||
205 | 205 | ||
206 | mutex_lock(&blk_tree_mutex); | 206 | mutex_lock(&blk_tree_mutex); |
207 | 207 | ||
@@ -209,13 +209,17 @@ static struct dentry *blk_create_tree(const char *blk_name) | |||
209 | blk_tree_root = debugfs_create_dir("block", NULL); | 209 | blk_tree_root = debugfs_create_dir("block", NULL); |
210 | if (!blk_tree_root) | 210 | if (!blk_tree_root) |
211 | goto err; | 211 | goto err; |
212 | created = 1; | ||
212 | } | 213 | } |
213 | 214 | ||
214 | dir = debugfs_create_dir(blk_name, blk_tree_root); | 215 | dir = debugfs_create_dir(blk_name, blk_tree_root); |
215 | if (dir) | 216 | if (dir) |
216 | root_users++; | 217 | root_users++; |
217 | else | 218 | else { |
218 | blk_remove_root(); | 219 | /* Delete root only if we created it */ |
220 | if (created) | ||
221 | blk_remove_root(); | ||
222 | } | ||
219 | 223 | ||
220 | err: | 224 | err: |
221 | mutex_unlock(&blk_tree_mutex); | 225 | mutex_unlock(&blk_tree_mutex); |
@@ -231,7 +235,7 @@ static void blk_trace_cleanup(struct blk_trace *bt) | |||
231 | kfree(bt); | 235 | kfree(bt); |
232 | } | 236 | } |
233 | 237 | ||
234 | static int blk_trace_remove(struct request_queue *q) | 238 | int blk_trace_remove(struct request_queue *q) |
235 | { | 239 | { |
236 | struct blk_trace *bt; | 240 | struct blk_trace *bt; |
237 | 241 | ||
@@ -245,6 +249,7 @@ static int blk_trace_remove(struct request_queue *q) | |||
245 | 249 | ||
246 | return 0; | 250 | return 0; |
247 | } | 251 | } |
252 | EXPORT_SYMBOL_GPL(blk_trace_remove); | ||
248 | 253 | ||
249 | static int blk_dropped_open(struct inode *inode, struct file *filp) | 254 | static int blk_dropped_open(struct inode *inode, struct file *filp) |
250 | { | 255 | { |
@@ -312,18 +317,17 @@ static struct rchan_callbacks blk_relay_callbacks = { | |||
312 | /* | 317 | /* |
313 | * Setup everything required to start tracing | 318 | * Setup everything required to start tracing |
314 | */ | 319 | */ |
315 | int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev, | 320 | int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, |
316 | struct blk_user_trace_setup *buts) | 321 | struct blk_user_trace_setup *buts) |
317 | { | 322 | { |
318 | struct blk_trace *old_bt, *bt = NULL; | 323 | struct blk_trace *old_bt, *bt = NULL; |
319 | struct dentry *dir = NULL; | 324 | struct dentry *dir = NULL; |
320 | char b[BDEVNAME_SIZE]; | ||
321 | int ret, i; | 325 | int ret, i; |
322 | 326 | ||
323 | if (!buts->buf_size || !buts->buf_nr) | 327 | if (!buts->buf_size || !buts->buf_nr) |
324 | return -EINVAL; | 328 | return -EINVAL; |
325 | 329 | ||
326 | strcpy(buts->name, bdevname(bdev, b)); | 330 | strcpy(buts->name, name); |
327 | 331 | ||
328 | /* | 332 | /* |
329 | * some device names have larger paths - convert the slashes | 333 | * some device names have larger paths - convert the slashes |
@@ -348,7 +352,7 @@ int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev, | |||
348 | goto err; | 352 | goto err; |
349 | 353 | ||
350 | bt->dir = dir; | 354 | bt->dir = dir; |
351 | bt->dev = bdev->bd_dev; | 355 | bt->dev = dev; |
352 | atomic_set(&bt->dropped, 0); | 356 | atomic_set(&bt->dropped, 0); |
353 | 357 | ||
354 | ret = -EIO; | 358 | ret = -EIO; |
@@ -395,8 +399,8 @@ err: | |||
395 | return ret; | 399 | return ret; |
396 | } | 400 | } |
397 | 401 | ||
398 | static int blk_trace_setup(struct request_queue *q, struct block_device *bdev, | 402 | int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, |
399 | char __user *arg) | 403 | char __user *arg) |
400 | { | 404 | { |
401 | struct blk_user_trace_setup buts; | 405 | struct blk_user_trace_setup buts; |
402 | int ret; | 406 | int ret; |
@@ -405,7 +409,7 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev, | |||
405 | if (ret) | 409 | if (ret) |
406 | return -EFAULT; | 410 | return -EFAULT; |
407 | 411 | ||
408 | ret = do_blk_trace_setup(q, bdev, &buts); | 412 | ret = do_blk_trace_setup(q, name, dev, &buts); |
409 | if (ret) | 413 | if (ret) |
410 | return ret; | 414 | return ret; |
411 | 415 | ||
@@ -414,8 +418,9 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev, | |||
414 | 418 | ||
415 | return 0; | 419 | return 0; |
416 | } | 420 | } |
421 | EXPORT_SYMBOL_GPL(blk_trace_setup); | ||
417 | 422 | ||
418 | static int blk_trace_startstop(struct request_queue *q, int start) | 423 | int blk_trace_startstop(struct request_queue *q, int start) |
419 | { | 424 | { |
420 | struct blk_trace *bt; | 425 | struct blk_trace *bt; |
421 | int ret; | 426 | int ret; |
@@ -448,6 +453,7 @@ static int blk_trace_startstop(struct request_queue *q, int start) | |||
448 | 453 | ||
449 | return ret; | 454 | return ret; |
450 | } | 455 | } |
456 | EXPORT_SYMBOL_GPL(blk_trace_startstop); | ||
451 | 457 | ||
452 | /** | 458 | /** |
453 | * blk_trace_ioctl: - handle the ioctls associated with tracing | 459 | * blk_trace_ioctl: - handle the ioctls associated with tracing |
@@ -460,6 +466,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) | |||
460 | { | 466 | { |
461 | struct request_queue *q; | 467 | struct request_queue *q; |
462 | int ret, start = 0; | 468 | int ret, start = 0; |
469 | char b[BDEVNAME_SIZE]; | ||
463 | 470 | ||
464 | q = bdev_get_queue(bdev); | 471 | q = bdev_get_queue(bdev); |
465 | if (!q) | 472 | if (!q) |
@@ -469,7 +476,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) | |||
469 | 476 | ||
470 | switch (cmd) { | 477 | switch (cmd) { |
471 | case BLKTRACESETUP: | 478 | case BLKTRACESETUP: |
472 | ret = blk_trace_setup(q, bdev, arg); | 479 | strcpy(b, bdevname(bdev, b)); |
480 | ret = blk_trace_setup(q, b, bdev->bd_dev, arg); | ||
473 | break; | 481 | break; |
474 | case BLKTRACESTART: | 482 | case BLKTRACESTART: |
475 | start = 1; | 483 | start = 1; |
@@ -500,77 +508,3 @@ void blk_trace_shutdown(struct request_queue *q) | |||
500 | blk_trace_remove(q); | 508 | blk_trace_remove(q); |
501 | } | 509 | } |
502 | } | 510 | } |
503 | |||
504 | /* | ||
505 | * Average offset over two calls to cpu_clock() with a gettimeofday() | ||
506 | * in the middle | ||
507 | */ | ||
508 | static void blk_check_time(unsigned long long *t, int this_cpu) | ||
509 | { | ||
510 | unsigned long long a, b; | ||
511 | struct timeval tv; | ||
512 | |||
513 | a = cpu_clock(this_cpu); | ||
514 | do_gettimeofday(&tv); | ||
515 | b = cpu_clock(this_cpu); | ||
516 | |||
517 | *t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000; | ||
518 | *t -= (a + b) / 2; | ||
519 | } | ||
520 | |||
521 | /* | ||
522 | * calibrate our inter-CPU timings | ||
523 | */ | ||
524 | static void blk_trace_check_cpu_time(void *data) | ||
525 | { | ||
526 | unsigned long long *t; | ||
527 | int this_cpu = get_cpu(); | ||
528 | |||
529 | t = &per_cpu(blk_trace_cpu_offset, this_cpu); | ||
530 | |||
531 | /* | ||
532 | * Just call it twice, hopefully the second call will be cache hot | ||
533 | * and a little more precise | ||
534 | */ | ||
535 | blk_check_time(t, this_cpu); | ||
536 | blk_check_time(t, this_cpu); | ||
537 | |||
538 | put_cpu(); | ||
539 | } | ||
540 | |||
541 | static void blk_trace_set_ht_offsets(void) | ||
542 | { | ||
543 | #if defined(CONFIG_SCHED_SMT) | ||
544 | int cpu, i; | ||
545 | |||
546 | /* | ||
547 | * now make sure HT siblings have the same time offset | ||
548 | */ | ||
549 | preempt_disable(); | ||
550 | for_each_online_cpu(cpu) { | ||
551 | unsigned long long *cpu_off, *sibling_off; | ||
552 | |||
553 | for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) { | ||
554 | if (i == cpu) | ||
555 | continue; | ||
556 | |||
557 | cpu_off = &per_cpu(blk_trace_cpu_offset, cpu); | ||
558 | sibling_off = &per_cpu(blk_trace_cpu_offset, i); | ||
559 | *sibling_off = *cpu_off; | ||
560 | } | ||
561 | } | ||
562 | preempt_enable(); | ||
563 | #endif | ||
564 | } | ||
565 | |||
566 | static __init int blk_trace_init(void) | ||
567 | { | ||
568 | mutex_init(&blk_tree_mutex); | ||
569 | on_each_cpu(blk_trace_check_cpu_time, NULL, 1, 1); | ||
570 | blk_trace_set_ht_offsets(); | ||
571 | |||
572 | return 0; | ||
573 | } | ||
574 | |||
575 | module_init(blk_trace_init); | ||
576 | |||
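
The deleted calibration code deserves a gloss: cpu_clock() was only locally meaningful, so each CPU estimated its offset from wall time by bracketing a gettimeofday() between two cpu_clock() reads and subtracting the midpoint. Switching the trace timestamps to ktime_get() removes the problem entirely, since it is a single, CPU-consistent clock. The midpoint trick itself, modeled in userspace:

    #include <time.h>

    static long long ns(struct timespec t)
    {
        return t.tv_sec * 1000000000LL + t.tv_nsec;
    }

    static long long clock_offset_ns(void)
    {
        struct timespec a, w, b;

        clock_gettime(CLOCK_MONOTONIC, &a);
        clock_gettime(CLOCK_REALTIME, &w);     /* stands in for gettimeofday() */
        clock_gettime(CLOCK_MONOTONIC, &b);

        /* wall time minus the midpoint of the two bracketing local reads */
        return ns(w) - (ns(a) + ns(b)) / 2;
    }
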
diff --git a/block/bsg.c b/block/bsg.c index 8e181ab3afb9..8917c5174dc2 100644 --- a/block/bsg.c +++ b/block/bsg.c | |||
@@ -279,6 +279,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr) | |||
279 | goto out; | 279 | goto out; |
280 | } | 280 | } |
281 | rq->next_rq = next_rq; | 281 | rq->next_rq = next_rq; |
282 | next_rq->cmd_type = rq->cmd_type; | ||
282 | 283 | ||
283 | dxferp = (void*)(unsigned long)hdr->din_xferp; | 284 | dxferp = (void*)(unsigned long)hdr->din_xferp; |
284 | ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len); | 285 | ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len); |
@@ -445,6 +446,15 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, | |||
445 | else | 446 | else |
446 | hdr->dout_resid = rq->data_len; | 447 | hdr->dout_resid = rq->data_len; |
447 | 448 | ||
449 | /* | ||
450 | * If the request generated a negative error number, return it | ||
451 | * (provided we aren't already returning an error); if it's | ||
452 | * just a protocol response (i.e. non-negative), that gets | ||
453 | * processed above. | ||
454 | */ | ||
455 | if (!ret && rq->errors < 0) | ||
456 | ret = rq->errors; | ||
457 | |||
448 | blk_rq_unmap_user(bio); | 458 | blk_rq_unmap_user(bio); |
449 | blk_put_request(rq); | 459 | blk_put_request(rq); |
450 | 460 | ||
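
The new hunk leans on a convention worth spelling out: rq->errors is overloaded here, with negative values carrying kernel errnos from the transport and non-negative values carrying SCSI protocol status that has already been copied into the response header. A sketch of that split (finish_request and its parameters are illustrative):

    static int finish_request(int copy_err, int rq_errors)
    {
        int ret = copy_err;          /* e.g. -EFAULT from the unmap/copy paths */

        if (!ret && rq_errors < 0)
            ret = rq_errors;         /* transport errno: propagate to the caller */
        /* rq_errors >= 0: protocol status, already reported via the header */
        return ret;
    }
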
@@ -837,6 +847,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
837 | { | 847 | { |
838 | struct bsg_device *bd = file->private_data; | 848 | struct bsg_device *bd = file->private_data; |
839 | int __user *uarg = (int __user *) arg; | 849 | int __user *uarg = (int __user *) arg; |
850 | int ret; | ||
840 | 851 | ||
841 | switch (cmd) { | 852 | switch (cmd) { |
842 | /* | 853 | /* |
@@ -889,12 +900,12 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
889 | if (rq->next_rq) | 900 | if (rq->next_rq) |
890 | bidi_bio = rq->next_rq->bio; | 901 | bidi_bio = rq->next_rq->bio; |
891 | blk_execute_rq(bd->queue, NULL, rq, 0); | 902 | blk_execute_rq(bd->queue, NULL, rq, 0); |
892 | blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio); | 903 | ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio); |
893 | 904 | ||
894 | if (copy_to_user(uarg, &hdr, sizeof(hdr))) | 905 | if (copy_to_user(uarg, &hdr, sizeof(hdr))) |
895 | return -EFAULT; | 906 | return -EFAULT; |
896 | 907 | ||
897 | return 0; | 908 | return ret; |
898 | } | 909 | } |
899 | /* | 910 | /* |
900 | * block device ioctls | 911 | * block device ioctls |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 54dc05439009..ca198e61fa65 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -15,20 +15,22 @@ | |||
15 | /* | 15 | /* |
16 | * tunables | 16 | * tunables |
17 | */ | 17 | */ |
18 | static const int cfq_quantum = 4; /* max queue in one round of service */ | 18 | /* max queue in one round of service */ |
19 | static const int cfq_quantum = 4; | ||
19 | static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; | 20 | static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; |
20 | static const int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */ | 21 | /* maximum backwards seek, in KiB */ |
21 | static const int cfq_back_penalty = 2; /* penalty of a backwards seek */ | 22 | static const int cfq_back_max = 16 * 1024; |
22 | 23 | /* penalty of a backwards seek */ | |
24 | static const int cfq_back_penalty = 2; | ||
23 | static const int cfq_slice_sync = HZ / 10; | 25 | static const int cfq_slice_sync = HZ / 10; |
24 | static int cfq_slice_async = HZ / 25; | 26 | static int cfq_slice_async = HZ / 25; |
25 | static const int cfq_slice_async_rq = 2; | 27 | static const int cfq_slice_async_rq = 2; |
26 | static int cfq_slice_idle = HZ / 125; | 28 | static int cfq_slice_idle = HZ / 125; |
27 | 29 | ||
28 | /* | 30 | /* |
29 | * grace period before allowing idle class to get disk access | 31 | * offset from end of service tree |
30 | */ | 32 | */ |
31 | #define CFQ_IDLE_GRACE (HZ / 10) | 33 | #define CFQ_IDLE_DELAY (HZ / 5) |
32 | 34 | ||
33 | /* | 35 | /* |
34 | * below this threshold, we consider thinktime immediate | 36 | * below this threshold, we consider thinktime immediate |
@@ -37,7 +39,8 @@ static int cfq_slice_idle = HZ / 125; | |||
37 | 39 | ||
38 | #define CFQ_SLICE_SCALE (5) | 40 | #define CFQ_SLICE_SCALE (5) |
39 | 41 | ||
40 | #define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private) | 42 | #define RQ_CIC(rq) \ |
43 | ((struct cfq_io_context *) (rq)->elevator_private) | ||
41 | #define RQ_CFQQ(rq) ((rq)->elevator_private2) | 44 | #define RQ_CFQQ(rq) ((rq)->elevator_private2) |
42 | 45 | ||
43 | static struct kmem_cache *cfq_pool; | 46 | static struct kmem_cache *cfq_pool; |
@@ -98,8 +101,6 @@ struct cfq_data { | |||
98 | struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR]; | 101 | struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR]; |
99 | struct cfq_queue *async_idle_cfqq; | 102 | struct cfq_queue *async_idle_cfqq; |
100 | 103 | ||
101 | struct timer_list idle_class_timer; | ||
102 | |||
103 | sector_t last_position; | 104 | sector_t last_position; |
104 | unsigned long last_end_request; | 105 | unsigned long last_end_request; |
105 | 106 | ||
@@ -173,15 +174,15 @@ enum cfqq_state_flags { | |||
173 | #define CFQ_CFQQ_FNS(name) \ | 174 | #define CFQ_CFQQ_FNS(name) \ |
174 | static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ | 175 | static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ |
175 | { \ | 176 | { \ |
176 | cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ | 177 | (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ |
177 | } \ | 178 | } \ |
178 | static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ | 179 | static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ |
179 | { \ | 180 | { \ |
180 | cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ | 181 | (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ |
181 | } \ | 182 | } \ |
182 | static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ | 183 | static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ |
183 | { \ | 184 | { \ |
184 | return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ | 185 | return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ |
185 | } | 186 | } |
186 | 187 | ||
187 | CFQ_CFQQ_FNS(on_rr); | 188 | CFQ_CFQQ_FNS(on_rr); |
@@ -199,8 +200,8 @@ CFQ_CFQQ_FNS(sync); | |||
199 | 200 | ||
200 | static void cfq_dispatch_insert(struct request_queue *, struct request *); | 201 | static void cfq_dispatch_insert(struct request_queue *, struct request *); |
201 | static struct cfq_queue *cfq_get_queue(struct cfq_data *, int, | 202 | static struct cfq_queue *cfq_get_queue(struct cfq_data *, int, |
202 | struct task_struct *, gfp_t); | 203 | struct io_context *, gfp_t); |
203 | static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *, | 204 | static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *, |
204 | struct io_context *); | 205 | struct io_context *); |
205 | 206 | ||
206 | static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic, | 207 | static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic, |
@@ -384,12 +385,15 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2) | |||
384 | /* | 385 | /* |
385 | * The below is leftmost cache rbtree addon | 386 | * The below is leftmost cache rbtree addon |
386 | */ | 387 | */ |
387 | static struct rb_node *cfq_rb_first(struct cfq_rb_root *root) | 388 | static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root) |
388 | { | 389 | { |
389 | if (!root->left) | 390 | if (!root->left) |
390 | root->left = rb_first(&root->rb); | 391 | root->left = rb_first(&root->rb); |
391 | 392 | ||
392 | return root->left; | 393 | if (root->left) |
394 | return rb_entry(root->left, struct cfq_queue, rb_node); | ||
395 | |||
396 | return NULL; | ||
393 | } | 397 | } |
394 | 398 | ||
395 | static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root) | 399 | static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root) |
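
Having cfq_rb_first() return the cfq_queue directly is cosmetic; the interesting part is the cached leftmost pointer, which makes picking the next queue O(1) on a hit and is recomputed lazily after an erase invalidates it. A minimal model over a plain binary search tree (not the kernel's rbtree):

    #include <stddef.h>

    struct bst_node {
        long key;
        struct bst_node *left, *right;
    };

    struct bst {
        struct bst_node *root;
        struct bst_node *leftmost;   /* cache; NULL means "recompute" */
    };

    static struct bst_node *bst_first(struct bst *t)
    {
        if (!t->leftmost) {          /* cache miss: walk the left spine */
            struct bst_node *n = t->root;

            while (n && n->left)
                n = n->left;
            t->leftmost = n;
        }
        return t->leftmost;
    }

    /* callers must invalidate before unlinking a node */
    static void bst_erase_hook(struct bst *t, struct bst_node *n)
    {
        if (t->leftmost == n)
            t->leftmost = NULL;
    }
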
@@ -446,12 +450,20 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd, | |||
446 | static void cfq_service_tree_add(struct cfq_data *cfqd, | 450 | static void cfq_service_tree_add(struct cfq_data *cfqd, |
447 | struct cfq_queue *cfqq, int add_front) | 451 | struct cfq_queue *cfqq, int add_front) |
448 | { | 452 | { |
449 | struct rb_node **p = &cfqd->service_tree.rb.rb_node; | 453 | struct rb_node **p, *parent; |
450 | struct rb_node *parent = NULL; | 454 | struct cfq_queue *__cfqq; |
451 | unsigned long rb_key; | 455 | unsigned long rb_key; |
452 | int left; | 456 | int left; |
453 | 457 | ||
454 | if (!add_front) { | 458 | if (cfq_class_idle(cfqq)) { |
459 | rb_key = CFQ_IDLE_DELAY; | ||
460 | parent = rb_last(&cfqd->service_tree.rb); | ||
461 | if (parent && parent != &cfqq->rb_node) { | ||
462 | __cfqq = rb_entry(parent, struct cfq_queue, rb_node); | ||
463 | rb_key += __cfqq->rb_key; | ||
464 | } else | ||
465 | rb_key += jiffies; | ||
466 | } else if (!add_front) { | ||
455 | rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; | 467 | rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; |
456 | rb_key += cfqq->slice_resid; | 468 | rb_key += cfqq->slice_resid; |
457 | cfqq->slice_resid = 0; | 469 | cfqq->slice_resid = 0; |
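
This hunk is the replacement for the idle-class grace timer removed later in the patch: rather than arming a timer, idle queues are keyed CFQ_IDLE_DELAY past the current rightmost entry, so they sort to the back of the service tree and are reached only when nothing else is pending. The key computation, pulled out as a sketch:

    static unsigned long idle_rb_key(unsigned long idle_delay,
                                     unsigned long last_key,
                                     int tree_has_entries,
                                     unsigned long now)
    {
        unsigned long rb_key = idle_delay;

        if (tree_has_entries)
            rb_key += last_key;      /* sort behind the current last queue */
        else
            rb_key += now;           /* empty tree: just an offset from now */
        return rb_key;
    }
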
@@ -469,8 +481,9 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, | |||
469 | } | 481 | } |
470 | 482 | ||
471 | left = 1; | 483 | left = 1; |
484 | parent = NULL; | ||
485 | p = &cfqd->service_tree.rb.rb_node; | ||
472 | while (*p) { | 486 | while (*p) { |
473 | struct cfq_queue *__cfqq; | ||
474 | struct rb_node **n; | 487 | struct rb_node **n; |
475 | 488 | ||
476 | parent = *p; | 489 | parent = *p; |
@@ -524,8 +537,7 @@ static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
524 | * add to busy list of queues for service, trying to be fair in ordering | 537 | * add to busy list of queues for service, trying to be fair in ordering |
525 | * the pending list according to last request service | 538 | * the pending list according to last request service |
526 | */ | 539 | */ |
527 | static inline void | 540 | static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
528 | cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
529 | { | 541 | { |
530 | BUG_ON(cfq_cfqq_on_rr(cfqq)); | 542 | BUG_ON(cfq_cfqq_on_rr(cfqq)); |
531 | cfq_mark_cfqq_on_rr(cfqq); | 543 | cfq_mark_cfqq_on_rr(cfqq); |
@@ -538,8 +550,7 @@ cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
538 | * Called when the cfqq no longer has requests pending, remove it from | 550 | * Called when the cfqq no longer has requests pending, remove it from |
539 | * the service tree. | 551 | * the service tree. |
540 | */ | 552 | */ |
541 | static inline void | 553 | static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
542 | cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) | ||
543 | { | 554 | { |
544 | BUG_ON(!cfq_cfqq_on_rr(cfqq)); | 555 | BUG_ON(!cfq_cfqq_on_rr(cfqq)); |
545 | cfq_clear_cfqq_on_rr(cfqq); | 556 | cfq_clear_cfqq_on_rr(cfqq); |
@@ -554,7 +565,7 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
554 | /* | 565 | /* |
555 | * rb tree support functions | 566 | * rb tree support functions |
556 | */ | 567 | */ |
557 | static inline void cfq_del_rq_rb(struct request *rq) | 568 | static void cfq_del_rq_rb(struct request *rq) |
558 | { | 569 | { |
559 | struct cfq_queue *cfqq = RQ_CFQQ(rq); | 570 | struct cfq_queue *cfqq = RQ_CFQQ(rq); |
560 | struct cfq_data *cfqd = cfqq->cfqd; | 571 | struct cfq_data *cfqd = cfqq->cfqd; |
@@ -594,8 +605,7 @@ static void cfq_add_rq_rb(struct request *rq) | |||
594 | BUG_ON(!cfqq->next_rq); | 605 | BUG_ON(!cfqq->next_rq); |
595 | } | 606 | } |
596 | 607 | ||
597 | static inline void | 608 | static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq) |
598 | cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq) | ||
599 | { | 609 | { |
600 | elv_rb_del(&cfqq->sort_list, rq); | 610 | elv_rb_del(&cfqq->sort_list, rq); |
601 | cfqq->queued[rq_is_sync(rq)]--; | 611 | cfqq->queued[rq_is_sync(rq)]--; |
@@ -609,7 +619,7 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) | |||
609 | struct cfq_io_context *cic; | 619 | struct cfq_io_context *cic; |
610 | struct cfq_queue *cfqq; | 620 | struct cfq_queue *cfqq; |
611 | 621 | ||
612 | cic = cfq_cic_rb_lookup(cfqd, tsk->io_context); | 622 | cic = cfq_cic_lookup(cfqd, tsk->io_context); |
613 | if (!cic) | 623 | if (!cic) |
614 | return NULL; | 624 | return NULL; |
615 | 625 | ||
@@ -721,7 +731,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq, | |||
721 | * Look up the cfqq that this bio will be queued with. Allow | 731 | * Look up the cfqq that this bio will be queued with. Allow
722 | * merge only if rq is queued there. | 732 | * merge only if rq is queued there. |
723 | */ | 733 | */ |
724 | cic = cfq_cic_rb_lookup(cfqd, current->io_context); | 734 | cic = cfq_cic_lookup(cfqd, current->io_context); |
725 | if (!cic) | 735 | if (!cic) |
726 | return 0; | 736 | return 0; |
727 | 737 | ||
@@ -732,15 +742,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq, | |||
732 | return 0; | 742 | return 0; |
733 | } | 743 | } |
734 | 744 | ||
735 | static inline void | 745 | static void __cfq_set_active_queue(struct cfq_data *cfqd, |
736 | __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) | 746 | struct cfq_queue *cfqq) |
737 | { | 747 | { |
738 | if (cfqq) { | 748 | if (cfqq) { |
739 | /* | ||
740 | * stop potential idle class queues waiting service | ||
741 | */ | ||
742 | del_timer(&cfqd->idle_class_timer); | ||
743 | |||
744 | cfqq->slice_end = 0; | 749 | cfqq->slice_end = 0; |
745 | cfq_clear_cfqq_must_alloc_slice(cfqq); | 750 | cfq_clear_cfqq_must_alloc_slice(cfqq); |
746 | cfq_clear_cfqq_fifo_expire(cfqq); | 751 | cfq_clear_cfqq_fifo_expire(cfqq); |
@@ -795,32 +800,10 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out) | |||
795 | */ | 800 | */ |
796 | static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) | 801 | static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) |
797 | { | 802 | { |
798 | struct cfq_queue *cfqq; | ||
799 | struct rb_node *n; | ||
800 | |||
801 | if (RB_EMPTY_ROOT(&cfqd->service_tree.rb)) | 803 | if (RB_EMPTY_ROOT(&cfqd->service_tree.rb)) |
802 | return NULL; | 804 | return NULL; |
803 | 805 | ||
804 | n = cfq_rb_first(&cfqd->service_tree); | 806 | return cfq_rb_first(&cfqd->service_tree); |
805 | cfqq = rb_entry(n, struct cfq_queue, rb_node); | ||
806 | |||
807 | if (cfq_class_idle(cfqq)) { | ||
808 | unsigned long end; | ||
809 | |||
810 | /* | ||
811 | * if we have idle queues and no rt or be queues had | ||
812 | * pending requests, either allow immediate service if | ||
813 | * the grace period has passed or arm the idle grace | ||
814 | * timer | ||
815 | */ | ||
816 | end = cfqd->last_end_request + CFQ_IDLE_GRACE; | ||
817 | if (time_before(jiffies, end)) { | ||
818 | mod_timer(&cfqd->idle_class_timer, end); | ||
819 | cfqq = NULL; | ||
820 | } | ||
821 | } | ||
822 | |||
823 | return cfqq; | ||
824 | } | 807 | } |
825 | 808 | ||
826 | /* | 809 | /* |
@@ -886,7 +869,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
886 | * task has exited, don't wait | 869 | * task has exited, don't wait |
887 | */ | 870 | */ |
888 | cic = cfqd->active_cic; | 871 | cic = cfqd->active_cic; |
889 | if (!cic || !cic->ioc->task) | 872 | if (!cic || !atomic_read(&cic->ioc->nr_tasks)) |
890 | return; | 873 | return; |
891 | 874 | ||
892 | /* | 875 | /* |
@@ -930,7 +913,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) | |||
930 | /* | 913 | /* |
931 | * return expired entry, or NULL to just start from scratch in rbtree | 914 | * return expired entry, or NULL to just start from scratch in rbtree |
932 | */ | 915 | */ |
933 | static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq) | 916 | static struct request *cfq_check_fifo(struct cfq_queue *cfqq) |
934 | { | 917 | { |
935 | struct cfq_data *cfqd = cfqq->cfqd; | 918 | struct cfq_data *cfqd = cfqq->cfqd; |
936 | struct request *rq; | 919 | struct request *rq; |
@@ -1025,7 +1008,8 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1025 | /* | 1008 | /* |
1026 | * follow expired path, else get first next available | 1009 | * follow expired path, else get first next available |
1027 | */ | 1010 | */ |
1028 | if ((rq = cfq_check_fifo(cfqq)) == NULL) | 1011 | rq = cfq_check_fifo(cfqq); |
1012 | if (rq == NULL) | ||
1029 | rq = cfqq->next_rq; | 1013 | rq = cfqq->next_rq; |
1030 | 1014 | ||
1031 | /* | 1015 | /* |
@@ -1059,7 +1043,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1059 | return dispatched; | 1043 | return dispatched; |
1060 | } | 1044 | } |
1061 | 1045 | ||
1062 | static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq) | 1046 | static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq) |
1063 | { | 1047 | { |
1064 | int dispatched = 0; | 1048 | int dispatched = 0; |
1065 | 1049 | ||
@@ -1078,14 +1062,11 @@ static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq) | |||
1078 | */ | 1062 | */ |
1079 | static int cfq_forced_dispatch(struct cfq_data *cfqd) | 1063 | static int cfq_forced_dispatch(struct cfq_data *cfqd) |
1080 | { | 1064 | { |
1065 | struct cfq_queue *cfqq; | ||
1081 | int dispatched = 0; | 1066 | int dispatched = 0; |
1082 | struct rb_node *n; | ||
1083 | |||
1084 | while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) { | ||
1085 | struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node); | ||
1086 | 1067 | ||
1068 | while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL) | ||
1087 | dispatched += __cfq_forced_dispatch_cfqq(cfqq); | 1069 | dispatched += __cfq_forced_dispatch_cfqq(cfqq); |
1088 | } | ||
1089 | 1070 | ||
1090 | cfq_slice_expired(cfqd, 0); | 1071 | cfq_slice_expired(cfqd, 0); |
1091 | 1072 | ||
@@ -1161,20 +1142,69 @@ static void cfq_put_queue(struct cfq_queue *cfqq) | |||
1161 | kmem_cache_free(cfq_pool, cfqq); | 1142 | kmem_cache_free(cfq_pool, cfqq); |
1162 | } | 1143 | } |
1163 | 1144 | ||
1164 | static void cfq_free_io_context(struct io_context *ioc) | 1145 | /* |
1146 | * Call func for each cic attached to this ioc. Returns number of cic's seen. | ||
1147 | */ | ||
1148 | #define CIC_GANG_NR 16 | ||
1149 | static unsigned int | ||
1150 | call_for_each_cic(struct io_context *ioc, | ||
1151 | void (*func)(struct io_context *, struct cfq_io_context *)) | ||
1165 | { | 1152 | { |
1166 | struct cfq_io_context *__cic; | 1153 | struct cfq_io_context *cics[CIC_GANG_NR]; |
1167 | struct rb_node *n; | 1154 | unsigned long index = 0; |
1168 | int freed = 0; | 1155 | unsigned int called = 0; |
1156 | int nr; | ||
1169 | 1157 | ||
1170 | ioc->ioc_data = NULL; | 1158 | rcu_read_lock(); |
1171 | 1159 | ||
1172 | while ((n = rb_first(&ioc->cic_root)) != NULL) { | 1160 | do { |
1173 | __cic = rb_entry(n, struct cfq_io_context, rb_node); | 1161 | int i; |
1174 | rb_erase(&__cic->rb_node, &ioc->cic_root); | 1162 | |
1175 | kmem_cache_free(cfq_ioc_pool, __cic); | 1163 | /* |
1176 | freed++; | 1164 | * Perhaps there's a better way - this just does gang lookups from
1177 | } | 1165 | * 0 to the end, restarting after each CIC_GANG_NR from the |
1166 | * last key + 1. | ||
1167 | */ | ||
1168 | nr = radix_tree_gang_lookup(&ioc->radix_root, (void **) cics, | ||
1169 | index, CIC_GANG_NR); | ||
1170 | if (!nr) | ||
1171 | break; | ||
1172 | |||
1173 | called += nr; | ||
1174 | index = 1 + (unsigned long) cics[nr - 1]->key; | ||
1175 | |||
1176 | for (i = 0; i < nr; i++) | ||
1177 | func(ioc, cics[i]); | ||
1178 | } while (nr == CIC_GANG_NR); | ||
1179 | |||
1180 | rcu_read_unlock(); | ||
1181 | |||
1182 | return called; | ||
1183 | } | ||
1184 | |||
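
call_for_each_cic() pages through the radix tree in fixed-size gangs: look up at most CIC_GANG_NR entries at or above the current index, resume at the last returned key plus one, and stop when a batch comes back short. The same iteration shape outside the kernel, with gang_lookup() as a stand-in for radix_tree_gang_lookup():

    #define GANG 16

    struct item {
        unsigned long key;
    };

    /* stand-in for radix_tree_gang_lookup(): fills out[] with up to max
     * items whose key >= index, in ascending key order; returns the count */
    extern int gang_lookup(struct item **out, unsigned long index, int max);

    static unsigned int for_each_item(void (*fn)(struct item *))
    {
        struct item *batch[GANG];
        unsigned long index = 0;
        unsigned int seen = 0;
        int nr;

        do {
            nr = gang_lookup(batch, index, GANG);
            if (!nr)
                break;
            seen += nr;
            index = batch[nr - 1]->key + 1;   /* resume past this batch */
            for (int i = 0; i < nr; i++)
                fn(batch[i]);
        } while (nr == GANG);                 /* a short batch means "done" */

        return seen;
    }
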
1185 | static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic) | ||
1186 | { | ||
1187 | unsigned long flags; | ||
1188 | |||
1189 | BUG_ON(!cic->dead_key); | ||
1190 | |||
1191 | spin_lock_irqsave(&ioc->lock, flags); | ||
1192 | radix_tree_delete(&ioc->radix_root, cic->dead_key); | ||
1193 | spin_unlock_irqrestore(&ioc->lock, flags); | ||
1194 | |||
1195 | kmem_cache_free(cfq_ioc_pool, cic); | ||
1196 | } | ||
1197 | |||
1198 | static void cfq_free_io_context(struct io_context *ioc) | ||
1199 | { | ||
1200 | int freed; | ||
1201 | |||
1202 | /* | ||
1203 | * ioc->refcount is zero here, so no more cic's are allowed to be | ||
1204 | * linked into this ioc. So it should be ok to iterate over the known | ||
1205 | * list; we will see all cic's since no new ones are added. | ||
1206 | */ | ||
1207 | freed = call_for_each_cic(ioc, cic_free_func); | ||
1178 | 1208 | ||
1179 | elv_ioc_count_mod(ioc_count, -freed); | 1209 | elv_ioc_count_mod(ioc_count, -freed); |
1180 | 1210 | ||
@@ -1196,7 +1226,12 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd, | |||
1196 | struct cfq_io_context *cic) | 1226 | struct cfq_io_context *cic) |
1197 | { | 1227 | { |
1198 | list_del_init(&cic->queue_list); | 1228 | list_del_init(&cic->queue_list); |
1229 | |||
1230 | /* | ||
1231 | * Make sure key == NULL is seen for dead queues | ||
1232 | */ | ||
1199 | smp_wmb(); | 1233 | smp_wmb(); |
1234 | cic->dead_key = (unsigned long) cic->key; | ||
1200 | cic->key = NULL; | 1235 | cic->key = NULL; |
1201 | 1236 | ||
1202 | if (cic->cfqq[ASYNC]) { | 1237 | if (cic->cfqq[ASYNC]) { |
@@ -1210,16 +1245,18 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd, | |||
1210 | } | 1245 | } |
1211 | } | 1246 | } |
1212 | 1247 | ||
1213 | static void cfq_exit_single_io_context(struct cfq_io_context *cic) | 1248 | static void cfq_exit_single_io_context(struct io_context *ioc, |
1249 | struct cfq_io_context *cic) | ||
1214 | { | 1250 | { |
1215 | struct cfq_data *cfqd = cic->key; | 1251 | struct cfq_data *cfqd = cic->key; |
1216 | 1252 | ||
1217 | if (cfqd) { | 1253 | if (cfqd) { |
1218 | struct request_queue *q = cfqd->queue; | 1254 | struct request_queue *q = cfqd->queue; |
1255 | unsigned long flags; | ||
1219 | 1256 | ||
1220 | spin_lock_irq(q->queue_lock); | 1257 | spin_lock_irqsave(q->queue_lock, flags); |
1221 | __cfq_exit_single_io_context(cfqd, cic); | 1258 | __cfq_exit_single_io_context(cfqd, cic); |
1222 | spin_unlock_irq(q->queue_lock); | 1259 | spin_unlock_irqrestore(q->queue_lock, flags); |
1223 | } | 1260 | } |
1224 | } | 1261 | } |
1225 | 1262 | ||
@@ -1229,21 +1266,8 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic) | |||
1229 | */ | 1266 | */ |
1230 | static void cfq_exit_io_context(struct io_context *ioc) | 1267 | static void cfq_exit_io_context(struct io_context *ioc) |
1231 | { | 1268 | { |
1232 | struct cfq_io_context *__cic; | 1269 | rcu_assign_pointer(ioc->ioc_data, NULL); |
1233 | struct rb_node *n; | 1270 | call_for_each_cic(ioc, cfq_exit_single_io_context); |
1234 | |||
1235 | ioc->ioc_data = NULL; | ||
1236 | |||
1237 | /* | ||
1238 | * put the reference this task is holding to the various queues | ||
1239 | */ | ||
1240 | n = rb_first(&ioc->cic_root); | ||
1241 | while (n != NULL) { | ||
1242 | __cic = rb_entry(n, struct cfq_io_context, rb_node); | ||
1243 | |||
1244 | cfq_exit_single_io_context(__cic); | ||
1245 | n = rb_next(n); | ||
1246 | } | ||
1247 | } | 1271 | } |
1248 | 1272 | ||
1249 | static struct cfq_io_context * | 1273 | static struct cfq_io_context * |
@@ -1264,7 +1288,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | |||
1264 | return cic; | 1288 | return cic; |
1265 | } | 1289 | } |
1266 | 1290 | ||
1267 | static void cfq_init_prio_data(struct cfq_queue *cfqq) | 1291 | static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc) |
1268 | { | 1292 | { |
1269 | struct task_struct *tsk = current; | 1293 | struct task_struct *tsk = current; |
1270 | int ioprio_class; | 1294 | int ioprio_class; |
@@ -1272,30 +1296,30 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq) | |||
1272 | if (!cfq_cfqq_prio_changed(cfqq)) | 1296 | if (!cfq_cfqq_prio_changed(cfqq)) |
1273 | return; | 1297 | return; |
1274 | 1298 | ||
1275 | ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio); | 1299 | ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); |
1276 | switch (ioprio_class) { | 1300 | switch (ioprio_class) { |
1277 | default: | 1301 | default: |
1278 | printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); | 1302 | printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); |
1279 | case IOPRIO_CLASS_NONE: | 1303 | case IOPRIO_CLASS_NONE: |
1280 | /* | 1304 | /* |
1281 | * no prio set, place us in the middle of the BE classes | 1305 | * no prio set, place us in the middle of the BE classes |
1282 | */ | 1306 | */ |
1283 | cfqq->ioprio = task_nice_ioprio(tsk); | 1307 | cfqq->ioprio = task_nice_ioprio(tsk); |
1284 | cfqq->ioprio_class = IOPRIO_CLASS_BE; | 1308 | cfqq->ioprio_class = IOPRIO_CLASS_BE; |
1285 | break; | 1309 | break; |
1286 | case IOPRIO_CLASS_RT: | 1310 | case IOPRIO_CLASS_RT: |
1287 | cfqq->ioprio = task_ioprio(tsk); | 1311 | cfqq->ioprio = task_ioprio(ioc); |
1288 | cfqq->ioprio_class = IOPRIO_CLASS_RT; | 1312 | cfqq->ioprio_class = IOPRIO_CLASS_RT; |
1289 | break; | 1313 | break; |
1290 | case IOPRIO_CLASS_BE: | 1314 | case IOPRIO_CLASS_BE: |
1291 | cfqq->ioprio = task_ioprio(tsk); | 1315 | cfqq->ioprio = task_ioprio(ioc); |
1292 | cfqq->ioprio_class = IOPRIO_CLASS_BE; | 1316 | cfqq->ioprio_class = IOPRIO_CLASS_BE; |
1293 | break; | 1317 | break; |
1294 | case IOPRIO_CLASS_IDLE: | 1318 | case IOPRIO_CLASS_IDLE: |
1295 | cfqq->ioprio_class = IOPRIO_CLASS_IDLE; | 1319 | cfqq->ioprio_class = IOPRIO_CLASS_IDLE; |
1296 | cfqq->ioprio = 7; | 1320 | cfqq->ioprio = 7; |
1297 | cfq_clear_cfqq_idle_window(cfqq); | 1321 | cfq_clear_cfqq_idle_window(cfqq); |
1298 | break; | 1322 | break; |
1299 | } | 1323 | } |
1300 | 1324 | ||
1301 | /* | 1325 | /* |
@@ -1307,7 +1331,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq) | |||
1307 | cfq_clear_cfqq_prio_changed(cfqq); | 1331 | cfq_clear_cfqq_prio_changed(cfqq); |
1308 | } | 1332 | } |
1309 | 1333 | ||
1310 | static inline void changed_ioprio(struct cfq_io_context *cic) | 1334 | static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic) |
1311 | { | 1335 | { |
1312 | struct cfq_data *cfqd = cic->key; | 1336 | struct cfq_data *cfqd = cic->key; |
1313 | struct cfq_queue *cfqq; | 1337 | struct cfq_queue *cfqq; |
@@ -1321,8 +1345,7 @@ static inline void changed_ioprio(struct cfq_io_context *cic) | |||
1321 | cfqq = cic->cfqq[ASYNC]; | 1345 | cfqq = cic->cfqq[ASYNC]; |
1322 | if (cfqq) { | 1346 | if (cfqq) { |
1323 | struct cfq_queue *new_cfqq; | 1347 | struct cfq_queue *new_cfqq; |
1324 | new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc->task, | 1348 | new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC); |
1325 | GFP_ATOMIC); | ||
1326 | if (new_cfqq) { | 1349 | if (new_cfqq) { |
1327 | cic->cfqq[ASYNC] = new_cfqq; | 1350 | cic->cfqq[ASYNC] = new_cfqq; |
1328 | cfq_put_queue(cfqq); | 1351 | cfq_put_queue(cfqq); |
@@ -1338,29 +1361,19 @@ static inline void changed_ioprio(struct cfq_io_context *cic) | |||
1338 | 1361 | ||
1339 | static void cfq_ioc_set_ioprio(struct io_context *ioc) | 1362 | static void cfq_ioc_set_ioprio(struct io_context *ioc) |
1340 | { | 1363 | { |
1341 | struct cfq_io_context *cic; | 1364 | call_for_each_cic(ioc, changed_ioprio); |
1342 | struct rb_node *n; | ||
1343 | |||
1344 | ioc->ioprio_changed = 0; | 1365 | ioc->ioprio_changed = 0; |
1345 | |||
1346 | n = rb_first(&ioc->cic_root); | ||
1347 | while (n != NULL) { | ||
1348 | cic = rb_entry(n, struct cfq_io_context, rb_node); | ||
1349 | |||
1350 | changed_ioprio(cic); | ||
1351 | n = rb_next(n); | ||
1352 | } | ||
1353 | } | 1366 | } |
1354 | 1367 | ||
1355 | static struct cfq_queue * | 1368 | static struct cfq_queue * |
1356 | cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync, | 1369 | cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync, |
1357 | struct task_struct *tsk, gfp_t gfp_mask) | 1370 | struct io_context *ioc, gfp_t gfp_mask) |
1358 | { | 1371 | { |
1359 | struct cfq_queue *cfqq, *new_cfqq = NULL; | 1372 | struct cfq_queue *cfqq, *new_cfqq = NULL; |
1360 | struct cfq_io_context *cic; | 1373 | struct cfq_io_context *cic; |
1361 | 1374 | ||
1362 | retry: | 1375 | retry: |
1363 | cic = cfq_cic_rb_lookup(cfqd, tsk->io_context); | 1376 | cic = cfq_cic_lookup(cfqd, ioc); |
1364 | /* cic always exists here */ | 1377 | /* cic always exists here */ |
1365 | cfqq = cic_to_cfqq(cic, is_sync); | 1378 | cfqq = cic_to_cfqq(cic, is_sync); |
1366 | 1379 | ||
@@ -1395,15 +1408,16 @@ retry: | |||
1395 | atomic_set(&cfqq->ref, 0); | 1408 | atomic_set(&cfqq->ref, 0); |
1396 | cfqq->cfqd = cfqd; | 1409 | cfqq->cfqd = cfqd; |
1397 | 1410 | ||
1398 | if (is_sync) { | ||
1399 | cfq_mark_cfqq_idle_window(cfqq); | ||
1400 | cfq_mark_cfqq_sync(cfqq); | ||
1401 | } | ||
1402 | |||
1403 | cfq_mark_cfqq_prio_changed(cfqq); | 1411 | cfq_mark_cfqq_prio_changed(cfqq); |
1404 | cfq_mark_cfqq_queue_new(cfqq); | 1412 | cfq_mark_cfqq_queue_new(cfqq); |
1405 | 1413 | ||
1406 | cfq_init_prio_data(cfqq); | 1414 | cfq_init_prio_data(cfqq, ioc); |
1415 | |||
1416 | if (is_sync) { | ||
1417 | if (!cfq_class_idle(cfqq)) | ||
1418 | cfq_mark_cfqq_idle_window(cfqq); | ||
1419 | cfq_mark_cfqq_sync(cfqq); | ||
1420 | } | ||
1407 | } | 1421 | } |
1408 | 1422 | ||
1409 | if (new_cfqq) | 1423 | if (new_cfqq) |
@@ -1417,7 +1431,7 @@ out: | |||
1417 | static struct cfq_queue ** | 1431 | static struct cfq_queue ** |
1418 | cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) | 1432 | cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) |
1419 | { | 1433 | { |
1420 | switch(ioprio_class) { | 1434 | switch (ioprio_class) { |
1421 | case IOPRIO_CLASS_RT: | 1435 | case IOPRIO_CLASS_RT: |
1422 | return &cfqd->async_cfqq[0][ioprio]; | 1436 | return &cfqd->async_cfqq[0][ioprio]; |
1423 | case IOPRIO_CLASS_BE: | 1437 | case IOPRIO_CLASS_BE: |
@@ -1430,11 +1444,11 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) | |||
1430 | } | 1444 | } |
1431 | 1445 | ||
1432 | static struct cfq_queue * | 1446 | static struct cfq_queue * |
1433 | cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk, | 1447 | cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc, |
1434 | gfp_t gfp_mask) | 1448 | gfp_t gfp_mask) |
1435 | { | 1449 | { |
1436 | const int ioprio = task_ioprio(tsk); | 1450 | const int ioprio = task_ioprio(ioc); |
1437 | const int ioprio_class = task_ioprio_class(tsk); | 1451 | const int ioprio_class = task_ioprio_class(ioc); |
1438 | struct cfq_queue **async_cfqq = NULL; | 1452 | struct cfq_queue **async_cfqq = NULL; |
1439 | struct cfq_queue *cfqq = NULL; | 1453 | struct cfq_queue *cfqq = NULL; |
1440 | 1454 | ||
@@ -1443,8 +1457,11 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk, | |||
1443 | cfqq = *async_cfqq; | 1457 | cfqq = *async_cfqq; |
1444 | } | 1458 | } |
1445 | 1459 | ||
1446 | if (!cfqq) | 1460 | if (!cfqq) { |
1447 | cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask); | 1461 | cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask); |
1462 | if (!cfqq) | ||
1463 | return NULL; | ||
1464 | } | ||
1448 | 1465 | ||
1449 | /* | 1466 | /* |
1450 | * pin the queue now that it's allocated, scheduler exit will prune it | 1467 | * pin the queue now that it's allocated, scheduler exit will prune it |
@@ -1458,28 +1475,42 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk, | |||
1458 | return cfqq; | 1475 | return cfqq; |
1459 | } | 1476 | } |
1460 | 1477 | ||
1478 | static void cfq_cic_free(struct cfq_io_context *cic) | ||
1479 | { | ||
1480 | kmem_cache_free(cfq_ioc_pool, cic); | ||
1481 | elv_ioc_count_dec(ioc_count); | ||
1482 | |||
1483 | if (ioc_gone && !elv_ioc_count_read(ioc_count)) | ||
1484 | complete(ioc_gone); | ||
1485 | } | ||
1486 | |||
1461 | /* | 1487 | /* |
1462 | * We drop cfq io contexts lazily, so we may find a dead one. | 1488 | * We drop cfq io contexts lazily, so we may find a dead one. |
1463 | */ | 1489 | */ |
1464 | static void | 1490 | static void |
1465 | cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic) | 1491 | cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc, |
1492 | struct cfq_io_context *cic) | ||
1466 | { | 1493 | { |
1494 | unsigned long flags; | ||
1495 | |||
1467 | WARN_ON(!list_empty(&cic->queue_list)); | 1496 | WARN_ON(!list_empty(&cic->queue_list)); |
1468 | 1497 | ||
1498 | spin_lock_irqsave(&ioc->lock, flags); | ||
1499 | |||
1469 | if (ioc->ioc_data == cic) | 1500 | if (ioc->ioc_data == cic) |
1470 | ioc->ioc_data = NULL; | 1501 | rcu_assign_pointer(ioc->ioc_data, NULL); |
1471 | 1502 | ||
1472 | rb_erase(&cic->rb_node, &ioc->cic_root); | 1503 | radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd); |
1473 | kmem_cache_free(cfq_ioc_pool, cic); | 1504 | spin_unlock_irqrestore(&ioc->lock, flags); |
1474 | elv_ioc_count_dec(ioc_count); | 1505 | |
1506 | cfq_cic_free(cic); | ||
1475 | } | 1507 | } |
1476 | 1508 | ||
1477 | static struct cfq_io_context * | 1509 | static struct cfq_io_context * |
1478 | cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc) | 1510 | cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc) |
1479 | { | 1511 | { |
1480 | struct rb_node *n; | ||
1481 | struct cfq_io_context *cic; | 1512 | struct cfq_io_context *cic; |
1482 | void *k, *key = cfqd; | 1513 | void *k; |
1483 | 1514 | ||
1484 | if (unlikely(!ioc)) | 1515 | if (unlikely(!ioc)) |
1485 | return NULL; | 1516 | return NULL; |
@@ -1487,74 +1518,64 @@ cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc) | |||
1487 | /* | 1518 | /* |
1488 | * we maintain a last-hit cache, to avoid browsing over the tree | 1519 | * we maintain a last-hit cache, to avoid browsing over the tree |
1489 | */ | 1520 | */ |
1490 | cic = ioc->ioc_data; | 1521 | cic = rcu_dereference(ioc->ioc_data); |
1491 | if (cic && cic->key == cfqd) | 1522 | if (cic && cic->key == cfqd) |
1492 | return cic; | 1523 | return cic; |
1493 | 1524 | ||
1494 | restart: | 1525 | do { |
1495 | n = ioc->cic_root.rb_node; | 1526 | rcu_read_lock(); |
1496 | while (n) { | 1527 | cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd); |
1497 | cic = rb_entry(n, struct cfq_io_context, rb_node); | 1528 | rcu_read_unlock(); |
1529 | if (!cic) | ||
1530 | break; | ||
1498 | /* ->key must be copied to avoid race with cfq_exit_queue() */ | 1531 | /* ->key must be copied to avoid race with cfq_exit_queue() */ |
1499 | k = cic->key; | 1532 | k = cic->key; |
1500 | if (unlikely(!k)) { | 1533 | if (unlikely(!k)) { |
1501 | cfq_drop_dead_cic(ioc, cic); | 1534 | cfq_drop_dead_cic(cfqd, ioc, cic); |
1502 | goto restart; | 1535 | continue; |
1503 | } | 1536 | } |
1504 | 1537 | ||
1505 | if (key < k) | 1538 | rcu_assign_pointer(ioc->ioc_data, cic); |
1506 | n = n->rb_left; | 1539 | break; |
1507 | else if (key > k) | 1540 | } while (1); |
1508 | n = n->rb_right; | ||
1509 | else { | ||
1510 | ioc->ioc_data = cic; | ||
1511 | return cic; | ||
1512 | } | ||
1513 | } | ||
1514 | 1541 | ||
1515 | return NULL; | 1542 | return cic; |
1516 | } | 1543 | } |
1517 | 1544 | ||
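
The converted lookup has three moving parts: a last-hit cache tried first, a radix-tree lookup under rcu_read_lock(), and lazy reaping of cics whose key was cleared at queue teardown. A rough userspace analogue follows, with acquire/release atomics standing in for RCU and index_lookup()/drop_dead() standing in for the radix-tree calls; it is an analogy, not an RCU implementation:

    #include <stdatomic.h>
    #include <stddef.h>

    struct cic {
        void *key;                   /* cleared when the queue dies */
    };

    static _Atomic(struct cic *) last_hit;        /* models ioc->ioc_data */

    extern struct cic *index_lookup(void *key);   /* radix-tree stand-in */
    extern void drop_dead(struct cic *cic);       /* unlinks it from the index */

    static struct cic *cic_lookup(void *key)
    {
        struct cic *cic = atomic_load_explicit(&last_hit,
                                               memory_order_acquire);
        if (cic && cic->key == key)
            return cic;                           /* cache hit */

        for (;;) {
            cic = index_lookup(key);
            if (!cic)
                return NULL;
            if (!cic->key) {                      /* stale: reap and retry */
                drop_dead(cic);
                continue;
            }
            atomic_store_explicit(&last_hit, cic, memory_order_release);
            return cic;
        }
    }
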
1518 | static inline void | 1545 | /* |
1519 | cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, | 1546 | * Add cic into ioc, using cfqd as the search key. This enables us to look up
1520 | struct cfq_io_context *cic) | 1547 | * the process-specific cfq io context when entered from the block layer.
1548 | * Also adds the cic to a per-cfqd list, used when this queue is removed. | ||
1549 | */ | ||
1550 | static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, | ||
1551 | struct cfq_io_context *cic, gfp_t gfp_mask) | ||
1521 | { | 1552 | { |
1522 | struct rb_node **p; | ||
1523 | struct rb_node *parent; | ||
1524 | struct cfq_io_context *__cic; | ||
1525 | unsigned long flags; | 1553 | unsigned long flags; |
1526 | void *k; | 1554 | int ret; |
1527 | 1555 | ||
1528 | cic->ioc = ioc; | 1556 | ret = radix_tree_preload(gfp_mask); |
1529 | cic->key = cfqd; | 1557 | if (!ret) { |
1558 | cic->ioc = ioc; | ||
1559 | cic->key = cfqd; | ||
1530 | 1560 | ||
1531 | restart: | 1561 | spin_lock_irqsave(&ioc->lock, flags); |
1532 | parent = NULL; | 1562 | ret = radix_tree_insert(&ioc->radix_root, |
1533 | p = &ioc->cic_root.rb_node; | 1563 | (unsigned long) cfqd, cic); |
1534 | while (*p) { | 1564 | spin_unlock_irqrestore(&ioc->lock, flags); |
1535 | parent = *p; | ||
1536 | __cic = rb_entry(parent, struct cfq_io_context, rb_node); | ||
1537 | /* ->key must be copied to avoid race with cfq_exit_queue() */ | ||
1538 | k = __cic->key; | ||
1539 | if (unlikely(!k)) { | ||
1540 | cfq_drop_dead_cic(ioc, __cic); | ||
1541 | goto restart; | ||
1542 | } | ||
1543 | 1565 | ||
1544 | if (cic->key < k) | 1566 | radix_tree_preload_end(); |
1545 | p = &(*p)->rb_left; | 1567 | |
1546 | else if (cic->key > k) | 1568 | if (!ret) { |
1547 | p = &(*p)->rb_right; | 1569 | spin_lock_irqsave(cfqd->queue->queue_lock, flags); |
1548 | else | 1570 | list_add(&cic->queue_list, &cfqd->cic_list); |
1549 | BUG(); | 1571 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); |
1572 | } | ||
1550 | } | 1573 | } |
1551 | 1574 | ||
1552 | rb_link_node(&cic->rb_node, parent, p); | 1575 | if (ret) |
1553 | rb_insert_color(&cic->rb_node, &ioc->cic_root); | 1576 | printk(KERN_ERR "cfq: cic link failed!\n"); |
1554 | 1577 | ||
1555 | spin_lock_irqsave(cfqd->queue->queue_lock, flags); | 1578 | return ret; |
1556 | list_add(&cic->queue_list, &cfqd->cic_list); | ||
1557 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); | ||
1558 | } | 1579 | } |
1559 | 1580 | ||
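
cfq_cic_link() shows the radix_tree_preload() idiom: do all the allocation work before taking ioc->lock, so that the insert under the spinlock can neither sleep nor fail on memory. The generic "reserve outside, insert inside" shape, with table_* and the mutex as stand-ins:

    #include <pthread.h>

    extern int table_reserve(void);          /* may allocate and sleep */
    extern int table_insert_locked(unsigned long key, void *val);
    extern void table_reserve_end(void);

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static int link_entry(unsigned long key, void *val)
    {
        int ret = table_reserve();           /* all allocation happens here */

        if (!ret) {
            pthread_mutex_lock(&table_lock);
            ret = table_insert_locked(key, val);  /* cannot fail on memory */
            pthread_mutex_unlock(&table_lock);
            table_reserve_end();
        }
        return ret;
    }
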
1560 | /* | 1581 | /* |
@@ -1574,7 +1595,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | |||
1574 | if (!ioc) | 1595 | if (!ioc) |
1575 | return NULL; | 1596 | return NULL; |
1576 | 1597 | ||
1577 | cic = cfq_cic_rb_lookup(cfqd, ioc); | 1598 | cic = cfq_cic_lookup(cfqd, ioc); |
1578 | if (cic) | 1599 | if (cic) |
1579 | goto out; | 1600 | goto out; |
1580 | 1601 | ||
@@ -1582,13 +1603,17 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | |||
1582 | if (cic == NULL) | 1603 | if (cic == NULL) |
1583 | goto err; | 1604 | goto err; |
1584 | 1605 | ||
1585 | cfq_cic_link(cfqd, ioc, cic); | 1606 | if (cfq_cic_link(cfqd, ioc, cic, gfp_mask)) |
1607 | goto err_free; | ||
1608 | |||
1586 | out: | 1609 | out: |
1587 | smp_read_barrier_depends(); | 1610 | smp_read_barrier_depends(); |
1588 | if (unlikely(ioc->ioprio_changed)) | 1611 | if (unlikely(ioc->ioprio_changed)) |
1589 | cfq_ioc_set_ioprio(ioc); | 1612 | cfq_ioc_set_ioprio(ioc); |
1590 | 1613 | ||
1591 | return cic; | 1614 | return cic; |
1615 | err_free: | ||
1616 | cfq_cic_free(cic); | ||
1592 | err: | 1617 | err: |
1593 | put_io_context(ioc); | 1618 | put_io_context(ioc); |
1594 | return NULL; | 1619 | return NULL; |
@@ -1643,12 +1668,15 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1643 | { | 1668 | { |
1644 | int enable_idle; | 1669 | int enable_idle; |
1645 | 1670 | ||
1646 | if (!cfq_cfqq_sync(cfqq)) | 1671 | /* |
1672 | * Don't idle for async or idle io prio class | ||
1673 | */ | ||
1674 | if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq)) | ||
1647 | return; | 1675 | return; |
1648 | 1676 | ||
1649 | enable_idle = cfq_cfqq_idle_window(cfqq); | 1677 | enable_idle = cfq_cfqq_idle_window(cfqq); |
1650 | 1678 | ||
1651 | if (!cic->ioc->task || !cfqd->cfq_slice_idle || | 1679 | if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || |
1652 | (cfqd->hw_tag && CIC_SEEKY(cic))) | 1680 | (cfqd->hw_tag && CIC_SEEKY(cic))) |
1653 | enable_idle = 0; | 1681 | enable_idle = 0; |
1654 | else if (sample_valid(cic->ttime_samples)) { | 1682 | else if (sample_valid(cic->ttime_samples)) { |
@@ -1781,7 +1809,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq) | |||
1781 | struct cfq_data *cfqd = q->elevator->elevator_data; | 1809 | struct cfq_data *cfqd = q->elevator->elevator_data; |
1782 | struct cfq_queue *cfqq = RQ_CFQQ(rq); | 1810 | struct cfq_queue *cfqq = RQ_CFQQ(rq); |
1783 | 1811 | ||
1784 | cfq_init_prio_data(cfqq); | 1812 | cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc); |
1785 | 1813 | ||
1786 | cfq_add_rq_rb(rq); | 1814 | cfq_add_rq_rb(rq); |
1787 | 1815 | ||
@@ -1822,7 +1850,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
1822 | cfq_set_prio_slice(cfqd, cfqq); | 1850 | cfq_set_prio_slice(cfqd, cfqq); |
1823 | cfq_clear_cfqq_slice_new(cfqq); | 1851 | cfq_clear_cfqq_slice_new(cfqq); |
1824 | } | 1852 | } |
1825 | if (cfq_slice_used(cfqq)) | 1853 | if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq)) |
1826 | cfq_slice_expired(cfqd, 1); | 1854 | cfq_slice_expired(cfqd, 1); |
1827 | else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) | 1855 | else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) |
1828 | cfq_arm_slice_timer(cfqd); | 1856 | cfq_arm_slice_timer(cfqd); |
@@ -1882,13 +1910,13 @@ static int cfq_may_queue(struct request_queue *q, int rw) | |||
1882 | * so just lookup a possibly existing queue, or return 'may queue' | 1910 | * so just lookup a possibly existing queue, or return 'may queue' |
1883 | * if that fails | 1911 | * if that fails |
1884 | */ | 1912 | */ |
1885 | cic = cfq_cic_rb_lookup(cfqd, tsk->io_context); | 1913 | cic = cfq_cic_lookup(cfqd, tsk->io_context); |
1886 | if (!cic) | 1914 | if (!cic) |
1887 | return ELV_MQUEUE_MAY; | 1915 | return ELV_MQUEUE_MAY; |
1888 | 1916 | ||
1889 | cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC); | 1917 | cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC); |
1890 | if (cfqq) { | 1918 | if (cfqq) { |
1891 | cfq_init_prio_data(cfqq); | 1919 | cfq_init_prio_data(cfqq, cic->ioc); |
1892 | cfq_prio_boost(cfqq); | 1920 | cfq_prio_boost(cfqq); |
1893 | 1921 | ||
1894 | return __cfq_may_queue(cfqq); | 1922 | return __cfq_may_queue(cfqq); |
@@ -1926,7 +1954,6 @@ static int | |||
1926 | cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) | 1954 | cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) |
1927 | { | 1955 | { |
1928 | struct cfq_data *cfqd = q->elevator->elevator_data; | 1956 | struct cfq_data *cfqd = q->elevator->elevator_data; |
1929 | struct task_struct *tsk = current; | ||
1930 | struct cfq_io_context *cic; | 1957 | struct cfq_io_context *cic; |
1931 | const int rw = rq_data_dir(rq); | 1958 | const int rw = rq_data_dir(rq); |
1932 | const int is_sync = rq_is_sync(rq); | 1959 | const int is_sync = rq_is_sync(rq); |
@@ -1944,7 +1971,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) | |||
1944 | 1971 | ||
1945 | cfqq = cic_to_cfqq(cic, is_sync); | 1972 | cfqq = cic_to_cfqq(cic, is_sync); |
1946 | if (!cfqq) { | 1973 | if (!cfqq) { |
1947 | cfqq = cfq_get_queue(cfqd, is_sync, tsk, gfp_mask); | 1974 | cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask); |
1948 | 1975 | ||
1949 | if (!cfqq) | 1976 | if (!cfqq) |
1950 | goto queue_fail; | 1977 | goto queue_fail; |
@@ -1995,7 +2022,8 @@ static void cfq_idle_slice_timer(unsigned long data) | |||
1995 | 2022 | ||
1996 | spin_lock_irqsave(cfqd->queue->queue_lock, flags); | 2023 | spin_lock_irqsave(cfqd->queue->queue_lock, flags); |
1997 | 2024 | ||
1998 | if ((cfqq = cfqd->active_queue) != NULL) { | 2025 | cfqq = cfqd->active_queue; |
2026 | if (cfqq) { | ||
1999 | timed_out = 0; | 2027 | timed_out = 0; |
2000 | 2028 | ||
2001 | /* | 2029 | /* |
@@ -2027,33 +2055,10 @@ out_cont: | |||
2027 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); | 2055 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); |
2028 | } | 2056 | } |
2029 | 2057 | ||
2030 | /* | ||
2031 | * Timer running if an idle class queue is waiting for service | ||
2032 | */ | ||
2033 | static void cfq_idle_class_timer(unsigned long data) | ||
2034 | { | ||
2035 | struct cfq_data *cfqd = (struct cfq_data *) data; | ||
2036 | unsigned long flags, end; | ||
2037 | |||
2038 | spin_lock_irqsave(cfqd->queue->queue_lock, flags); | ||
2039 | |||
2040 | /* | ||
2041 | * race with a non-idle queue, reset timer | ||
2042 | */ | ||
2043 | end = cfqd->last_end_request + CFQ_IDLE_GRACE; | ||
2044 | if (!time_after_eq(jiffies, end)) | ||
2045 | mod_timer(&cfqd->idle_class_timer, end); | ||
2046 | else | ||
2047 | cfq_schedule_dispatch(cfqd); | ||
2048 | |||
2049 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); | ||
2050 | } | ||
2051 | |||
2052 | static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) | 2058 | static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) |
2053 | { | 2059 | { |
2054 | del_timer_sync(&cfqd->idle_slice_timer); | 2060 | del_timer_sync(&cfqd->idle_slice_timer); |
2055 | del_timer_sync(&cfqd->idle_class_timer); | 2061 | kblockd_flush_work(&cfqd->unplug_work); |
2056 | blk_sync_queue(cfqd->queue); | ||
2057 | } | 2062 | } |
2058 | 2063 | ||
2059 | static void cfq_put_async_queues(struct cfq_data *cfqd) | 2064 | static void cfq_put_async_queues(struct cfq_data *cfqd) |
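With the idle-class grace timer deleted, idle-class service is handled inline instead (note the cfq_class_idle() checks added to cfq_update_idle_window() and cfq_completed_request() above), and cfqd->last_end_request is seeded in cfq_init_queue() below. Shutdown is left with one timer and one work item to quiesce, in that order, because the slice timer is what schedules the unplug work via cfq_schedule_dispatch():

/* sketch: kill the source (timer) before draining the sink (work) */
static void quiesce_cfqd(struct cfq_data *cfqd)
{
        del_timer_sync(&cfqd->idle_slice_timer); /* timer done, cannot rearm */
        kblockd_flush_work(&cfqd->unplug_work);  /* drain anything already queued */
}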
@@ -2065,9 +2070,10 @@ static void cfq_put_async_queues(struct cfq_data *cfqd) | |||
2065 | cfq_put_queue(cfqd->async_cfqq[0][i]); | 2070 | cfq_put_queue(cfqd->async_cfqq[0][i]); |
2066 | if (cfqd->async_cfqq[1][i]) | 2071 | if (cfqd->async_cfqq[1][i]) |
2067 | cfq_put_queue(cfqd->async_cfqq[1][i]); | 2072 | cfq_put_queue(cfqd->async_cfqq[1][i]); |
2068 | if (cfqd->async_idle_cfqq) | ||
2069 | cfq_put_queue(cfqd->async_idle_cfqq); | ||
2070 | } | 2073 | } |
2074 | |||
2075 | if (cfqd->async_idle_cfqq) | ||
2076 | cfq_put_queue(cfqd->async_idle_cfqq); | ||
2071 | } | 2077 | } |
2072 | 2078 | ||
2073 | static void cfq_exit_queue(elevator_t *e) | 2079 | static void cfq_exit_queue(elevator_t *e) |
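The cfq_put_async_queues() hunk is a reference-count fix: cfqd->async_idle_cfqq is a single shared queue, yet the old loop body put it once per priority level, up to IOPRIO_BE_NR times for one reference held. The corrected shape, flattened out of the diff for readability: per-priority queues are dropped inside the loop, the shared idle queue exactly once after it:

for (i = 0; i < IOPRIO_BE_NR; i++) {
        if (cfqd->async_cfqq[0][i])
                cfq_put_queue(cfqd->async_cfqq[0][i]);  /* per class/prio level */
        if (cfqd->async_cfqq[1][i])
                cfq_put_queue(cfqd->async_cfqq[1][i]);
}
if (cfqd->async_idle_cfqq)
        cfq_put_queue(cfqd->async_idle_cfqq);           /* shared: put once only */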
@@ -2116,12 +2122,9 @@ static void *cfq_init_queue(struct request_queue *q) | |||
2116 | cfqd->idle_slice_timer.function = cfq_idle_slice_timer; | 2122 | cfqd->idle_slice_timer.function = cfq_idle_slice_timer; |
2117 | cfqd->idle_slice_timer.data = (unsigned long) cfqd; | 2123 | cfqd->idle_slice_timer.data = (unsigned long) cfqd; |
2118 | 2124 | ||
2119 | init_timer(&cfqd->idle_class_timer); | ||
2120 | cfqd->idle_class_timer.function = cfq_idle_class_timer; | ||
2121 | cfqd->idle_class_timer.data = (unsigned long) cfqd; | ||
2122 | |||
2123 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); | 2125 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); |
2124 | 2126 | ||
2127 | cfqd->last_end_request = jiffies; | ||
2125 | cfqd->cfq_quantum = cfq_quantum; | 2128 | cfqd->cfq_quantum = cfq_quantum; |
2126 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; | 2129 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; |
2127 | cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; | 2130 | cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; |
@@ -2149,7 +2152,7 @@ static int __init cfq_slab_setup(void) | |||
2149 | if (!cfq_pool) | 2152 | if (!cfq_pool) |
2150 | goto fail; | 2153 | goto fail; |
2151 | 2154 | ||
2152 | cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0); | 2155 | cfq_ioc_pool = KMEM_CACHE(cfq_io_context, SLAB_DESTROY_BY_RCU); |
2153 | if (!cfq_ioc_pool) | 2156 | if (!cfq_ioc_pool) |
2154 | goto fail; | 2157 | goto fail; |
2155 | 2158 | ||
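cfq_io_context objects are now freed under SLAB_DESTROY_BY_RCU. That flag does not RCU-defer freeing of individual objects -- only the slab page itself is held back for a grace period -- so a lockless reader may observe an object that has been freed and recycled as a new cfq_io_context. The memory stays type-stable, which makes the dereference safe, but the reader must revalidate identity after the lookup. A self-contained sketch of that discipline (the radix tree and key field are illustrative, not lifted from this patch):

#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/radix-tree.h>

struct item {
        void *key;                      /* identity, rechecked after lookup */
        /* ... payload ... */
};

static struct kmem_cache *item_cache;
static RADIX_TREE(item_tree, GFP_ATOMIC);

static int __init item_setup(void)
{
        /* slab pages survive an RCU grace period after the last free */
        item_cache = KMEM_CACHE(item, SLAB_DESTROY_BY_RCU);
        return item_cache ? 0 : -ENOMEM;
}

static struct item *item_lookup(void *key)
{
        struct item *it;

        rcu_read_lock();
        it = radix_tree_lookup(&item_tree, (unsigned long)key);
        /* the object may have been freed and reused: recheck identity */
        if (it && it->key != key)
                it = NULL;
        rcu_read_unlock();
        return it;
}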
@@ -2214,14 +2217,18 @@ static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ | |||
2214 | return ret; \ | 2217 | return ret; \ |
2215 | } | 2218 | } |
2216 | STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); | 2219 | STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); |
2217 | STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); | 2220 | STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, |
2218 | STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); | 2221 | UINT_MAX, 1); |
2222 | STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, | ||
2223 | UINT_MAX, 1); | ||
2219 | STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); | 2224 | STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); |
2220 | STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); | 2225 | STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, |
2226 | UINT_MAX, 0); | ||
2221 | STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); | 2227 | STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); |
2222 | STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); | 2228 | STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); |
2223 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); | 2229 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); |
2224 | STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); | 2230 | STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, |
2231 | UINT_MAX, 0); | ||
2225 | #undef STORE_FUNCTION | 2232 | #undef STORE_FUNCTION |
2226 | 2233 | ||
2227 | #define CFQ_ATTR(name) \ | 2234 | #define CFQ_ATTR(name) \ |
@@ -2268,8 +2275,6 @@ static struct elevator_type iosched_cfq = { | |||
2268 | 2275 | ||
2269 | static int __init cfq_init(void) | 2276 | static int __init cfq_init(void) |
2270 | { | 2277 | { |
2271 | int ret; | ||
2272 | |||
2273 | /* | 2278 | /* |
2274 | * could be 0 on HZ < 1000 setups | 2279 | * could be 0 on HZ < 1000 setups |
2275 | */ | 2280 | */ |
@@ -2281,11 +2286,9 @@ static int __init cfq_init(void) | |||
2281 | if (cfq_slab_setup()) | 2286 | if (cfq_slab_setup()) |
2282 | return -ENOMEM; | 2287 | return -ENOMEM; |
2283 | 2288 | ||
2284 | ret = elv_register(&iosched_cfq); | 2289 | elv_register(&iosched_cfq); |
2285 | if (ret) | ||
2286 | cfq_slab_kill(); | ||
2287 | 2290 | ||
2288 | return ret; | 2291 | return 0; |
2289 | } | 2292 | } |
2290 | 2293 | ||
2291 | static void __exit cfq_exit(void) | 2294 | static void __exit cfq_exit(void) |
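elv_register() loses its return value in this series (see the elevator.c hunk below), so scheduler registration can no longer fail and the error plumbing around it disappears: cfq_init() is left checking only the slab setup, and deadline_init() further down shrinks to a call plus return 0. The resulting module-init shape, with a hypothetical scheduler name:

static int __init foo_iosched_init(void)
{
        if (foo_slab_setup())           /* the only step that can still fail */
                return -ENOMEM;

        elv_register(&iosched_foo);     /* void: registration cannot fail */
        return 0;
}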
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index f84093b97f70..b73373216b0e 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c | |||
@@ -545,6 +545,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg) | |||
545 | struct blk_user_trace_setup buts; | 545 | struct blk_user_trace_setup buts; |
546 | struct compat_blk_user_trace_setup cbuts; | 546 | struct compat_blk_user_trace_setup cbuts; |
547 | struct request_queue *q; | 547 | struct request_queue *q; |
548 | char b[BDEVNAME_SIZE]; | ||
548 | int ret; | 549 | int ret; |
549 | 550 | ||
550 | q = bdev_get_queue(bdev); | 551 | q = bdev_get_queue(bdev); |
@@ -554,6 +555,8 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg) | |||
554 | if (copy_from_user(&cbuts, arg, sizeof(cbuts))) | 555 | if (copy_from_user(&cbuts, arg, sizeof(cbuts))) |
555 | return -EFAULT; | 556 | return -EFAULT; |
556 | 557 | ||
558 | strcpy(b, bdevname(bdev, b)); | ||
559 | |||
557 | buts = (struct blk_user_trace_setup) { | 560 | buts = (struct blk_user_trace_setup) { |
558 | .act_mask = cbuts.act_mask, | 561 | .act_mask = cbuts.act_mask, |
559 | .buf_size = cbuts.buf_size, | 562 | .buf_size = cbuts.buf_size, |
@@ -565,7 +568,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg) | |||
565 | memcpy(&buts.name, &cbuts.name, 32); | 568 | memcpy(&buts.name, &cbuts.name, 32); |
566 | 569 | ||
567 | mutex_lock(&bdev->bd_mutex); | 570 | mutex_lock(&bdev->bd_mutex); |
568 | ret = do_blk_trace_setup(q, bdev, &buts); | 571 | ret = do_blk_trace_setup(q, b, bdev->bd_dev, &buts); |
569 | mutex_unlock(&bdev->bd_mutex); | 572 | mutex_unlock(&bdev->bd_mutex); |
570 | if (ret) | 573 | if (ret) |
571 | return ret; | 574 | return ret; |
@@ -581,7 +584,7 @@ static int compat_blkdev_driver_ioctl(struct inode *inode, struct file *file, | |||
581 | { | 584 | { |
582 | int ret; | 585 | int ret; |
583 | 586 | ||
584 | switch (arg) { | 587 | switch (cmd) { |
585 | case HDIO_GET_UNMASKINTR: | 588 | case HDIO_GET_UNMASKINTR: |
586 | case HDIO_GET_MULTCOUNT: | 589 | case HDIO_GET_MULTCOUNT: |
587 | case HDIO_GET_KEEPSETTINGS: | 590 | case HDIO_GET_KEEPSETTINGS: |
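Two independent fixes land in compat_ioctl.c. First, do_blk_trace_setup() now wants the device name and dev_t as explicit arguments, so the compat path snapshots bdevname() into a stack buffer before building the setup struct. Second, compat_blkdev_driver_ioctl() was switching on arg -- the userspace argument -- instead of cmd, so the HDIO_* cases matched only by accident; dispatching on the ioctl number is the actual fix. A sketch of the corrected dispatch, with a hypothetical handler:

static int compat_driver_ioctl(struct inode *inode, struct file *file,
                               unsigned int cmd, unsigned long arg)
{
        switch (cmd) {          /* the ioctl number selects the case ... */
        case HDIO_GET_UNMASKINTR:
        case HDIO_GET_MULTCOUNT:
                /* ... arg is only interpreted inside the handler */
                return hdio_get_compat(inode, file, cmd, compat_ptr(arg));
        default:
                return -ENOIOCTLCMD;
        }
}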
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index 1a511ffaf8a4..342448c3d2dd 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c | |||
@@ -55,6 +55,20 @@ static void deadline_move_request(struct deadline_data *, struct request *); | |||
55 | 55 | ||
56 | #define RQ_RB_ROOT(dd, rq) (&(dd)->sort_list[rq_data_dir((rq))]) | 56 | #define RQ_RB_ROOT(dd, rq) (&(dd)->sort_list[rq_data_dir((rq))]) |
57 | 57 | ||
58 | /* | ||
59 | * get the request after `rq' in sector-sorted order | ||
60 | */ | ||
61 | static inline struct request * | ||
62 | deadline_latter_request(struct request *rq) | ||
63 | { | ||
64 | struct rb_node *node = rb_next(&rq->rb_node); | ||
65 | |||
66 | if (node) | ||
67 | return rb_entry_rq(node); | ||
68 | |||
69 | return NULL; | ||
70 | } | ||
71 | |||
58 | static void | 72 | static void |
59 | deadline_add_rq_rb(struct deadline_data *dd, struct request *rq) | 73 | deadline_add_rq_rb(struct deadline_data *dd, struct request *rq) |
60 | { | 74 | { |
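deadline_latter_request() factors the rbtree-successor lookup out of two call sites; rb_entry_rq() is the block layer's container_of() from the embedded rb_node back to the struct request. The same primitives walk the whole sort list in ascending sector order, as in this sketch (process() is a hypothetical consumer):

static void walk_sorted(struct rb_root *root)
{
        struct rb_node *node;

        for (node = rb_first(root); node; node = rb_next(node)) {
                struct request *rq = rb_entry_rq(node);

                /* rq->sector is non-decreasing across iterations */
                process(rq);
        }
}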
@@ -74,13 +88,8 @@ deadline_del_rq_rb(struct deadline_data *dd, struct request *rq) | |||
74 | { | 88 | { |
75 | const int data_dir = rq_data_dir(rq); | 89 | const int data_dir = rq_data_dir(rq); |
76 | 90 | ||
77 | if (dd->next_rq[data_dir] == rq) { | 91 | if (dd->next_rq[data_dir] == rq) |
78 | struct rb_node *rbnext = rb_next(&rq->rb_node); | 92 | dd->next_rq[data_dir] = deadline_latter_request(rq); |
79 | |||
80 | dd->next_rq[data_dir] = NULL; | ||
81 | if (rbnext) | ||
82 | dd->next_rq[data_dir] = rb_entry_rq(rbnext); | ||
83 | } | ||
84 | 93 | ||
85 | elv_rb_del(RQ_RB_ROOT(dd, rq), rq); | 94 | elv_rb_del(RQ_RB_ROOT(dd, rq), rq); |
86 | } | 95 | } |
@@ -198,14 +207,11 @@ static void | |||
198 | deadline_move_request(struct deadline_data *dd, struct request *rq) | 207 | deadline_move_request(struct deadline_data *dd, struct request *rq) |
199 | { | 208 | { |
200 | const int data_dir = rq_data_dir(rq); | 209 | const int data_dir = rq_data_dir(rq); |
201 | struct rb_node *rbnext = rb_next(&rq->rb_node); | ||
202 | 210 | ||
203 | dd->next_rq[READ] = NULL; | 211 | dd->next_rq[READ] = NULL; |
204 | dd->next_rq[WRITE] = NULL; | 212 | dd->next_rq[WRITE] = NULL; |
213 | dd->next_rq[data_dir] = deadline_latter_request(rq); | ||
205 | 214 | ||
206 | if (rbnext) | ||
207 | dd->next_rq[data_dir] = rb_entry_rq(rbnext); | ||
208 | |||
209 | dd->last_sector = rq->sector + rq->nr_sectors; | 215 | dd->last_sector = rq->sector + rq->nr_sectors; |
210 | 216 | ||
211 | /* | 217 | /* |
@@ -301,30 +307,23 @@ dispatch_find_request: | |||
301 | /* | 307 | /* |
302 | * we are not running a batch, find best request for selected data_dir | 308 | * we are not running a batch, find best request for selected data_dir |
303 | */ | 309 | */ |
304 | if (deadline_check_fifo(dd, data_dir)) { | 310 | if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) { |
305 | /* An expired request exists - satisfy it */ | 311 | /* |
306 | dd->batching = 0; | 312 | * A deadline has expired, the last request was in the other |
313 | * direction, or we have run out of higher-sectored requests. | ||
314 | * Start again from the request with the earliest expiry time. | ||
315 | */ | ||
307 | rq = rq_entry_fifo(dd->fifo_list[data_dir].next); | 316 | rq = rq_entry_fifo(dd->fifo_list[data_dir].next); |
308 | 317 | } else { | |
309 | } else if (dd->next_rq[data_dir]) { | ||
310 | /* | 318 | /* |
311 | * The last req was the same dir and we have a next request in | 319 | * The last req was the same dir and we have a next request in |
312 | * sort order. No expired requests so continue on from here. | 320 | * sort order. No expired requests so continue on from here. |
313 | */ | 321 | */ |
314 | rq = dd->next_rq[data_dir]; | 322 | rq = dd->next_rq[data_dir]; |
315 | } else { | ||
316 | struct rb_node *node; | ||
317 | /* | ||
318 | * The last req was the other direction or we have run out of | ||
319 | * higher-sectored requests. Go back to the lowest sectored | ||
320 | * request (1 way elevator) and start a new batch. | ||
321 | */ | ||
322 | dd->batching = 0; | ||
323 | node = rb_first(&dd->sort_list[data_dir]); | ||
324 | if (node) | ||
325 | rq = rb_entry_rq(node); | ||
326 | } | 323 | } |
327 | 324 | ||
325 | dd->batching = 0; | ||
326 | |||
328 | dispatch_request: | 327 | dispatch_request: |
329 | /* | 328 | /* |
330 | * rq is the selected appropriate request. | 329 | * rq is the selected appropriate request. |
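Two semantic changes ride along with this simplification. When the one-way elevator runs out of higher-sectored requests, dispatch no longer restarts from rb_first() (the lowest sector) but from the FIFO head, i.e. the request with the earliest expiry; and dd->batching is now zeroed on every path through here, where the continue-in-the-same-direction branch previously carried the old batch count forward. The selection logic, lifted out flat:

if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir])
        rq = rq_entry_fifo(dd->fifo_list[data_dir].next); /* earliest deadline */
else
        rq = dd->next_rq[data_dir];     /* keep going upward in sector order */

dd->batching = 0;                       /* either way, a fresh batch begins */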
@@ -468,7 +467,9 @@ static struct elevator_type iosched_deadline = { | |||
468 | 467 | ||
469 | static int __init deadline_init(void) | 468 | static int __init deadline_init(void) |
470 | { | 469 | { |
471 | return elv_register(&iosched_deadline); | 470 | elv_register(&iosched_deadline); |
471 | |||
472 | return 0; | ||
472 | } | 473 | } |
473 | 474 | ||
474 | static void __exit deadline_exit(void) | 475 | static void __exit deadline_exit(void) |
diff --git a/block/elevator.c b/block/elevator.c index 446aea2a3cfb..bafbae0344d3 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -45,7 +45,8 @@ static LIST_HEAD(elv_list); | |||
45 | */ | 45 | */ |
46 | static const int elv_hash_shift = 6; | 46 | static const int elv_hash_shift = 6; |
47 | #define ELV_HASH_BLOCK(sec) ((sec) >> 3) | 47 | #define ELV_HASH_BLOCK(sec) ((sec) >> 3) |
48 | #define ELV_HASH_FN(sec) (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift)) | 48 | #define ELV_HASH_FN(sec) \ |
49 | (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift)) | ||
49 | #define ELV_HASH_ENTRIES (1 << elv_hash_shift) | 50 | #define ELV_HASH_ENTRIES (1 << elv_hash_shift) |
50 | #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) | 51 | #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) |
51 | #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) | 52 | #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) |
@@ -185,9 +186,7 @@ static elevator_t *elevator_alloc(struct request_queue *q, | |||
185 | 186 | ||
186 | eq->ops = &e->ops; | 187 | eq->ops = &e->ops; |
187 | eq->elevator_type = e; | 188 | eq->elevator_type = e; |
188 | kobject_init(&eq->kobj); | 189 | kobject_init(&eq->kobj, &elv_ktype); |
189 | kobject_set_name(&eq->kobj, "%s", "iosched"); | ||
190 | eq->kobj.ktype = &elv_ktype; | ||
191 | mutex_init(&eq->sysfs_lock); | 190 | mutex_init(&eq->sysfs_lock); |
192 | 191 | ||
193 | eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, | 192 | eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, |
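This tracks the 2.6.25 kobject API consolidation: kobject_init() now takes the ktype at initialization time, and the name is supplied later, when kobject_add() attaches the object to sysfs (elv_register_queue() below shows that half). The two-step lifecycle, sketched for a hypothetical object with an embedded kobject:

#include <linux/kobject.h>
#include <linux/slab.h>

struct widget {
        struct kobject kobj;
        /* ... */
};

static void widget_release(struct kobject *kobj)
{
        kfree(container_of(kobj, struct widget, kobj));
}

static struct kobj_type widget_ktype = {
        .release = widget_release,      /* frees the containing object */
};

static int widget_setup(struct widget *w, struct kobject *parent)
{
        kobject_init(&w->kobj, &widget_ktype);  /* type + refcount, no name yet */
        return kobject_add(&w->kobj, parent, "%s", "widget"); /* name + sysfs dir */
}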
@@ -226,15 +225,27 @@ int elevator_init(struct request_queue *q, char *name) | |||
226 | q->end_sector = 0; | 225 | q->end_sector = 0; |
227 | q->boundary_rq = NULL; | 226 | q->boundary_rq = NULL; |
228 | 227 | ||
229 | if (name && !(e = elevator_get(name))) | 228 | if (name) { |
230 | return -EINVAL; | 229 | e = elevator_get(name); |
230 | if (!e) | ||
231 | return -EINVAL; | ||
232 | } | ||
231 | 233 | ||
232 | if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator))) | 234 | if (!e && *chosen_elevator) { |
233 | printk("I/O scheduler %s not found\n", chosen_elevator); | 235 | e = elevator_get(chosen_elevator); |
236 | if (!e) | ||
237 | printk(KERN_ERR "I/O scheduler %s not found\n", | ||
238 | chosen_elevator); | ||
239 | } | ||
234 | 240 | ||
235 | if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) { | 241 | if (!e) { |
236 | printk("Default I/O scheduler not found, using no-op\n"); | 242 | e = elevator_get(CONFIG_DEFAULT_IOSCHED); |
237 | e = elevator_get("noop"); | 243 | if (!e) { |
244 | printk(KERN_ERR | ||
245 | "Default I/O scheduler not found. " \ | ||
246 | "Using noop.\n"); | ||
247 | e = elevator_get("noop"); | ||
248 | } | ||
238 | } | 249 | } |
239 | 250 | ||
240 | eq = elevator_alloc(q, e); | 251 | eq = elevator_alloc(q, e); |
@@ -250,7 +261,6 @@ int elevator_init(struct request_queue *q, char *name) | |||
250 | elevator_attach(q, eq, data); | 261 | elevator_attach(q, eq, data); |
251 | return ret; | 262 | return ret; |
252 | } | 263 | } |
253 | |||
254 | EXPORT_SYMBOL(elevator_init); | 264 | EXPORT_SYMBOL(elevator_init); |
255 | 265 | ||
256 | void elevator_exit(elevator_t *e) | 266 | void elevator_exit(elevator_t *e) |
@@ -263,7 +273,6 @@ void elevator_exit(elevator_t *e) | |||
263 | 273 | ||
264 | kobject_put(&e->kobj); | 274 | kobject_put(&e->kobj); |
265 | } | 275 | } |
266 | |||
267 | EXPORT_SYMBOL(elevator_exit); | 276 | EXPORT_SYMBOL(elevator_exit); |
268 | 277 | ||
269 | static void elv_activate_rq(struct request_queue *q, struct request *rq) | 278 | static void elv_activate_rq(struct request_queue *q, struct request *rq) |
@@ -355,7 +364,6 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq) | |||
355 | rb_insert_color(&rq->rb_node, root); | 364 | rb_insert_color(&rq->rb_node, root); |
356 | return NULL; | 365 | return NULL; |
357 | } | 366 | } |
358 | |||
359 | EXPORT_SYMBOL(elv_rb_add); | 367 | EXPORT_SYMBOL(elv_rb_add); |
360 | 368 | ||
361 | void elv_rb_del(struct rb_root *root, struct request *rq) | 369 | void elv_rb_del(struct rb_root *root, struct request *rq) |
@@ -364,7 +372,6 @@ void elv_rb_del(struct rb_root *root, struct request *rq) | |||
364 | rb_erase(&rq->rb_node, root); | 372 | rb_erase(&rq->rb_node, root); |
365 | RB_CLEAR_NODE(&rq->rb_node); | 373 | RB_CLEAR_NODE(&rq->rb_node); |
366 | } | 374 | } |
367 | |||
368 | EXPORT_SYMBOL(elv_rb_del); | 375 | EXPORT_SYMBOL(elv_rb_del); |
369 | 376 | ||
370 | struct request *elv_rb_find(struct rb_root *root, sector_t sector) | 377 | struct request *elv_rb_find(struct rb_root *root, sector_t sector) |
@@ -385,7 +392,6 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector) | |||
385 | 392 | ||
386 | return NULL; | 393 | return NULL; |
387 | } | 394 | } |
388 | |||
389 | EXPORT_SYMBOL(elv_rb_find); | 395 | EXPORT_SYMBOL(elv_rb_find); |
390 | 396 | ||
391 | /* | 397 | /* |
@@ -397,6 +403,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq) | |||
397 | { | 403 | { |
398 | sector_t boundary; | 404 | sector_t boundary; |
399 | struct list_head *entry; | 405 | struct list_head *entry; |
406 | int stop_flags; | ||
400 | 407 | ||
401 | if (q->last_merge == rq) | 408 | if (q->last_merge == rq) |
402 | q->last_merge = NULL; | 409 | q->last_merge = NULL; |
@@ -406,13 +413,13 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq) | |||
406 | q->nr_sorted--; | 413 | q->nr_sorted--; |
407 | 414 | ||
408 | boundary = q->end_sector; | 415 | boundary = q->end_sector; |
409 | 416 | stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED; | |
410 | list_for_each_prev(entry, &q->queue_head) { | 417 | list_for_each_prev(entry, &q->queue_head) { |
411 | struct request *pos = list_entry_rq(entry); | 418 | struct request *pos = list_entry_rq(entry); |
412 | 419 | ||
413 | if (rq_data_dir(rq) != rq_data_dir(pos)) | 420 | if (rq_data_dir(rq) != rq_data_dir(pos)) |
414 | break; | 421 | break; |
415 | if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED)) | 422 | if (pos->cmd_flags & stop_flags) |
416 | break; | 423 | break; |
417 | if (rq->sector >= boundary) { | 424 | if (rq->sector >= boundary) { |
418 | if (pos->sector < boundary) | 425 | if (pos->sector < boundary) |
@@ -427,7 +434,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq) | |||
427 | 434 | ||
428 | list_add(&rq->queuelist, entry); | 435 | list_add(&rq->queuelist, entry); |
429 | } | 436 | } |
430 | |||
431 | EXPORT_SYMBOL(elv_dispatch_sort); | 437 | EXPORT_SYMBOL(elv_dispatch_sort); |
432 | 438 | ||
433 | /* | 439 | /* |
@@ -448,7 +454,6 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq) | |||
448 | q->boundary_rq = rq; | 454 | q->boundary_rq = rq; |
449 | list_add_tail(&rq->queuelist, &q->queue_head); | 455 | list_add_tail(&rq->queuelist, &q->queue_head); |
450 | } | 456 | } |
451 | |||
452 | EXPORT_SYMBOL(elv_dispatch_add_tail); | 457 | EXPORT_SYMBOL(elv_dispatch_add_tail); |
453 | 458 | ||
454 | int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) | 459 | int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) |
@@ -667,7 +672,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where, | |||
667 | q->end_sector = rq_end_sector(rq); | 672 | q->end_sector = rq_end_sector(rq); |
668 | q->boundary_rq = rq; | 673 | q->boundary_rq = rq; |
669 | } | 674 | } |
670 | } else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT) | 675 | } else if (!(rq->cmd_flags & REQ_ELVPRIV) && |
676 | where == ELEVATOR_INSERT_SORT) | ||
671 | where = ELEVATOR_INSERT_BACK; | 677 | where = ELEVATOR_INSERT_BACK; |
672 | 678 | ||
673 | if (plug) | 679 | if (plug) |
@@ -675,7 +681,6 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where, | |||
675 | 681 | ||
676 | elv_insert(q, rq, where); | 682 | elv_insert(q, rq, where); |
677 | } | 683 | } |
678 | |||
679 | EXPORT_SYMBOL(__elv_add_request); | 684 | EXPORT_SYMBOL(__elv_add_request); |
680 | 685 | ||
681 | void elv_add_request(struct request_queue *q, struct request *rq, int where, | 686 | void elv_add_request(struct request_queue *q, struct request *rq, int where, |
@@ -687,7 +692,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where, | |||
687 | __elv_add_request(q, rq, where, plug); | 692 | __elv_add_request(q, rq, where, plug); |
688 | spin_unlock_irqrestore(q->queue_lock, flags); | 693 | spin_unlock_irqrestore(q->queue_lock, flags); |
689 | } | 694 | } |
690 | |||
691 | EXPORT_SYMBOL(elv_add_request); | 695 | EXPORT_SYMBOL(elv_add_request); |
692 | 696 | ||
693 | static inline struct request *__elv_next_request(struct request_queue *q) | 697 | static inline struct request *__elv_next_request(struct request_queue *q) |
@@ -743,7 +747,21 @@ struct request *elv_next_request(struct request_queue *q) | |||
743 | q->boundary_rq = NULL; | 747 | q->boundary_rq = NULL; |
744 | } | 748 | } |
745 | 749 | ||
746 | if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn) | 750 | if (rq->cmd_flags & REQ_DONTPREP) |
751 | break; | ||
752 | |||
753 | if (q->dma_drain_size && rq->data_len) { | ||
754 | /* | ||
755 | * make sure space for the drain appears we | ||
756 | * know we can do this because max_hw_segments | ||
757 | * has been adjusted to be one fewer than the | ||
758 | * device can handle | ||
759 | */ | ||
760 | rq->nr_phys_segments++; | ||
761 | rq->nr_hw_segments++; | ||
762 | } | ||
763 | |||
764 | if (!q->prep_rq_fn) | ||
747 | break; | 765 | break; |
748 | 766 | ||
749 | ret = q->prep_rq_fn(q, rq); | 767 | ret = q->prep_rq_fn(q, rq); |
@@ -756,6 +774,16 @@ struct request *elv_next_request(struct request_queue *q) | |||
756 | * avoid resource deadlock. REQ_STARTED will | 774 | * avoid resource deadlock. REQ_STARTED will |
757 | * prevent other fs requests from passing this one. | 775 | * prevent other fs requests from passing this one. |
758 | */ | 776 | */ |
777 | if (q->dma_drain_size && rq->data_len && | ||
778 | !(rq->cmd_flags & REQ_DONTPREP)) { | ||
779 | /* | ||
780 | * remove the space for the drain we added | ||
781 | * so that we don't add it again | ||
782 | */ | ||
783 | --rq->nr_phys_segments; | ||
784 | --rq->nr_hw_segments; | ||
785 | } | ||
786 | |||
759 | rq = NULL; | 787 | rq = NULL; |
760 | break; | 788 | break; |
761 | } else if (ret == BLKPREP_KILL) { | 789 | } else if (ret == BLKPREP_KILL) { |
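The drain handling works in two halves. Queues that set dma_drain_size need one spare scatterlist slot, so elv_next_request() bumps both segment counts before calling prep -- legal because such queues register max_hw_segments one below the real hardware limit. If prep defers (BLKPREP_DEFER) without setting REQ_DONTPREP, the counts are rolled back so the next pass through prep does not grow them again. Condensed:

/* sketch: reserve a segment for the drain, un-reserve on deferral */
if (q->dma_drain_size && rq->data_len) {
        rq->nr_phys_segments++;         /* room for the drain buffer */
        rq->nr_hw_segments++;
}
ret = q->prep_rq_fn(q, rq);
if (ret == BLKPREP_DEFER && q->dma_drain_size && rq->data_len &&
    !(rq->cmd_flags & REQ_DONTPREP)) {
        --rq->nr_phys_segments;         /* prep didn't take it; give it back */
        --rq->nr_hw_segments;
}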
@@ -770,7 +798,6 @@ struct request *elv_next_request(struct request_queue *q) | |||
770 | 798 | ||
771 | return rq; | 799 | return rq; |
772 | } | 800 | } |
773 | |||
774 | EXPORT_SYMBOL(elv_next_request); | 801 | EXPORT_SYMBOL(elv_next_request); |
775 | 802 | ||
776 | void elv_dequeue_request(struct request_queue *q, struct request *rq) | 803 | void elv_dequeue_request(struct request_queue *q, struct request *rq) |
@@ -788,7 +815,6 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq) | |||
788 | if (blk_account_rq(rq)) | 815 | if (blk_account_rq(rq)) |
789 | q->in_flight++; | 816 | q->in_flight++; |
790 | } | 817 | } |
791 | |||
792 | EXPORT_SYMBOL(elv_dequeue_request); | 818 | EXPORT_SYMBOL(elv_dequeue_request); |
793 | 819 | ||
794 | int elv_queue_empty(struct request_queue *q) | 820 | int elv_queue_empty(struct request_queue *q) |
@@ -803,7 +829,6 @@ int elv_queue_empty(struct request_queue *q) | |||
803 | 829 | ||
804 | return 1; | 830 | return 1; |
805 | } | 831 | } |
806 | |||
807 | EXPORT_SYMBOL(elv_queue_empty); | 832 | EXPORT_SYMBOL(elv_queue_empty); |
808 | 833 | ||
809 | struct request *elv_latter_request(struct request_queue *q, struct request *rq) | 834 | struct request *elv_latter_request(struct request_queue *q, struct request *rq) |
@@ -931,9 +956,7 @@ int elv_register_queue(struct request_queue *q) | |||
931 | elevator_t *e = q->elevator; | 956 | elevator_t *e = q->elevator; |
932 | int error; | 957 | int error; |
933 | 958 | ||
934 | e->kobj.parent = &q->kobj; | 959 | error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched"); |
935 | |||
936 | error = kobject_add(&e->kobj); | ||
937 | if (!error) { | 960 | if (!error) { |
938 | struct elv_fs_entry *attr = e->elevator_type->elevator_attrs; | 961 | struct elv_fs_entry *attr = e->elevator_type->elevator_attrs; |
939 | if (attr) { | 962 | if (attr) { |
@@ -960,7 +983,7 @@ void elv_unregister_queue(struct request_queue *q) | |||
960 | __elv_unregister_queue(q->elevator); | 983 | __elv_unregister_queue(q->elevator); |
961 | } | 984 | } |
962 | 985 | ||
963 | int elv_register(struct elevator_type *e) | 986 | void elv_register(struct elevator_type *e) |
964 | { | 987 | { |
965 | char *def = ""; | 988 | char *def = ""; |
966 | 989 | ||
@@ -974,8 +997,8 @@ int elv_register(struct elevator_type *e) | |||
974 | !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED))) | 997 | !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED))) |
975 | def = " (default)"; | 998 | def = " (default)"; |
976 | 999 | ||
977 | printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def); | 1000 | printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, |
978 | return 0; | 1001 | def); |
979 | } | 1002 | } |
980 | EXPORT_SYMBOL_GPL(elv_register); | 1003 | EXPORT_SYMBOL_GPL(elv_register); |
981 | 1004 | ||
@@ -1107,7 +1130,8 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name, | |||
1107 | } | 1130 | } |
1108 | 1131 | ||
1109 | if (!elevator_switch(q, e)) | 1132 | if (!elevator_switch(q, e)) |
1110 | printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name); | 1133 | printk(KERN_ERR "elevator: switch to %s failed\n", |
1134 | elevator_name); | ||
1111 | return count; | 1135 | return count; |
1112 | } | 1136 | } |
1113 | 1137 | ||
@@ -1141,7 +1165,6 @@ struct request *elv_rb_former_request(struct request_queue *q, | |||
1141 | 1165 | ||
1142 | return NULL; | 1166 | return NULL; |
1143 | } | 1167 | } |
1144 | |||
1145 | EXPORT_SYMBOL(elv_rb_former_request); | 1168 | EXPORT_SYMBOL(elv_rb_former_request); |
1146 | 1169 | ||
1147 | struct request *elv_rb_latter_request(struct request_queue *q, | 1170 | struct request *elv_rb_latter_request(struct request_queue *q, |
@@ -1154,5 +1177,4 @@ struct request *elv_rb_latter_request(struct request_queue *q, | |||
1154 | 1177 | ||
1155 | return NULL; | 1178 | return NULL; |
1156 | } | 1179 | } |
1157 | |||
1158 | EXPORT_SYMBOL(elv_rb_latter_request); | 1180 | EXPORT_SYMBOL(elv_rb_latter_request); |
diff --git a/block/genhd.c b/block/genhd.c index e609996f2e76..de2ebb2fab43 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -17,8 +17,10 @@ | |||
17 | #include <linux/buffer_head.h> | 17 | #include <linux/buffer_head.h> |
18 | #include <linux/mutex.h> | 18 | #include <linux/mutex.h> |
19 | 19 | ||
20 | struct kset block_subsys; | 20 | static DEFINE_MUTEX(block_class_lock); |
21 | static DEFINE_MUTEX(block_subsys_lock); | 21 | #ifndef CONFIG_SYSFS_DEPRECATED |
22 | struct kobject *block_depr; | ||
23 | #endif | ||
22 | 24 | ||
23 | /* | 25 | /* |
24 | * Can be deleted altogether. Later. | 26 | * Can be deleted altogether. Later. |
@@ -37,19 +39,17 @@ static inline int major_to_index(int major) | |||
37 | } | 39 | } |
38 | 40 | ||
39 | #ifdef CONFIG_PROC_FS | 41 | #ifdef CONFIG_PROC_FS |
40 | |||
41 | void blkdev_show(struct seq_file *f, off_t offset) | 42 | void blkdev_show(struct seq_file *f, off_t offset) |
42 | { | 43 | { |
43 | struct blk_major_name *dp; | 44 | struct blk_major_name *dp; |
44 | 45 | ||
45 | if (offset < BLKDEV_MAJOR_HASH_SIZE) { | 46 | if (offset < BLKDEV_MAJOR_HASH_SIZE) { |
46 | mutex_lock(&block_subsys_lock); | 47 | mutex_lock(&block_class_lock); |
47 | for (dp = major_names[offset]; dp; dp = dp->next) | 48 | for (dp = major_names[offset]; dp; dp = dp->next) |
48 | seq_printf(f, "%3d %s\n", dp->major, dp->name); | 49 | seq_printf(f, "%3d %s\n", dp->major, dp->name); |
49 | mutex_unlock(&block_subsys_lock); | 50 | mutex_unlock(&block_class_lock); |
50 | } | 51 | } |
51 | } | 52 | } |
52 | |||
53 | #endif /* CONFIG_PROC_FS */ | 53 | #endif /* CONFIG_PROC_FS */ |
54 | 54 | ||
55 | int register_blkdev(unsigned int major, const char *name) | 55 | int register_blkdev(unsigned int major, const char *name) |
@@ -57,7 +57,7 @@ int register_blkdev(unsigned int major, const char *name) | |||
57 | struct blk_major_name **n, *p; | 57 | struct blk_major_name **n, *p; |
58 | int index, ret = 0; | 58 | int index, ret = 0; |
59 | 59 | ||
60 | mutex_lock(&block_subsys_lock); | 60 | mutex_lock(&block_class_lock); |
61 | 61 | ||
62 | /* temporary */ | 62 | /* temporary */ |
63 | if (major == 0) { | 63 | if (major == 0) { |
@@ -102,7 +102,7 @@ int register_blkdev(unsigned int major, const char *name) | |||
102 | kfree(p); | 102 | kfree(p); |
103 | } | 103 | } |
104 | out: | 104 | out: |
105 | mutex_unlock(&block_subsys_lock); | 105 | mutex_unlock(&block_class_lock); |
106 | return ret; | 106 | return ret; |
107 | } | 107 | } |
108 | 108 | ||
@@ -114,7 +114,7 @@ void unregister_blkdev(unsigned int major, const char *name) | |||
114 | struct blk_major_name *p = NULL; | 114 | struct blk_major_name *p = NULL; |
115 | int index = major_to_index(major); | 115 | int index = major_to_index(major); |
116 | 116 | ||
117 | mutex_lock(&block_subsys_lock); | 117 | mutex_lock(&block_class_lock); |
118 | for (n = &major_names[index]; *n; n = &(*n)->next) | 118 | for (n = &major_names[index]; *n; n = &(*n)->next) |
119 | if ((*n)->major == major) | 119 | if ((*n)->major == major) |
120 | break; | 120 | break; |
@@ -124,7 +124,7 @@ void unregister_blkdev(unsigned int major, const char *name) | |||
124 | p = *n; | 124 | p = *n; |
125 | *n = p->next; | 125 | *n = p->next; |
126 | } | 126 | } |
127 | mutex_unlock(&block_subsys_lock); | 127 | mutex_unlock(&block_class_lock); |
128 | kfree(p); | 128 | kfree(p); |
129 | } | 129 | } |
130 | 130 | ||
@@ -137,29 +137,30 @@ static struct kobj_map *bdev_map; | |||
137 | * range must be nonzero | 137 | * range must be nonzero |
138 | * The hash chain is sorted on range, so that subranges can override. | 138 | * The hash chain is sorted on range, so that subranges can override. |
139 | */ | 139 | */ |
140 | void blk_register_region(dev_t dev, unsigned long range, struct module *module, | 140 | void blk_register_region(dev_t devt, unsigned long range, struct module *module, |
141 | struct kobject *(*probe)(dev_t, int *, void *), | 141 | struct kobject *(*probe)(dev_t, int *, void *), |
142 | int (*lock)(dev_t, void *), void *data) | 142 | int (*lock)(dev_t, void *), void *data) |
143 | { | 143 | { |
144 | kobj_map(bdev_map, dev, range, module, probe, lock, data); | 144 | kobj_map(bdev_map, devt, range, module, probe, lock, data); |
145 | } | 145 | } |
146 | 146 | ||
147 | EXPORT_SYMBOL(blk_register_region); | 147 | EXPORT_SYMBOL(blk_register_region); |
148 | 148 | ||
149 | void blk_unregister_region(dev_t dev, unsigned long range) | 149 | void blk_unregister_region(dev_t devt, unsigned long range) |
150 | { | 150 | { |
151 | kobj_unmap(bdev_map, dev, range); | 151 | kobj_unmap(bdev_map, devt, range); |
152 | } | 152 | } |
153 | 153 | ||
154 | EXPORT_SYMBOL(blk_unregister_region); | 154 | EXPORT_SYMBOL(blk_unregister_region); |
155 | 155 | ||
156 | static struct kobject *exact_match(dev_t dev, int *part, void *data) | 156 | static struct kobject *exact_match(dev_t devt, int *part, void *data) |
157 | { | 157 | { |
158 | struct gendisk *p = data; | 158 | struct gendisk *p = data; |
159 | return &p->kobj; | 159 | |
160 | return &p->dev.kobj; | ||
160 | } | 161 | } |
161 | 162 | ||
162 | static int exact_lock(dev_t dev, void *data) | 163 | static int exact_lock(dev_t devt, void *data) |
163 | { | 164 | { |
164 | struct gendisk *p = data; | 165 | struct gendisk *p = data; |
165 | 166 | ||
@@ -194,8 +195,6 @@ void unlink_gendisk(struct gendisk *disk) | |||
194 | disk->minors); | 195 | disk->minors); |
195 | } | 196 | } |
196 | 197 | ||
197 | #define to_disk(obj) container_of(obj,struct gendisk,kobj) | ||
198 | |||
199 | /** | 198 | /** |
200 | * get_gendisk - get partitioning information for a given device | 199 | * get_gendisk - get partitioning information for a given device |
201 | * @dev: device to get partitioning information for | 200 | * @dev: device to get partitioning information for |
@@ -203,10 +202,12 @@ void unlink_gendisk(struct gendisk *disk) | |||
203 | * This function gets the structure containing partitioning | 202 | * This function gets the structure containing partitioning |
204 | * information for the given device @dev. | 203 | * information for the given device @dev. |
205 | */ | 204 | */ |
206 | struct gendisk *get_gendisk(dev_t dev, int *part) | 205 | struct gendisk *get_gendisk(dev_t devt, int *part) |
207 | { | 206 | { |
208 | struct kobject *kobj = kobj_lookup(bdev_map, dev, part); | 207 | struct kobject *kobj = kobj_lookup(bdev_map, devt, part); |
209 | return kobj ? to_disk(kobj) : NULL; | 208 | struct device *dev = kobj_to_dev(kobj); |
209 | |||
210 | return kobj ? dev_to_disk(dev) : NULL; | ||
210 | } | 211 | } |
211 | 212 | ||
212 | /* | 213 | /* |
@@ -216,13 +217,17 @@ struct gendisk *get_gendisk(dev_t dev, int *part) | |||
216 | */ | 217 | */ |
217 | void __init printk_all_partitions(void) | 218 | void __init printk_all_partitions(void) |
218 | { | 219 | { |
219 | int n; | 220 | struct device *dev; |
220 | struct gendisk *sgp; | 221 | struct gendisk *sgp; |
222 | char buf[BDEVNAME_SIZE]; | ||
223 | int n; | ||
221 | 224 | ||
222 | mutex_lock(&block_subsys_lock); | 225 | mutex_lock(&block_class_lock); |
223 | /* For each block device... */ | 226 | /* For each block device... */ |
224 | list_for_each_entry(sgp, &block_subsys.list, kobj.entry) { | 227 | list_for_each_entry(dev, &block_class.devices, node) { |
225 | char buf[BDEVNAME_SIZE]; | 228 | if (dev->type != &disk_type) |
229 | continue; | ||
230 | sgp = dev_to_disk(dev); | ||
226 | /* | 231 | /* |
227 | * Don't show empty devices or things that have been suppressed | 232
228 | */ | 233 | */ |
@@ -255,38 +260,46 @@ void __init printk_all_partitions(void) | |||
255 | sgp->major, n + 1 + sgp->first_minor, | 260 | sgp->major, n + 1 + sgp->first_minor, |
256 | (unsigned long long)sgp->part[n]->nr_sects >> 1, | 261 | (unsigned long long)sgp->part[n]->nr_sects >> 1, |
257 | disk_name(sgp, n + 1, buf)); | 262 | disk_name(sgp, n + 1, buf)); |
258 | } /* partition subloop */ | 263 | } |
259 | } /* Block device loop */ | 264 | } |
260 | 265 | ||
261 | mutex_unlock(&block_subsys_lock); | 266 | mutex_unlock(&block_class_lock); |
262 | return; | ||
263 | } | 267 | } |
264 | 268 | ||
265 | #ifdef CONFIG_PROC_FS | 269 | #ifdef CONFIG_PROC_FS |
266 | /* iterator */ | 270 | /* iterator */ |
267 | static void *part_start(struct seq_file *part, loff_t *pos) | 271 | static void *part_start(struct seq_file *part, loff_t *pos) |
268 | { | 272 | { |
269 | struct list_head *p; | 273 | loff_t k = *pos; |
270 | loff_t l = *pos; | 274 | struct device *dev; |
271 | 275 | ||
272 | mutex_lock(&block_subsys_lock); | 276 | mutex_lock(&block_class_lock); |
273 | list_for_each(p, &block_subsys.list) | 277 | list_for_each_entry(dev, &block_class.devices, node) { |
274 | if (!l--) | 278 | if (dev->type != &disk_type) |
275 | return list_entry(p, struct gendisk, kobj.entry); | 279 | continue; |
280 | if (!k--) | ||
281 | return dev_to_disk(dev); | ||
282 | } | ||
276 | return NULL; | 283 | return NULL; |
277 | } | 284 | } |
278 | 285 | ||
279 | static void *part_next(struct seq_file *part, void *v, loff_t *pos) | 286 | static void *part_next(struct seq_file *part, void *v, loff_t *pos) |
280 | { | 287 | { |
281 | struct list_head *p = ((struct gendisk *)v)->kobj.entry.next; | 288 | struct gendisk *gp = v; |
289 | struct device *dev; | ||
282 | ++*pos; | 290 | ++*pos; |
283 | return p==&block_subsys.list ? NULL : | 291 | list_for_each_entry(dev, &gp->dev.node, node) { |
284 | list_entry(p, struct gendisk, kobj.entry); | 292 | if (&dev->node == &block_class.devices) |
293 | return NULL; | ||
294 | if (dev->type == &disk_type) | ||
295 | return dev_to_disk(dev); | ||
296 | } | ||
297 | return NULL; | ||
285 | } | 298 | } |
286 | 299 | ||
287 | static void part_stop(struct seq_file *part, void *v) | 300 | static void part_stop(struct seq_file *part, void *v) |
288 | { | 301 | { |
289 | mutex_unlock(&block_subsys_lock); | 302 | mutex_unlock(&block_class_lock); |
290 | } | 303 | } |
291 | 304 | ||
292 | static int show_partition(struct seq_file *part, void *v) | 305 | static int show_partition(struct seq_file *part, void *v) |
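Since gendisks are now plain struct devices on block_class, the /proc/partitions iterator walks block_class.devices and filters on dev->type == &disk_type; the class list also carries partition devices, hence the filter. A sketch of the ->start half in that style (seq_file guarantees ->stop runs even on error, which is where the mutex is dropped):

static void *gd_start(struct seq_file *m, loff_t *pos)
{
        loff_t k = *pos;
        struct device *dev;

        mutex_lock(&block_class_lock);
        list_for_each_entry(dev, &block_class.devices, node) {
                if (dev->type != &disk_type)
                        continue;               /* skip non-disk entries */
                if (!k--)
                        return dev_to_disk(dev);
        }
        return NULL;                            /* ->stop still unlocks */
}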
@@ -295,7 +308,7 @@ static int show_partition(struct seq_file *part, void *v) | |||
295 | int n; | 308 | int n; |
296 | char buf[BDEVNAME_SIZE]; | 309 | char buf[BDEVNAME_SIZE]; |
297 | 310 | ||
298 | if (&sgp->kobj.entry == block_subsys.list.next) | 311 | if (&sgp->dev.node == block_class.devices.next) |
299 | seq_puts(part, "major minor #blocks name\n\n"); | 312 | seq_puts(part, "major minor #blocks name\n\n"); |
300 | 313 | ||
301 | /* Don't show non-partitionable removable devices or empty devices */ | 314
@@ -324,111 +337,82 @@ static int show_partition(struct seq_file *part, void *v) | |||
324 | return 0; | 337 | return 0; |
325 | } | 338 | } |
326 | 339 | ||
327 | struct seq_operations partitions_op = { | 340 | const struct seq_operations partitions_op = { |
328 | .start =part_start, | 341 | .start = part_start, |
329 | .next = part_next, | 342 | .next = part_next, |
330 | .stop = part_stop, | 343 | .stop = part_stop, |
331 | .show = show_partition | 344 | .show = show_partition |
332 | }; | 345 | }; |
333 | #endif | 346 | #endif |
334 | 347 | ||
335 | 348 | ||
336 | extern int blk_dev_init(void); | 349 | extern int blk_dev_init(void); |
337 | 350 | ||
338 | static struct kobject *base_probe(dev_t dev, int *part, void *data) | 351 | static struct kobject *base_probe(dev_t devt, int *part, void *data) |
339 | { | 352 | { |
340 | if (request_module("block-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0) | 353 | if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0) |
341 | /* Make old-style 2.4 aliases work */ | 354 | /* Make old-style 2.4 aliases work */ |
342 | request_module("block-major-%d", MAJOR(dev)); | 355 | request_module("block-major-%d", MAJOR(devt)); |
343 | return NULL; | 356 | return NULL; |
344 | } | 357 | } |
345 | 358 | ||
346 | static int __init genhd_device_init(void) | 359 | static int __init genhd_device_init(void) |
347 | { | 360 | { |
348 | int err; | 361 | class_register(&block_class); |
349 | 362 | bdev_map = kobj_map_init(base_probe, &block_class_lock); | |
350 | bdev_map = kobj_map_init(base_probe, &block_subsys_lock); | ||
351 | blk_dev_init(); | 363 | blk_dev_init(); |
352 | err = subsystem_register(&block_subsys); | 364 | |
353 | if (err < 0) | 365 | #ifndef CONFIG_SYSFS_DEPRECATED |
354 | printk(KERN_WARNING "%s: subsystem_register error: %d\n", | 366 | /* create top-level block dir */ |
355 | __FUNCTION__, err); | 367 | block_depr = kobject_create_and_add("block", NULL); |
356 | return err; | 368 | #endif |
369 | return 0; | ||
357 | } | 370 | } |
358 | 371 | ||
359 | subsys_initcall(genhd_device_init); | 372 | subsys_initcall(genhd_device_init); |
360 | 373 | ||
361 | 374 | static ssize_t disk_range_show(struct device *dev, | |
362 | 375 | struct device_attribute *attr, char *buf) | |
363 | /* | ||
364 | * kobject & sysfs bindings for block devices | ||
365 | */ | ||
366 | static ssize_t disk_attr_show(struct kobject *kobj, struct attribute *attr, | ||
367 | char *page) | ||
368 | { | 376 | { |
369 | struct gendisk *disk = to_disk(kobj); | 377 | struct gendisk *disk = dev_to_disk(dev); |
370 | struct disk_attribute *disk_attr = | ||
371 | container_of(attr,struct disk_attribute,attr); | ||
372 | ssize_t ret = -EIO; | ||
373 | 378 | ||
374 | if (disk_attr->show) | 379 | return sprintf(buf, "%d\n", disk->minors); |
375 | ret = disk_attr->show(disk,page); | ||
376 | return ret; | ||
377 | } | 380 | } |
378 | 381 | ||
379 | static ssize_t disk_attr_store(struct kobject * kobj, struct attribute * attr, | 382 | static ssize_t disk_removable_show(struct device *dev, |
380 | const char *page, size_t count) | 383 | struct device_attribute *attr, char *buf) |
381 | { | 384 | { |
382 | struct gendisk *disk = to_disk(kobj); | 385 | struct gendisk *disk = dev_to_disk(dev); |
383 | struct disk_attribute *disk_attr = | ||
384 | container_of(attr,struct disk_attribute,attr); | ||
385 | ssize_t ret = 0; | ||
386 | 386 | ||
387 | if (disk_attr->store) | 387 | return sprintf(buf, "%d\n", |
388 | ret = disk_attr->store(disk, page, count); | 388 | (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0)); |
389 | return ret; | ||
390 | } | 389 | } |
391 | 390 | ||
392 | static struct sysfs_ops disk_sysfs_ops = { | 391 | static ssize_t disk_size_show(struct device *dev, |
393 | .show = &disk_attr_show, | 392 | struct device_attribute *attr, char *buf) |
394 | .store = &disk_attr_store, | ||
395 | }; | ||
396 | |||
397 | static ssize_t disk_uevent_store(struct gendisk * disk, | ||
398 | const char *buf, size_t count) | ||
399 | { | ||
400 | kobject_uevent(&disk->kobj, KOBJ_ADD); | ||
401 | return count; | ||
402 | } | ||
403 | static ssize_t disk_dev_read(struct gendisk * disk, char *page) | ||
404 | { | ||
405 | dev_t base = MKDEV(disk->major, disk->first_minor); | ||
406 | return print_dev_t(page, base); | ||
407 | } | ||
408 | static ssize_t disk_range_read(struct gendisk * disk, char *page) | ||
409 | { | 393 | { |
410 | return sprintf(page, "%d\n", disk->minors); | 394 | struct gendisk *disk = dev_to_disk(dev); |
411 | } | ||
412 | static ssize_t disk_removable_read(struct gendisk * disk, char *page) | ||
413 | { | ||
414 | return sprintf(page, "%d\n", | ||
415 | (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0)); | ||
416 | 395 | ||
396 | return sprintf(buf, "%llu\n", (unsigned long long)get_capacity(disk)); | ||
417 | } | 397 | } |
418 | static ssize_t disk_size_read(struct gendisk * disk, char *page) | 398 | |
419 | { | 399 | static ssize_t disk_capability_show(struct device *dev, |
420 | return sprintf(page, "%llu\n", (unsigned long long)get_capacity(disk)); | 400 | struct device_attribute *attr, char *buf) |
421 | } | ||
422 | static ssize_t disk_capability_read(struct gendisk *disk, char *page) | ||
423 | { | 401 | { |
424 | return sprintf(page, "%x\n", disk->flags); | 402 | struct gendisk *disk = dev_to_disk(dev); |
403 | |||
404 | return sprintf(buf, "%x\n", disk->flags); | ||
425 | } | 405 | } |
426 | static ssize_t disk_stats_read(struct gendisk * disk, char *page) | 406 | |
407 | static ssize_t disk_stat_show(struct device *dev, | ||
408 | struct device_attribute *attr, char *buf) | ||
427 | { | 409 | { |
410 | struct gendisk *disk = dev_to_disk(dev); | ||
411 | |||
428 | preempt_disable(); | 412 | preempt_disable(); |
429 | disk_round_stats(disk); | 413 | disk_round_stats(disk); |
430 | preempt_enable(); | 414 | preempt_enable(); |
431 | return sprintf(page, | 415 | return sprintf(buf, |
432 | "%8lu %8lu %8llu %8u " | 416 | "%8lu %8lu %8llu %8u " |
433 | "%8lu %8lu %8llu %8u " | 417 | "%8lu %8lu %8llu %8u " |
434 | "%8u %8u %8u" | 418 | "%8u %8u %8u" |
@@ -445,40 +429,21 @@ static ssize_t disk_stats_read(struct gendisk * disk, char *page) | |||
445 | jiffies_to_msecs(disk_stat_read(disk, io_ticks)), | 429 | jiffies_to_msecs(disk_stat_read(disk, io_ticks)), |
446 | jiffies_to_msecs(disk_stat_read(disk, time_in_queue))); | 430 | jiffies_to_msecs(disk_stat_read(disk, time_in_queue))); |
447 | } | 431 | } |
448 | static struct disk_attribute disk_attr_uevent = { | ||
449 | .attr = {.name = "uevent", .mode = S_IWUSR }, | ||
450 | .store = disk_uevent_store | ||
451 | }; | ||
452 | static struct disk_attribute disk_attr_dev = { | ||
453 | .attr = {.name = "dev", .mode = S_IRUGO }, | ||
454 | .show = disk_dev_read | ||
455 | }; | ||
456 | static struct disk_attribute disk_attr_range = { | ||
457 | .attr = {.name = "range", .mode = S_IRUGO }, | ||
458 | .show = disk_range_read | ||
459 | }; | ||
460 | static struct disk_attribute disk_attr_removable = { | ||
461 | .attr = {.name = "removable", .mode = S_IRUGO }, | ||
462 | .show = disk_removable_read | ||
463 | }; | ||
464 | static struct disk_attribute disk_attr_size = { | ||
465 | .attr = {.name = "size", .mode = S_IRUGO }, | ||
466 | .show = disk_size_read | ||
467 | }; | ||
468 | static struct disk_attribute disk_attr_capability = { | ||
469 | .attr = {.name = "capability", .mode = S_IRUGO }, | ||
470 | .show = disk_capability_read | ||
471 | }; | ||
472 | static struct disk_attribute disk_attr_stat = { | ||
473 | .attr = {.name = "stat", .mode = S_IRUGO }, | ||
474 | .show = disk_stats_read | ||
475 | }; | ||
476 | 432 | ||
477 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 433 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
434 | static ssize_t disk_fail_show(struct device *dev, | ||
435 | struct device_attribute *attr, char *buf) | ||
436 | { | ||
437 | struct gendisk *disk = dev_to_disk(dev); | ||
438 | |||
439 | return sprintf(buf, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0); | ||
440 | } | ||
478 | 441 | ||
479 | static ssize_t disk_fail_store(struct gendisk * disk, | 442 | static ssize_t disk_fail_store(struct device *dev, |
443 | struct device_attribute *attr, | ||
480 | const char *buf, size_t count) | 444 | const char *buf, size_t count) |
481 | { | 445 | { |
446 | struct gendisk *disk = dev_to_disk(dev); | ||
482 | int i; | 447 | int i; |
483 | 448 | ||
484 | if (count > 0 && sscanf(buf, "%d", &i) > 0) { | 449 | if (count > 0 && sscanf(buf, "%d", &i) > 0) { |
@@ -490,136 +455,100 @@ static ssize_t disk_fail_store(struct gendisk * disk, | |||
490 | 455 | ||
491 | return count; | 456 | return count; |
492 | } | 457 | } |
493 | static ssize_t disk_fail_read(struct gendisk * disk, char *page) | ||
494 | { | ||
495 | return sprintf(page, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0); | ||
496 | } | ||
497 | static struct disk_attribute disk_attr_fail = { | ||
498 | .attr = {.name = "make-it-fail", .mode = S_IRUGO | S_IWUSR }, | ||
499 | .store = disk_fail_store, | ||
500 | .show = disk_fail_read | ||
501 | }; | ||
502 | 458 | ||
503 | #endif | 459 | #endif |
504 | 460 | ||
505 | static struct attribute * default_attrs[] = { | 461 | static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL); |
506 | &disk_attr_uevent.attr, | 462 | static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL); |
507 | &disk_attr_dev.attr, | 463 | static DEVICE_ATTR(size, S_IRUGO, disk_size_show, NULL); |
508 | &disk_attr_range.attr, | 464 | static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); |
509 | &disk_attr_removable.attr, | 465 | static DEVICE_ATTR(stat, S_IRUGO, disk_stat_show, NULL); |
510 | &disk_attr_size.attr, | 466 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
511 | &disk_attr_stat.attr, | 467 | static struct device_attribute dev_attr_fail = |
512 | &disk_attr_capability.attr, | 468 | __ATTR(make-it-fail, S_IRUGO|S_IWUSR, disk_fail_show, disk_fail_store); |
469 | #endif | ||
470 | |||
471 | static struct attribute *disk_attrs[] = { | ||
472 | &dev_attr_range.attr, | ||
473 | &dev_attr_removable.attr, | ||
474 | &dev_attr_size.attr, | ||
475 | &dev_attr_capability.attr, | ||
476 | &dev_attr_stat.attr, | ||
513 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 477 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
514 | &disk_attr_fail.attr, | 478 | &dev_attr_fail.attr, |
515 | #endif | 479 | #endif |
516 | NULL, | 480 | NULL |
481 | }; | ||
482 | |||
483 | static struct attribute_group disk_attr_group = { | ||
484 | .attrs = disk_attrs, | ||
517 | }; | 485 | }; |
518 | 486 | ||
519 | static void disk_release(struct kobject * kobj) | 487 | static struct attribute_group *disk_attr_groups[] = { |
488 | &disk_attr_group, | ||
489 | NULL | ||
490 | }; | ||
491 | |||
492 | static void disk_release(struct device *dev) | ||
520 | { | 493 | { |
521 | struct gendisk *disk = to_disk(kobj); | 494 | struct gendisk *disk = dev_to_disk(dev); |
495 | |||
522 | kfree(disk->random); | 496 | kfree(disk->random); |
523 | kfree(disk->part); | 497 | kfree(disk->part); |
524 | free_disk_stats(disk); | 498 | free_disk_stats(disk); |
525 | kfree(disk); | 499 | kfree(disk); |
526 | } | 500 | } |
527 | 501 | struct class block_class = { | |
528 | static struct kobj_type ktype_block = { | 502 | .name = "block", |
529 | .release = disk_release, | ||
530 | .sysfs_ops = &disk_sysfs_ops, | ||
531 | .default_attrs = default_attrs, | ||
532 | }; | 503 | }; |
533 | 504 | ||
534 | extern struct kobj_type ktype_part; | 505 | struct device_type disk_type = { |
535 | 506 | .name = "disk", | |
536 | static int block_uevent_filter(struct kset *kset, struct kobject *kobj) | 507 | .groups = disk_attr_groups, |
537 | { | 508 | .release = disk_release, |
538 | struct kobj_type *ktype = get_ktype(kobj); | ||
539 | |||
540 | return ((ktype == &ktype_block) || (ktype == &ktype_part)); | ||
541 | } | ||
542 | |||
543 | static int block_uevent(struct kset *kset, struct kobject *kobj, | ||
544 | struct kobj_uevent_env *env) | ||
545 | { | ||
546 | struct kobj_type *ktype = get_ktype(kobj); | ||
547 | struct device *physdev; | ||
548 | struct gendisk *disk; | ||
549 | struct hd_struct *part; | ||
550 | |||
551 | if (ktype == &ktype_block) { | ||
552 | disk = container_of(kobj, struct gendisk, kobj); | ||
553 | add_uevent_var(env, "MINOR=%u", disk->first_minor); | ||
554 | } else if (ktype == &ktype_part) { | ||
555 | disk = container_of(kobj->parent, struct gendisk, kobj); | ||
556 | part = container_of(kobj, struct hd_struct, kobj); | ||
557 | add_uevent_var(env, "MINOR=%u", | ||
558 | disk->first_minor + part->partno); | ||
559 | } else | ||
560 | return 0; | ||
561 | |||
562 | add_uevent_var(env, "MAJOR=%u", disk->major); | ||
563 | |||
564 | /* add physical device, backing this device */ | ||
565 | physdev = disk->driverfs_dev; | ||
566 | if (physdev) { | ||
567 | char *path = kobject_get_path(&physdev->kobj, GFP_KERNEL); | ||
568 | |||
569 | add_uevent_var(env, "PHYSDEVPATH=%s", path); | ||
570 | kfree(path); | ||
571 | |||
572 | if (physdev->bus) | ||
573 | add_uevent_var(env, "PHYSDEVBUS=%s", physdev->bus->name); | ||
574 | |||
575 | if (physdev->driver) | ||
576 | add_uevent_var(env, physdev->driver->name); | ||
577 | } | ||
578 | |||
579 | return 0; | ||
580 | } | ||
581 | |||
582 | static struct kset_uevent_ops block_uevent_ops = { | ||
583 | .filter = block_uevent_filter, | ||
584 | .uevent = block_uevent, | ||
585 | }; | 509 | }; |
586 | 510 | ||
587 | decl_subsys(block, &ktype_block, &block_uevent_ops); | ||
588 | |||
589 | /* | 511 | /* |
590 | * aggregate disk stat collector. Uses the same stats that the sysfs | 512 | * aggregate disk stat collector. Uses the same stats that the sysfs |
591 | * entries do, above, but makes them available through one seq_file. | 513 | * entries do, above, but makes them available through one seq_file. |
592 | * Watching a few disks may be efficient through sysfs, but watching | ||
593 | * all of them will be more efficient through this interface. | ||
594 | * | 514 | * |
595 | * The output looks suspiciously like /proc/partitions with a bunch of | 515 | * The output looks suspiciously like /proc/partitions with a bunch of |
596 | * extra fields. | 516 | * extra fields. |
597 | */ | 517 | */ |
598 | 518 | ||
599 | /* iterator */ | ||
600 | static void *diskstats_start(struct seq_file *part, loff_t *pos) | 519 | static void *diskstats_start(struct seq_file *part, loff_t *pos) |
601 | { | 520 | { |
602 | loff_t k = *pos; | 521 | loff_t k = *pos; |
603 | struct list_head *p; | 522 | struct device *dev; |
604 | 523 | ||
605 | mutex_lock(&block_subsys_lock); | 524 | mutex_lock(&block_class_lock); |
606 | list_for_each(p, &block_subsys.list) | 525 | list_for_each_entry(dev, &block_class.devices, node) { |
526 | if (dev->type != &disk_type) | ||
527 | continue; | ||
607 | if (!k--) | 528 | if (!k--) |
608 | return list_entry(p, struct gendisk, kobj.entry); | 529 | return dev_to_disk(dev); |
530 | } | ||
609 | return NULL; | 531 | return NULL; |
610 | } | 532 | } |
611 | 533 | ||
612 | static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos) | 534 | static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos) |
613 | { | 535 | { |
614 | struct list_head *p = ((struct gendisk *)v)->kobj.entry.next; | 536 | struct gendisk *gp = v; |
537 | struct device *dev; | ||
538 | |||
615 | ++*pos; | 539 | ++*pos; |
616 | return p==&block_subsys.list ? NULL : | 540 | list_for_each_entry(dev, &gp->dev.node, node) { |
617 | list_entry(p, struct gendisk, kobj.entry); | 541 | if (&dev->node == &block_class.devices) |
542 | return NULL; | ||
543 | if (dev->type == &disk_type) | ||
544 | return dev_to_disk(dev); | ||
545 | } | ||
546 | return NULL; | ||
618 | } | 547 | } |
619 | 548 | ||
620 | static void diskstats_stop(struct seq_file *part, void *v) | 549 | static void diskstats_stop(struct seq_file *part, void *v) |
621 | { | 550 | { |
622 | mutex_unlock(&block_subsys_lock); | 551 | mutex_unlock(&block_class_lock); |
623 | } | 552 | } |
624 | 553 | ||
625 | static int diskstats_show(struct seq_file *s, void *v) | 554 | static int diskstats_show(struct seq_file *s, void *v) |
@@ -629,7 +558,7 @@ static int diskstats_show(struct seq_file *s, void *v) | |||
629 | int n = 0; | 558 | int n = 0; |
630 | 559 | ||
631 | /* | 560 | /* |
632 | if (&sgp->kobj.entry == block_subsys.kset.list.next) | 561 | if (&gp->dev.kobj.entry == block_class.devices.next) |
633 | seq_puts(s, "major minor name" | 562 | seq_puts(s, "major minor name" |
634 | " rio rmerge rsect ruse wio wmerge " | 563 | " rio rmerge rsect ruse wio wmerge " |
635 | "wsect wuse running use aveq" | 564 | "wsect wuse running use aveq" |
@@ -666,7 +595,7 @@ static int diskstats_show(struct seq_file *s, void *v) | |||
666 | return 0; | 595 | return 0; |
667 | } | 596 | } |
668 | 597 | ||
669 | struct seq_operations diskstats_op = { | 598 | const struct seq_operations diskstats_op = { |
670 | .start = diskstats_start, | 599 | .start = diskstats_start, |
671 | .next = diskstats_next, | 600 | .next = diskstats_next, |
672 | .stop = diskstats_stop, | 601 | .stop = diskstats_stop, |
@@ -683,7 +612,7 @@ static void media_change_notify_thread(struct work_struct *work) | |||
683 | * set environment vars to indicate which event this is for | 612 | * set environment vars to indicate which event this is for |
684 | * so that user space will know to go check the media status. | 613 | * so that user space will know to go check the media status. |
685 | */ | 614 | */ |
686 | kobject_uevent_env(&gd->kobj, KOBJ_CHANGE, envp); | 615 | kobject_uevent_env(&gd->dev.kobj, KOBJ_CHANGE, envp); |
687 | put_device(gd->driverfs_dev); | 616 | put_device(gd->driverfs_dev); |
688 | } | 617 | } |
689 | 618 | ||
@@ -694,6 +623,25 @@ void genhd_media_change_notify(struct gendisk *disk) | |||
694 | } | 623 | } |
695 | EXPORT_SYMBOL_GPL(genhd_media_change_notify); | 624 | EXPORT_SYMBOL_GPL(genhd_media_change_notify); |
696 | 625 | ||
626 | dev_t blk_lookup_devt(const char *name) | ||
627 | { | ||
628 | struct device *dev; | ||
629 | dev_t devt = MKDEV(0, 0); | ||
630 | |||
631 | mutex_lock(&block_class_lock); | ||
632 | list_for_each_entry(dev, &block_class.devices, node) { | ||
633 | if (strcmp(dev->bus_id, name) == 0) { | ||
634 | devt = dev->devt; | ||
635 | break; | ||
636 | } | ||
637 | } | ||
638 | mutex_unlock(&block_class_lock); | ||
639 | |||
640 | return devt; | ||
641 | } | ||
642 | |||
643 | EXPORT_SYMBOL(blk_lookup_devt); | ||
644 | |||
697 | struct gendisk *alloc_disk(int minors) | 645 | struct gendisk *alloc_disk(int minors) |
698 | { | 646 | { |
699 | return alloc_disk_node(minors, -1); | 647 | return alloc_disk_node(minors, -1); |
@@ -715,14 +663,16 @@ struct gendisk *alloc_disk_node(int minors, int node_id) | |||
715 | disk->part = kmalloc_node(size, | 663 | disk->part = kmalloc_node(size, |
716 | GFP_KERNEL | __GFP_ZERO, node_id); | 664 | GFP_KERNEL | __GFP_ZERO, node_id); |
717 | if (!disk->part) { | 665 | if (!disk->part) { |
666 | free_disk_stats(disk); | ||
718 | kfree(disk); | 667 | kfree(disk); |
719 | return NULL; | 668 | return NULL; |
720 | } | 669 | } |
721 | } | 670 | } |
722 | disk->minors = minors; | 671 | disk->minors = minors; |
723 | kobj_set_kset_s(disk,block_subsys); | ||
724 | kobject_init(&disk->kobj); | ||
725 | rand_initialize_disk(disk); | 672 | rand_initialize_disk(disk); |
673 | disk->dev.class = &block_class; | ||
674 | disk->dev.type = &disk_type; | ||
675 | device_initialize(&disk->dev); | ||
726 | INIT_WORK(&disk->async_notify, | 676 | INIT_WORK(&disk->async_notify, |
727 | media_change_notify_thread); | 677 | media_change_notify_thread); |
728 | } | 678 | } |
@@ -742,7 +692,7 @@ struct kobject *get_disk(struct gendisk *disk) | |||
742 | owner = disk->fops->owner; | 692 | owner = disk->fops->owner; |
743 | if (owner && !try_module_get(owner)) | 693 | if (owner && !try_module_get(owner)) |
744 | return NULL; | 694 | return NULL; |
745 | kobj = kobject_get(&disk->kobj); | 695 | kobj = kobject_get(&disk->dev.kobj); |
746 | if (kobj == NULL) { | 696 | if (kobj == NULL) { |
747 | module_put(owner); | 697 | module_put(owner); |
748 | return NULL; | 698 | return NULL; |
@@ -756,7 +706,7 @@ EXPORT_SYMBOL(get_disk); | |||
756 | void put_disk(struct gendisk *disk) | 706 | void put_disk(struct gendisk *disk) |
757 | { | 707 | { |
758 | if (disk) | 708 | if (disk) |
759 | kobject_put(&disk->kobj); | 709 | kobject_put(&disk->dev.kobj); |
760 | } | 710 | } |
761 | 711 | ||
762 | EXPORT_SYMBOL(put_disk); | 712 | EXPORT_SYMBOL(put_disk); |
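Note that get_disk() pins both the disk's kobject and its fops->owner module, while put_disk() only drops the kobject reference, so callers typically pair them along these lines (a sketch; surrounding locking omitted):

	struct kobject *kobj = get_disk(disk);
	if (kobj) {
		/* disk->fops and the disk itself are safe to use here */
		put_disk(disk);			/* drop the kobject reference */
		module_put(disk->fops->owner);	/* drop the module ref taken by get_disk() */
	}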
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c deleted file mode 100644 index b01dee3ae7f3..000000000000 --- a/block/ll_rw_blk.c +++ /dev/null | |||
@@ -1,4242 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
3 | * Copyright (C) 1994, Karl Keyte: Added support for disk statistics | ||
4 | * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE | ||
5 | * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> | ||
6 | * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July2000 | ||
7 | * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 | ||
8 | */ | ||
9 | |||
10 | /* | ||
11 | * This handles all read/write requests to block devices | ||
12 | */ | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/backing-dev.h> | ||
16 | #include <linux/bio.h> | ||
17 | #include <linux/blkdev.h> | ||
18 | #include <linux/highmem.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/kernel_stat.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ | ||
24 | #include <linux/completion.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/swap.h> | ||
27 | #include <linux/writeback.h> | ||
28 | #include <linux/task_io_accounting_ops.h> | ||
29 | #include <linux/interrupt.h> | ||
30 | #include <linux/cpu.h> | ||
31 | #include <linux/blktrace_api.h> | ||
32 | #include <linux/fault-inject.h> | ||
33 | #include <linux/scatterlist.h> | ||
34 | |||
35 | /* | ||
36 | * for max sense size | ||
37 | */ | ||
38 | #include <scsi/scsi_cmnd.h> | ||
39 | |||
40 | static void blk_unplug_work(struct work_struct *work); | ||
41 | static void blk_unplug_timeout(unsigned long data); | ||
42 | static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); | ||
43 | static void init_request_from_bio(struct request *req, struct bio *bio); | ||
44 | static int __make_request(struct request_queue *q, struct bio *bio); | ||
45 | static struct io_context *current_io_context(gfp_t gfp_flags, int node); | ||
46 | static void blk_recalc_rq_segments(struct request *rq); | ||
47 | static void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | ||
48 | struct bio *bio); | ||
49 | |||
50 | /* | ||
51 | * For the allocated request tables | ||
52 | */ | ||
53 | static struct kmem_cache *request_cachep; | ||
54 | |||
55 | /* | ||
56 | * For queue allocation | ||
57 | */ | ||
58 | static struct kmem_cache *requestq_cachep; | ||
59 | |||
60 | /* | ||
61 | * For io context allocations | ||
62 | */ | ||
63 | static struct kmem_cache *iocontext_cachep; | ||
64 | |||
65 | /* | ||
66 | * Controlling structure to kblockd | ||
67 | */ | ||
68 | static struct workqueue_struct *kblockd_workqueue; | ||
69 | |||
70 | unsigned long blk_max_low_pfn, blk_max_pfn; | ||
71 | |||
72 | EXPORT_SYMBOL(blk_max_low_pfn); | ||
73 | EXPORT_SYMBOL(blk_max_pfn); | ||
74 | |||
75 | static DEFINE_PER_CPU(struct list_head, blk_cpu_done); | ||
76 | |||
77 | /* Amount of time in which a process may batch requests */ | ||
78 | #define BLK_BATCH_TIME (HZ/50UL) | ||
79 | |||
80 | /* Number of requests a "batching" process may submit */ | ||
81 | #define BLK_BATCH_REQ 32 | ||
82 | |||
83 | /* | ||
84 | * Return the threshold (number of used requests) at which the queue is | ||
85 | * considered to be congested. It include a little hysteresis to keep the | ||
86 | * context switch rate down. | ||
87 | */ | ||
88 | static inline int queue_congestion_on_threshold(struct request_queue *q) | ||
89 | { | ||
90 | return q->nr_congestion_on; | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * The threshold at which a queue is considered to be uncongested | ||
95 | */ | ||
96 | static inline int queue_congestion_off_threshold(struct request_queue *q) | ||
97 | { | ||
98 | return q->nr_congestion_off; | ||
99 | } | ||
100 | |||
101 | static void blk_queue_congestion_threshold(struct request_queue *q) | ||
102 | { | ||
103 | int nr; | ||
104 | |||
105 | nr = q->nr_requests - (q->nr_requests / 8) + 1; | ||
106 | if (nr > q->nr_requests) | ||
107 | nr = q->nr_requests; | ||
108 | q->nr_congestion_on = nr; | ||
109 | |||
110 | nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; | ||
111 | if (nr < 1) | ||
112 | nr = 1; | ||
113 | q->nr_congestion_off = nr; | ||
114 | } | ||
115 | |||
116 | /** | ||
117 | * blk_get_backing_dev_info - get the address of a queue's backing_dev_info | ||
118 | * @bdev: device | ||
119 | * | ||
120 | * Locates the passed device's request queue and returns the address of its | ||
121 | * backing_dev_info | ||
122 | * | ||
123 | * Will return NULL if the request queue cannot be located. | ||
124 | */ | ||
125 | struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) | ||
126 | { | ||
127 | struct backing_dev_info *ret = NULL; | ||
128 | struct request_queue *q = bdev_get_queue(bdev); | ||
129 | |||
130 | if (q) | ||
131 | ret = &q->backing_dev_info; | ||
132 | return ret; | ||
133 | } | ||
134 | EXPORT_SYMBOL(blk_get_backing_dev_info); | ||
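For instance, a caller that wants to inspect a device's readahead setting might do (sketch):

	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
	if (bdi)
		printk(KERN_INFO "readahead window: %lu pages\n", bdi->ra_pages);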
135 | |||
136 | /** | ||
137 | * blk_queue_prep_rq - set a prepare_request function for queue | ||
138 | * @q: queue | ||
139 | * @pfn: prepare_request function | ||
140 | * | ||
141 | * It's possible for a queue to register a prepare_request callback which | ||
142 | * is invoked before the request is handed to the request_fn. The goal of | ||
143 | * the function is to prepare a request for I/O; it can be used to build a | ||
144 | * cdb from the request data, for instance. | ||
145 | * | ||
146 | */ | ||
147 | void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) | ||
148 | { | ||
149 | q->prep_rq_fn = pfn; | ||
150 | } | ||
151 | |||
152 | EXPORT_SYMBOL(blk_queue_prep_rq); | ||
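A hypothetical driver might use this hook to translate fs requests into a 10-byte SCSI-style cdb before they reach its request_fn; mydrv_prep_rq and the opcode choice are illustrative only:

	static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
	{
		if (blk_fs_request(rq)) {
			/* READ_10/WRITE_10 opcodes from <scsi/scsi.h> */
			rq->cmd[0] = rq_data_dir(rq) == READ ? READ_10 : WRITE_10;
			rq->cmd[2] = (rq->sector >> 24) & 0xff;
			rq->cmd[3] = (rq->sector >> 16) & 0xff;
			rq->cmd[4] = (rq->sector >> 8) & 0xff;
			rq->cmd[5] = rq->sector & 0xff;
			rq->cmd[7] = (rq->nr_sectors >> 8) & 0xff;
			rq->cmd[8] = rq->nr_sectors & 0xff;
		}
		return BLKPREP_OK;
	}

	blk_queue_prep_rq(q, mydrv_prep_rq);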
153 | |||
154 | /** | ||
155 | * blk_queue_merge_bvec - set a merge_bvec function for queue | ||
156 | * @q: queue | ||
157 | * @mbfn: merge_bvec_fn | ||
158 | * | ||
159 | * Usually queues have static limitations on the max sectors or segments that | ||
160 | * we can put in a request. Stacking drivers may have some settings that | ||
161 | * are dynamic, and thus we have to query the queue whether it is ok to | ||
162 | * add a new bio_vec to a bio at a given offset or not. If the block device | ||
163 | * has such limitations, it needs to register a merge_bvec_fn to control | ||
164 | * the size of bio's sent to it. Note that a block device *must* allow a | ||
165 | * single page to be added to an empty bio. The block device driver may want | ||
166 | * to use the bio_split() function to deal with these bio's. By default | ||
167 | * no merge_bvec_fn is defined for a queue, and only the fixed limits are | ||
168 | * honored. | ||
169 | */ | ||
170 | void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) | ||
171 | { | ||
172 | q->merge_bvec_fn = mbfn; | ||
173 | } | ||
174 | |||
175 | EXPORT_SYMBOL(blk_queue_merge_bvec); | ||
176 | |||
177 | void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) | ||
178 | { | ||
179 | q->softirq_done_fn = fn; | ||
180 | } | ||
181 | |||
182 | EXPORT_SYMBOL(blk_queue_softirq_done); | ||
183 | |||
184 | /** | ||
185 | * blk_queue_make_request - define an alternate make_request function for a device | ||
186 | * @q: the request queue for the device to be affected | ||
187 | * @mfn: the alternate make_request function | ||
188 | * | ||
189 | * Description: | ||
190 | * The normal way for &struct bios to be passed to a device | ||
191 | * driver is for them to be collected into requests on a request | ||
192 | * queue, and then to allow the device driver to select requests | ||
193 | * off that queue when it is ready. This works well for many block | ||
194 | * devices. However some block devices (typically virtual devices | ||
195 | * such as md or lvm) do not benefit from the processing on the | ||
196 | * request queue, and are served best by having the requests passed | ||
197 | * directly to them. This can be achieved by providing a function | ||
198 | * to blk_queue_make_request(). | ||
199 | * | ||
200 | * Caveat: | ||
201 | * The driver that does this *must* be able to deal appropriately | ||
202 | * with buffers in "highmemory". This can be accomplished by either calling | ||
203 | * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling | ||
204 | * blk_queue_bounce() to create a buffer in normal memory. | ||
205 | **/ | ||
206 | void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn) | ||
207 | { | ||
208 | /* | ||
209 | * set defaults | ||
210 | */ | ||
211 | q->nr_requests = BLKDEV_MAX_RQ; | ||
212 | blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); | ||
213 | blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); | ||
214 | q->make_request_fn = mfn; | ||
215 | q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; | ||
216 | q->backing_dev_info.state = 0; | ||
217 | q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; | ||
218 | blk_queue_max_sectors(q, SAFE_MAX_SECTORS); | ||
219 | blk_queue_hardsect_size(q, 512); | ||
220 | blk_queue_dma_alignment(q, 511); | ||
221 | blk_queue_congestion_threshold(q); | ||
222 | q->nr_batching = BLK_BATCH_REQ; | ||
223 | |||
224 | q->unplug_thresh = 4; /* hmm */ | ||
225 | q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */ | ||
226 | if (q->unplug_delay == 0) | ||
227 | q->unplug_delay = 1; | ||
228 | |||
229 | INIT_WORK(&q->unplug_work, blk_unplug_work); | ||
230 | |||
231 | q->unplug_timer.function = blk_unplug_timeout; | ||
232 | q->unplug_timer.data = (unsigned long)q; | ||
233 | |||
234 | /* | ||
235 | * by default assume old behaviour and bounce for any highmem page | ||
236 | */ | ||
237 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); | ||
238 | } | ||
239 | |||
240 | EXPORT_SYMBOL(blk_queue_make_request); | ||
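A stacking driver in the md/lvm mold might install a make_request function that simply remaps each bio onto a backing device. A sketch, assuming a hypothetical mydev_backing_bdev and the era's convention that a non-zero return asks generic_make_request() to resubmit the remapped bio:

	static int mydev_make_request(struct request_queue *q, struct bio *bio)
	{
		bio->bi_bdev = mydev_backing_bdev;	/* hypothetical backing device */
		return 1;	/* have generic_make_request() resubmit the bio */
	}

	blk_queue_make_request(q, mydev_make_request);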
241 | |||
242 | static void rq_init(struct request_queue *q, struct request *rq) | ||
243 | { | ||
244 | INIT_LIST_HEAD(&rq->queuelist); | ||
245 | INIT_LIST_HEAD(&rq->donelist); | ||
246 | |||
247 | rq->errors = 0; | ||
248 | rq->bio = rq->biotail = NULL; | ||
249 | INIT_HLIST_NODE(&rq->hash); | ||
250 | RB_CLEAR_NODE(&rq->rb_node); | ||
251 | rq->ioprio = 0; | ||
252 | rq->buffer = NULL; | ||
253 | rq->ref_count = 1; | ||
254 | rq->q = q; | ||
255 | rq->special = NULL; | ||
256 | rq->data_len = 0; | ||
257 | rq->data = NULL; | ||
258 | rq->nr_phys_segments = 0; | ||
259 | rq->sense = NULL; | ||
260 | rq->end_io = NULL; | ||
261 | rq->end_io_data = NULL; | ||
262 | rq->completion_data = NULL; | ||
263 | rq->next_rq = NULL; | ||
264 | } | ||
265 | |||
266 | /** | ||
267 | * blk_queue_ordered - does this queue support ordered writes | ||
268 | * @q: the request queue | ||
269 | * @ordered: one of QUEUE_ORDERED_* | ||
270 | * @prepare_flush_fn: rq setup helper for cache flush ordered writes | ||
271 | * | ||
272 | * Description: | ||
273 | * For journalled file systems, doing ordered writes on a commit | ||
274 | * block instead of explicitly doing wait_on_buffer (which is bad | ||
275 | * for performance) can be a big win. Block drivers supporting this | ||
276 | * feature should call this function and indicate so. | ||
277 | * | ||
278 | **/ | ||
279 | int blk_queue_ordered(struct request_queue *q, unsigned ordered, | ||
280 | prepare_flush_fn *prepare_flush_fn) | ||
281 | { | ||
282 | if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && | ||
283 | prepare_flush_fn == NULL) { | ||
284 | printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n"); | ||
285 | return -EINVAL; | ||
286 | } | ||
287 | |||
288 | if (ordered != QUEUE_ORDERED_NONE && | ||
289 | ordered != QUEUE_ORDERED_DRAIN && | ||
290 | ordered != QUEUE_ORDERED_DRAIN_FLUSH && | ||
291 | ordered != QUEUE_ORDERED_DRAIN_FUA && | ||
292 | ordered != QUEUE_ORDERED_TAG && | ||
293 | ordered != QUEUE_ORDERED_TAG_FLUSH && | ||
294 | ordered != QUEUE_ORDERED_TAG_FUA) { | ||
295 | printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered); | ||
296 | return -EINVAL; | ||
297 | } | ||
298 | |||
299 | q->ordered = ordered; | ||
300 | q->next_ordered = ordered; | ||
301 | q->prepare_flush_fn = prepare_flush_fn; | ||
302 | |||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | EXPORT_SYMBOL(blk_queue_ordered); | ||
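For example, a driver fronting a volatile write cache might register drain-plus-flush ordering and supply a callback that turns the pre/post flush requests into a real cache-flush command; the SYNCHRONIZE_CACHE opcode here is SCSI-flavoured and purely illustrative:

	static void mydrv_prepare_flush(struct request_queue *q, struct request *rq)
	{
		rq->cmd_type = REQ_TYPE_BLOCK_PC;
		rq->cmd[0] = SYNCHRONIZE_CACHE;	/* illustrative opcode */
		rq->timeout = 60 * HZ;
	}

	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydrv_prepare_flush);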
307 | |||
308 | /* | ||
309 | * Cache flushing for ordered writes handling | ||
310 | */ | ||
311 | inline unsigned blk_ordered_cur_seq(struct request_queue *q) | ||
312 | { | ||
313 | if (!q->ordseq) | ||
314 | return 0; | ||
315 | return 1 << ffz(q->ordseq); | ||
316 | } | ||
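The lowest clear bit in ->ordseq identifies the first stage that has not completed yet. Assuming the QUEUE_ORDSEQ_* bit values from the contemporaneous blkdev.h (STARTED=0x01, DRAIN=0x02, PREFLUSH=0x04, BAR=0x08, POSTFLUSH=0x10, DONE=0x20), a worked example:

	/* ordseq = STARTED | DRAIN | PREFLUSH = 0x07
	 * ffz(0x07) = 3, so blk_ordered_cur_seq() returns 1 << 3 = 0x08,
	 * i.e. QUEUE_ORDSEQ_BAR: the barrier write itself is now in progress.
	 */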
317 | |||
318 | unsigned blk_ordered_req_seq(struct request *rq) | ||
319 | { | ||
320 | struct request_queue *q = rq->q; | ||
321 | |||
322 | BUG_ON(q->ordseq == 0); | ||
323 | |||
324 | if (rq == &q->pre_flush_rq) | ||
325 | return QUEUE_ORDSEQ_PREFLUSH; | ||
326 | if (rq == &q->bar_rq) | ||
327 | return QUEUE_ORDSEQ_BAR; | ||
328 | if (rq == &q->post_flush_rq) | ||
329 | return QUEUE_ORDSEQ_POSTFLUSH; | ||
330 | |||
331 | /* | ||
332 | * !fs requests don't need to follow barrier ordering. Always | ||
333 | * put them at the front. This fixes the following deadlock. | ||
334 | * | ||
335 | * http://thread.gmane.org/gmane.linux.kernel/537473 | ||
336 | */ | ||
337 | if (!blk_fs_request(rq)) | ||
338 | return QUEUE_ORDSEQ_DRAIN; | ||
339 | |||
340 | if ((rq->cmd_flags & REQ_ORDERED_COLOR) == | ||
341 | (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR)) | ||
342 | return QUEUE_ORDSEQ_DRAIN; | ||
343 | else | ||
344 | return QUEUE_ORDSEQ_DONE; | ||
345 | } | ||
346 | |||
347 | void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) | ||
348 | { | ||
349 | struct request *rq; | ||
350 | int uptodate; | ||
351 | |||
352 | if (error && !q->orderr) | ||
353 | q->orderr = error; | ||
354 | |||
355 | BUG_ON(q->ordseq & seq); | ||
356 | q->ordseq |= seq; | ||
357 | |||
358 | if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE) | ||
359 | return; | ||
360 | |||
361 | /* | ||
362 | * Okay, sequence complete. | ||
363 | */ | ||
364 | uptodate = 1; | ||
365 | if (q->orderr) | ||
366 | uptodate = q->orderr; | ||
367 | |||
368 | q->ordseq = 0; | ||
369 | rq = q->orig_bar_rq; | ||
370 | |||
371 | end_that_request_first(rq, uptodate, rq->hard_nr_sectors); | ||
372 | end_that_request_last(rq, uptodate); | ||
373 | } | ||
374 | |||
375 | static void pre_flush_end_io(struct request *rq, int error) | ||
376 | { | ||
377 | elv_completed_request(rq->q, rq); | ||
378 | blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error); | ||
379 | } | ||
380 | |||
381 | static void bar_end_io(struct request *rq, int error) | ||
382 | { | ||
383 | elv_completed_request(rq->q, rq); | ||
384 | blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error); | ||
385 | } | ||
386 | |||
387 | static void post_flush_end_io(struct request *rq, int error) | ||
388 | { | ||
389 | elv_completed_request(rq->q, rq); | ||
390 | blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error); | ||
391 | } | ||
392 | |||
393 | static void queue_flush(struct request_queue *q, unsigned which) | ||
394 | { | ||
395 | struct request *rq; | ||
396 | rq_end_io_fn *end_io; | ||
397 | |||
398 | if (which == QUEUE_ORDERED_PREFLUSH) { | ||
399 | rq = &q->pre_flush_rq; | ||
400 | end_io = pre_flush_end_io; | ||
401 | } else { | ||
402 | rq = &q->post_flush_rq; | ||
403 | end_io = post_flush_end_io; | ||
404 | } | ||
405 | |||
406 | rq->cmd_flags = REQ_HARDBARRIER; | ||
407 | rq_init(q, rq); | ||
408 | rq->elevator_private = NULL; | ||
409 | rq->elevator_private2 = NULL; | ||
410 | rq->rq_disk = q->bar_rq.rq_disk; | ||
411 | rq->end_io = end_io; | ||
412 | q->prepare_flush_fn(q, rq); | ||
413 | |||
414 | elv_insert(q, rq, ELEVATOR_INSERT_FRONT); | ||
415 | } | ||
416 | |||
417 | static inline struct request *start_ordered(struct request_queue *q, | ||
418 | struct request *rq) | ||
419 | { | ||
420 | q->orderr = 0; | ||
421 | q->ordered = q->next_ordered; | ||
422 | q->ordseq |= QUEUE_ORDSEQ_STARTED; | ||
423 | |||
424 | /* | ||
425 | * Prep proxy barrier request. | ||
426 | */ | ||
427 | blkdev_dequeue_request(rq); | ||
428 | q->orig_bar_rq = rq; | ||
429 | rq = &q->bar_rq; | ||
430 | rq->cmd_flags = 0; | ||
431 | rq_init(q, rq); | ||
432 | if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) | ||
433 | rq->cmd_flags |= REQ_RW; | ||
434 | if (q->ordered & QUEUE_ORDERED_FUA) | ||
435 | rq->cmd_flags |= REQ_FUA; | ||
436 | rq->elevator_private = NULL; | ||
437 | rq->elevator_private2 = NULL; | ||
438 | init_request_from_bio(rq, q->orig_bar_rq->bio); | ||
439 | rq->end_io = bar_end_io; | ||
440 | |||
441 | /* | ||
442 | * Queue ordered sequence. As we stack them at the head, we | ||
443 | * need to queue in reverse order. Note that we rely on the fact that | ||
444 | * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs | ||
445 | * request gets in between the ordered sequence. If this request is | ||
446 | * an empty barrier, we don't need to do a postflush ever since | ||
447 | * there will be no data written between the pre and post flush. | ||
448 | * Hence a single flush will suffice. | ||
449 | */ | ||
450 | if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq)) | ||
451 | queue_flush(q, QUEUE_ORDERED_POSTFLUSH); | ||
452 | else | ||
453 | q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH; | ||
454 | |||
455 | elv_insert(q, rq, ELEVATOR_INSERT_FRONT); | ||
456 | |||
457 | if (q->ordered & QUEUE_ORDERED_PREFLUSH) { | ||
458 | queue_flush(q, QUEUE_ORDERED_PREFLUSH); | ||
459 | rq = &q->pre_flush_rq; | ||
460 | } else | ||
461 | q->ordseq |= QUEUE_ORDSEQ_PREFLUSH; | ||
462 | |||
463 | if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0) | ||
464 | q->ordseq |= QUEUE_ORDSEQ_DRAIN; | ||
465 | else | ||
466 | rq = NULL; | ||
467 | |||
468 | return rq; | ||
469 | } | ||
470 | |||
471 | int blk_do_ordered(struct request_queue *q, struct request **rqp) | ||
472 | { | ||
473 | struct request *rq = *rqp; | ||
474 | const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); | ||
475 | |||
476 | if (!q->ordseq) { | ||
477 | if (!is_barrier) | ||
478 | return 1; | ||
479 | |||
480 | if (q->next_ordered != QUEUE_ORDERED_NONE) { | ||
481 | *rqp = start_ordered(q, rq); | ||
482 | return 1; | ||
483 | } else { | ||
484 | /* | ||
485 | * This can happen when the queue switches to | ||
486 | * ORDERED_NONE while this request is on it. | ||
487 | */ | ||
488 | blkdev_dequeue_request(rq); | ||
489 | end_that_request_first(rq, -EOPNOTSUPP, | ||
490 | rq->hard_nr_sectors); | ||
491 | end_that_request_last(rq, -EOPNOTSUPP); | ||
492 | *rqp = NULL; | ||
493 | return 0; | ||
494 | } | ||
495 | } | ||
496 | |||
497 | /* | ||
498 | * Ordered sequence in progress | ||
499 | */ | ||
500 | |||
501 | /* Special requests are not subject to ordering rules. */ | ||
502 | if (!blk_fs_request(rq) && | ||
503 | rq != &q->pre_flush_rq && rq != &q->post_flush_rq) | ||
504 | return 1; | ||
505 | |||
506 | if (q->ordered & QUEUE_ORDERED_TAG) { | ||
507 | /* Ordered by tag. Blocking the next barrier is enough. */ | ||
508 | if (is_barrier && rq != &q->bar_rq) | ||
509 | *rqp = NULL; | ||
510 | } else { | ||
511 | /* Ordered by draining. Wait for turn. */ | ||
512 | WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q)); | ||
513 | if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q)) | ||
514 | *rqp = NULL; | ||
515 | } | ||
516 | |||
517 | return 1; | ||
518 | } | ||
519 | |||
520 | static void req_bio_endio(struct request *rq, struct bio *bio, | ||
521 | unsigned int nbytes, int error) | ||
522 | { | ||
523 | struct request_queue *q = rq->q; | ||
524 | |||
525 | if (&q->bar_rq != rq) { | ||
526 | if (error) | ||
527 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | ||
528 | else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) | ||
529 | error = -EIO; | ||
530 | |||
531 | if (unlikely(nbytes > bio->bi_size)) { | ||
532 | printk("%s: want %u bytes done, only %u left\n", | ||
533 | __FUNCTION__, nbytes, bio->bi_size); | ||
534 | nbytes = bio->bi_size; | ||
535 | } | ||
536 | |||
537 | bio->bi_size -= nbytes; | ||
538 | bio->bi_sector += (nbytes >> 9); | ||
539 | if (bio->bi_size == 0) | ||
540 | bio_endio(bio, error); | ||
541 | } else { | ||
542 | |||
543 | /* | ||
544 | * Okay, this is the barrier request in progress, just | ||
545 | * record the error. | ||
546 | */ | ||
547 | if (error && !q->orderr) | ||
548 | q->orderr = error; | ||
549 | } | ||
550 | } | ||
551 | |||
552 | /** | ||
553 | * blk_queue_bounce_limit - set bounce buffer limit for queue | ||
554 | * @q: the request queue for the device | ||
555 | * @dma_addr: bus address limit | ||
556 | * | ||
557 | * Description: | ||
558 | * Different hardware can have different requirements as to what pages | ||
559 | * it can do I/O directly to. A low level driver can call | ||
560 | * blk_queue_bounce_limit to have lower memory pages allocated as bounce | ||
561 | * buffers for doing I/O to pages residing above @dma_addr. | ||
562 | **/ | ||
563 | void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) | ||
564 | { | ||
565 | unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT; | ||
566 | int dma = 0; | ||
567 | |||
568 | q->bounce_gfp = GFP_NOIO; | ||
569 | #if BITS_PER_LONG == 64 | ||
570 | /* Assume anything <= 4GB can be handled by IOMMU. | ||
571 | Actually some IOMMUs can handle everything, but I don't | ||
572 | know of a way to test this here. */ | ||
573 | if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) | ||
574 | dma = 1; | ||
575 | q->bounce_pfn = max_low_pfn; | ||
576 | #else | ||
577 | if (bounce_pfn < blk_max_low_pfn) | ||
578 | dma = 1; | ||
579 | q->bounce_pfn = bounce_pfn; | ||
580 | #endif | ||
581 | if (dma) { | ||
582 | init_emergency_isa_pool(); | ||
583 | q->bounce_gfp = GFP_NOIO | GFP_DMA; | ||
584 | q->bounce_pfn = bounce_pfn; | ||
585 | } | ||
586 | } | ||
587 | |||
588 | EXPORT_SYMBOL(blk_queue_bounce_limit); | ||
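Two illustrative calls, sketching how a driver would describe its DMA reach:

	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);	/* legacy ISA DMA, below 16MB */
	blk_queue_bounce_limit(q, 0xffffffffULL);	/* full 32-bit DMA mask */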
589 | |||
590 | /** | ||
591 | * blk_queue_max_sectors - set max sectors for a request for this queue | ||
592 | * @q: the request queue for the device | ||
593 | * @max_sectors: max sectors in the usual 512b unit | ||
594 | * | ||
595 | * Description: | ||
596 | * Enables a low level driver to set an upper limit on the size of | ||
597 | * received requests. | ||
598 | **/ | ||
599 | void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors) | ||
600 | { | ||
601 | if ((max_sectors << 9) < PAGE_CACHE_SIZE) { | ||
602 | max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); | ||
603 | printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors); | ||
604 | } | ||
605 | |||
606 | if (BLK_DEF_MAX_SECTORS > max_sectors) | ||
607 | q->max_hw_sectors = q->max_sectors = max_sectors; | ||
608 | else { | ||
609 | q->max_sectors = BLK_DEF_MAX_SECTORS; | ||
610 | q->max_hw_sectors = max_sectors; | ||
611 | } | ||
612 | } | ||
613 | |||
614 | EXPORT_SYMBOL(blk_queue_max_sectors); | ||
615 | |||
616 | /** | ||
617 | * blk_queue_max_phys_segments - set max phys segments for a request for this queue | ||
618 | * @q: the request queue for the device | ||
619 | * @max_segments: max number of segments | ||
620 | * | ||
621 | * Description: | ||
622 | * Enables a low level driver to set an upper limit on the number of | ||
623 | * physical data segments in a request. This would be the largest sized | ||
624 | * scatter list the driver could handle. | ||
625 | **/ | ||
626 | void blk_queue_max_phys_segments(struct request_queue *q, | ||
627 | unsigned short max_segments) | ||
628 | { | ||
629 | if (!max_segments) { | ||
630 | max_segments = 1; | ||
631 | printk("%s: set to minimum %d\n", __FUNCTION__, max_segments); | ||
632 | } | ||
633 | |||
634 | q->max_phys_segments = max_segments; | ||
635 | } | ||
636 | |||
637 | EXPORT_SYMBOL(blk_queue_max_phys_segments); | ||
638 | |||
639 | /** | ||
640 | * blk_queue_max_hw_segments - set max hw segments for a request for this queue | ||
641 | * @q: the request queue for the device | ||
642 | * @max_segments: max number of segments | ||
643 | * | ||
644 | * Description: | ||
645 | * Enables a low level driver to set an upper limit on the number of | ||
646 | * hw data segments in a request. This would be the largest number of | ||
647 | * address/length pairs the host adapter can actually give at once | ||
648 | * to the device. | ||
649 | **/ | ||
650 | void blk_queue_max_hw_segments(struct request_queue *q, | ||
651 | unsigned short max_segments) | ||
652 | { | ||
653 | if (!max_segments) { | ||
654 | max_segments = 1; | ||
655 | printk("%s: set to minimum %d\n", __FUNCTION__, max_segments); | ||
656 | } | ||
657 | |||
658 | q->max_hw_segments = max_segments; | ||
659 | } | ||
660 | |||
661 | EXPORT_SYMBOL(blk_queue_max_hw_segments); | ||
662 | |||
663 | /** | ||
664 | * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg | ||
665 | * @q: the request queue for the device | ||
666 | * @max_size: max size of segment in bytes | ||
667 | * | ||
668 | * Description: | ||
669 | * Enables a low level driver to set an upper limit on the size of a | ||
670 | * coalesced segment | ||
671 | **/ | ||
672 | void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) | ||
673 | { | ||
674 | if (max_size < PAGE_CACHE_SIZE) { | ||
675 | max_size = PAGE_CACHE_SIZE; | ||
676 | printk("%s: set to minimum %d\n", __FUNCTION__, max_size); | ||
677 | } | ||
678 | |||
679 | q->max_segment_size = max_size; | ||
680 | } | ||
681 | |||
682 | EXPORT_SYMBOL(blk_queue_max_segment_size); | ||
683 | |||
684 | /** | ||
685 | * blk_queue_hardsect_size - set hardware sector size for the queue | ||
686 | * @q: the request queue for the device | ||
687 | * @size: the hardware sector size, in bytes | ||
688 | * | ||
689 | * Description: | ||
690 | * This should typically be set to the lowest possible sector size | ||
691 | * that the hardware can operate on (possible without reverting to | ||
692 | * even internal read-modify-write operations). Usually the default | ||
693 | * of 512 covers most hardware. | ||
694 | **/ | ||
695 | void blk_queue_hardsect_size(struct request_queue *q, unsigned short size) | ||
696 | { | ||
697 | q->hardsect_size = size; | ||
698 | } | ||
699 | |||
700 | EXPORT_SYMBOL(blk_queue_hardsect_size); | ||
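Taken together, a probe routine would typically issue a batch of these settings calls right after allocating the queue; the limits below are illustrative, not recommendations:

	blk_queue_max_sectors(q, 256);		/* 128KB per request */
	blk_queue_max_phys_segments(q, 32);
	blk_queue_max_hw_segments(q, 32);
	blk_queue_max_segment_size(q, 65536);
	blk_queue_hardsect_size(q, 512);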
701 | |||
702 | /* | ||
703 | * Returns the minimum that is _not_ zero, unless both are zero. | ||
704 | */ | ||
705 | #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) | ||
706 | |||
707 | /** | ||
708 | * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers | ||
709 | * @t: the stacking driver (top) | ||
710 | * @b: the underlying device (bottom) | ||
711 | **/ | ||
712 | void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) | ||
713 | { | ||
714 | /* zero is "infinity" */ | ||
715 | t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors); | ||
716 | t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors); | ||
717 | |||
718 | t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments); | ||
719 | t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments); | ||
720 | t->max_segment_size = min(t->max_segment_size,b->max_segment_size); | ||
721 | t->hardsect_size = max(t->hardsect_size,b->hardsect_size); | ||
722 | if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) | ||
723 | clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags); | ||
724 | } | ||
725 | |||
726 | EXPORT_SYMBOL(blk_queue_stack_limits); | ||
727 | |||
728 | /** | ||
729 | * blk_queue_segment_boundary - set boundary rules for segment merging | ||
730 | * @q: the request queue for the device | ||
731 | * @mask: the memory boundary mask | ||
732 | **/ | ||
733 | void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) | ||
734 | { | ||
735 | if (mask < PAGE_CACHE_SIZE - 1) { | ||
736 | mask = PAGE_CACHE_SIZE - 1; | ||
737 | printk("%s: set to minimum %lx\n", __FUNCTION__, mask); | ||
738 | } | ||
739 | |||
740 | q->seg_boundary_mask = mask; | ||
741 | } | ||
742 | |||
743 | EXPORT_SYMBOL(blk_queue_segment_boundary); | ||
744 | |||
745 | /** | ||
746 | * blk_queue_dma_alignment - set dma length and memory alignment | ||
747 | * @q: the request queue for the device | ||
748 | * @mask: alignment mask | ||
749 | * | ||
750 | * Description: | ||
751 | * Set required memory and length alignment for direct dma transactions. | ||
752 | * This is used when building direct io requests for the queue. | ||
753 | * | ||
754 | **/ | ||
755 | void blk_queue_dma_alignment(struct request_queue *q, int mask) | ||
756 | { | ||
757 | q->dma_alignment = mask; | ||
758 | } | ||
759 | |||
760 | EXPORT_SYMBOL(blk_queue_dma_alignment); | ||
761 | |||
762 | /** | ||
763 | * blk_queue_find_tag - find a request by its tag and queue | ||
764 | * @q: The request queue for the device | ||
765 | * @tag: The tag of the request | ||
766 | * | ||
767 | * Notes: | ||
768 | * Should be used when a device returns a tag and you want to match | ||
769 | * it with a request. | ||
770 | * | ||
771 | * no locks need be held. | ||
772 | **/ | ||
773 | struct request *blk_queue_find_tag(struct request_queue *q, int tag) | ||
774 | { | ||
775 | return blk_map_queue_find_tag(q->queue_tags, tag); | ||
776 | } | ||
777 | |||
778 | EXPORT_SYMBOL(blk_queue_find_tag); | ||
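A sketch of the typical caller, an interrupt handler matching a hardware completion back to its request; mydrv_read_completed_tag() and mydrv_complete() are hypothetical:

	static irqreturn_t mydrv_isr(int irq, void *data)
	{
		struct request_queue *q = data;
		int tag = mydrv_read_completed_tag();		/* hypothetical hw read */
		struct request *rq = blk_queue_find_tag(q, tag);

		if (rq)
			mydrv_complete(rq);			/* hypothetical completion */
		return IRQ_HANDLED;
	}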
779 | |||
780 | /** | ||
781 | * __blk_free_tags - release a given set of tag maintenance info | ||
782 | * @bqt: the tag map to free | ||
783 | * | ||
784 | * Tries to free the specified @bqt@. Returns true if it was | ||
785 | * actually freed and false if there are still references using it | ||
786 | */ | ||
787 | static int __blk_free_tags(struct blk_queue_tag *bqt) | ||
788 | { | ||
789 | int retval; | ||
790 | |||
791 | retval = atomic_dec_and_test(&bqt->refcnt); | ||
792 | if (retval) { | ||
793 | BUG_ON(bqt->busy); | ||
794 | BUG_ON(!list_empty(&bqt->busy_list)); | ||
795 | |||
796 | kfree(bqt->tag_index); | ||
797 | bqt->tag_index = NULL; | ||
798 | |||
799 | kfree(bqt->tag_map); | ||
800 | bqt->tag_map = NULL; | ||
801 | |||
802 | kfree(bqt); | ||
803 | |||
804 | } | ||
805 | |||
806 | return retval; | ||
807 | } | ||
808 | |||
809 | /** | ||
810 | * __blk_queue_free_tags - release tag maintenance info | ||
811 | * @q: the request queue for the device | ||
812 | * | ||
813 | * Notes: | ||
814 | * blk_cleanup_queue() will take care of calling this function, if tagging | ||
815 | * has been used. So there's no need to call this directly. | ||
816 | **/ | ||
817 | static void __blk_queue_free_tags(struct request_queue *q) | ||
818 | { | ||
819 | struct blk_queue_tag *bqt = q->queue_tags; | ||
820 | |||
821 | if (!bqt) | ||
822 | return; | ||
823 | |||
824 | __blk_free_tags(bqt); | ||
825 | |||
826 | q->queue_tags = NULL; | ||
827 | q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED); | ||
828 | } | ||
829 | |||
830 | |||
831 | /** | ||
832 | * blk_free_tags - release a given set of tag maintenance info | ||
833 | * @bqt: the tag map to free | ||
834 | * | ||
835 | * For an externally managed @bqt@, frees the map. Callers of this | ||
836 | * function must guarantee to have released all the queues that | ||
837 | * might have been using this tag map. | ||
838 | */ | ||
839 | void blk_free_tags(struct blk_queue_tag *bqt) | ||
840 | { | ||
841 | if (unlikely(!__blk_free_tags(bqt))) | ||
842 | BUG(); | ||
843 | } | ||
844 | EXPORT_SYMBOL(blk_free_tags); | ||
845 | |||
846 | /** | ||
847 | * blk_queue_free_tags - release tag maintenance info | ||
848 | * @q: the request queue for the device | ||
849 | * | ||
850 | * Notes: | ||
851 | * This is used to disable tagged queuing to a device, yet leave the | ||
852 | * queue in function. | ||
853 | **/ | ||
854 | void blk_queue_free_tags(struct request_queue *q) | ||
855 | { | ||
856 | clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); | ||
857 | } | ||
858 | |||
859 | EXPORT_SYMBOL(blk_queue_free_tags); | ||
860 | |||
861 | static int | ||
862 | init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth) | ||
863 | { | ||
864 | struct request **tag_index; | ||
865 | unsigned long *tag_map; | ||
866 | int nr_ulongs; | ||
867 | |||
868 | if (q && depth > q->nr_requests * 2) { | ||
869 | depth = q->nr_requests * 2; | ||
870 | printk(KERN_ERR "%s: adjusted depth to %d\n", | ||
871 | __FUNCTION__, depth); | ||
872 | } | ||
873 | |||
874 | tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC); | ||
875 | if (!tag_index) | ||
876 | goto fail; | ||
877 | |||
878 | nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG; | ||
879 | tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); | ||
880 | if (!tag_map) | ||
881 | goto fail; | ||
882 | |||
883 | tags->real_max_depth = depth; | ||
884 | tags->max_depth = depth; | ||
885 | tags->tag_index = tag_index; | ||
886 | tags->tag_map = tag_map; | ||
887 | |||
888 | return 0; | ||
889 | fail: | ||
890 | kfree(tag_index); | ||
891 | return -ENOMEM; | ||
892 | } | ||
893 | |||
894 | static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q, | ||
895 | int depth) | ||
896 | { | ||
897 | struct blk_queue_tag *tags; | ||
898 | |||
899 | tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC); | ||
900 | if (!tags) | ||
901 | goto fail; | ||
902 | |||
903 | if (init_tag_map(q, tags, depth)) | ||
904 | goto fail; | ||
905 | |||
906 | INIT_LIST_HEAD(&tags->busy_list); | ||
907 | tags->busy = 0; | ||
908 | atomic_set(&tags->refcnt, 1); | ||
909 | return tags; | ||
910 | fail: | ||
911 | kfree(tags); | ||
912 | return NULL; | ||
913 | } | ||
914 | |||
915 | /** | ||
916 | * blk_init_tags - initialize the tag info for an external tag map | ||
917 | * @depth: the maximum queue depth supported | ||
919 | **/ | ||
920 | struct blk_queue_tag *blk_init_tags(int depth) | ||
921 | { | ||
922 | return __blk_queue_init_tags(NULL, depth); | ||
923 | } | ||
924 | EXPORT_SYMBOL(blk_init_tags); | ||
925 | |||
926 | /** | ||
927 | * blk_queue_init_tags - initialize the queue tag info | ||
928 | * @q: the request queue for the device | ||
929 | * @depth: the maximum queue depth supported | ||
930 | * @tags: the tag to use | ||
931 | **/ | ||
932 | int blk_queue_init_tags(struct request_queue *q, int depth, | ||
933 | struct blk_queue_tag *tags) | ||
934 | { | ||
935 | int rc; | ||
936 | |||
937 | BUG_ON(tags && q->queue_tags && tags != q->queue_tags); | ||
938 | |||
939 | if (!tags && !q->queue_tags) { | ||
940 | tags = __blk_queue_init_tags(q, depth); | ||
941 | |||
942 | if (!tags) | ||
943 | goto fail; | ||
944 | } else if (q->queue_tags) { | ||
945 | if ((rc = blk_queue_resize_tags(q, depth))) | ||
946 | return rc; | ||
947 | set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); | ||
948 | return 0; | ||
949 | } else | ||
950 | atomic_inc(&tags->refcnt); | ||
951 | |||
952 | /* | ||
953 | * assign it, all done | ||
954 | */ | ||
955 | q->queue_tags = tags; | ||
956 | q->queue_flags |= (1 << QUEUE_FLAG_QUEUED); | ||
957 | return 0; | ||
958 | fail: | ||
959 | kfree(tags); | ||
960 | return -ENOMEM; | ||
961 | } | ||
962 | |||
963 | EXPORT_SYMBOL(blk_queue_init_tags); | ||
964 | |||
965 | /** | ||
966 | * blk_queue_resize_tags - change the queueing depth | ||
967 | * @q: the request queue for the device | ||
968 | * @new_depth: the new max command queueing depth | ||
969 | * | ||
970 | * Notes: | ||
971 | * Must be called with the queue lock held. | ||
972 | **/ | ||
973 | int blk_queue_resize_tags(struct request_queue *q, int new_depth) | ||
974 | { | ||
975 | struct blk_queue_tag *bqt = q->queue_tags; | ||
976 | struct request **tag_index; | ||
977 | unsigned long *tag_map; | ||
978 | int max_depth, nr_ulongs; | ||
979 | |||
980 | if (!bqt) | ||
981 | return -ENXIO; | ||
982 | |||
983 | /* | ||
984 | * if we already have a large enough real_max_depth, just | ||
985 | * adjust max_depth. *NOTE* as requests with tag values | ||
986 | * between new_depth and real_max_depth can be in-flight, the tag | ||
987 | * map cannot be shrunk blindly here. | ||
988 | */ | ||
989 | if (new_depth <= bqt->real_max_depth) { | ||
990 | bqt->max_depth = new_depth; | ||
991 | return 0; | ||
992 | } | ||
993 | |||
994 | /* | ||
995 | * Currently cannot replace a shared tag map with a new | ||
996 | * one, so error out if this is the case | ||
997 | */ | ||
998 | if (atomic_read(&bqt->refcnt) != 1) | ||
999 | return -EBUSY; | ||
1000 | |||
1001 | /* | ||
1002 | * save the old state info, so we can copy it back | ||
1003 | */ | ||
1004 | tag_index = bqt->tag_index; | ||
1005 | tag_map = bqt->tag_map; | ||
1006 | max_depth = bqt->real_max_depth; | ||
1007 | |||
1008 | if (init_tag_map(q, bqt, new_depth)) | ||
1009 | return -ENOMEM; | ||
1010 | |||
1011 | memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *)); | ||
1012 | nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG; | ||
1013 | memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long)); | ||
1014 | |||
1015 | kfree(tag_index); | ||
1016 | kfree(tag_map); | ||
1017 | return 0; | ||
1018 | } | ||
1019 | |||
1020 | EXPORT_SYMBOL(blk_queue_resize_tags); | ||
1021 | |||
1022 | /** | ||
1023 | * blk_queue_end_tag - end tag operations for a request | ||
1024 | * @q: the request queue for the device | ||
1025 | * @rq: the request that has completed | ||
1026 | * | ||
1027 | * Description: | ||
1028 | * Typically called when end_that_request_first() returns 0, meaning | ||
1029 | * all transfers have been done for a request. It's important to call | ||
1030 | * this function before end_that_request_last(), as that will put the | ||
1031 | * request back on the free list thus corrupting the internal tag list. | ||
1032 | * | ||
1033 | * Notes: | ||
1034 | * queue lock must be held. | ||
1035 | **/ | ||
1036 | void blk_queue_end_tag(struct request_queue *q, struct request *rq) | ||
1037 | { | ||
1038 | struct blk_queue_tag *bqt = q->queue_tags; | ||
1039 | int tag = rq->tag; | ||
1040 | |||
1041 | BUG_ON(tag == -1); | ||
1042 | |||
1043 | if (unlikely(tag >= bqt->real_max_depth)) | ||
1044 | /* | ||
1045 | * This can happen after tag depth has been reduced. | ||
1046 | * FIXME: how about a warning or info message here? | ||
1047 | */ | ||
1048 | return; | ||
1049 | |||
1050 | list_del_init(&rq->queuelist); | ||
1051 | rq->cmd_flags &= ~REQ_QUEUED; | ||
1052 | rq->tag = -1; | ||
1053 | |||
1054 | if (unlikely(bqt->tag_index[tag] == NULL)) | ||
1055 | printk(KERN_ERR "%s: tag %d is missing\n", | ||
1056 | __FUNCTION__, tag); | ||
1057 | |||
1058 | bqt->tag_index[tag] = NULL; | ||
1059 | |||
1060 | /* | ||
1061 | * We use test_and_clear_bit's memory ordering properties here. | ||
1062 | * The tag_map bit acts as a lock for tag_index[bit], so we need | ||
1063 | * a barrier before clearing the bit (precisely: release semantics). | ||
1064 | * Could use clear_bit_unlock when it is merged. | ||
1065 | */ | ||
1066 | if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) { | ||
1067 | printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n", | ||
1068 | __FUNCTION__, tag); | ||
1069 | return; | ||
1070 | } | ||
1071 | |||
1072 | bqt->busy--; | ||
1073 | } | ||
1074 | |||
1075 | EXPORT_SYMBOL(blk_queue_end_tag); | ||
1076 | |||
1077 | /** | ||
1078 | * blk_queue_start_tag - find a free tag and assign it | ||
1079 | * @q: the request queue for the device | ||
1080 | * @rq: the block request that needs tagging | ||
1081 | * | ||
1082 | * Description: | ||
1083 | * This can either be used as a stand-alone helper, or possibly be | ||
1084 | * assigned as the queue &prep_rq_fn (in which case &struct request | ||
1085 | * automagically gets a tag assigned). Note that this function | ||
1086 | * assumes that any type of request can be queued! If this is not | ||
1087 | * true for your device, you must check the request type before | ||
1088 | * calling this function. The request will also be removed from | ||
1089 | * the request queue, so it's the driver's responsibility to re-add | ||
1090 | * it if it should need to be restarted for some reason. | ||
1091 | * | ||
1092 | * Notes: | ||
1093 | * queue lock must be held. | ||
1094 | **/ | ||
1095 | int blk_queue_start_tag(struct request_queue *q, struct request *rq) | ||
1096 | { | ||
1097 | struct blk_queue_tag *bqt = q->queue_tags; | ||
1098 | int tag; | ||
1099 | |||
1100 | if (unlikely((rq->cmd_flags & REQ_QUEUED))) { | ||
1101 | printk(KERN_ERR | ||
1102 | "%s: request %p for device [%s] already tagged %d", | ||
1103 | __FUNCTION__, rq, | ||
1104 | rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag); | ||
1105 | BUG(); | ||
1106 | } | ||
1107 | |||
1108 | /* | ||
1109 | * Protect against shared tag maps, as we may not have exclusive | ||
1110 | * access to the tag map. | ||
1111 | */ | ||
1112 | do { | ||
1113 | tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth); | ||
1114 | if (tag >= bqt->max_depth) | ||
1115 | return 1; | ||
1116 | |||
1117 | } while (test_and_set_bit(tag, bqt->tag_map)); | ||
1118 | /* | ||
1119 | * We rely on test_and_set_bit providing lock memory ordering semantics | ||
1120 | * (could use test_and_set_bit_lock when it is merged). | ||
1121 | */ | ||
1122 | |||
1123 | rq->cmd_flags |= REQ_QUEUED; | ||
1124 | rq->tag = tag; | ||
1125 | bqt->tag_index[tag] = rq; | ||
1126 | blkdev_dequeue_request(rq); | ||
1127 | list_add(&rq->queuelist, &bqt->busy_list); | ||
1128 | bqt->busy++; | ||
1129 | return 0; | ||
1130 | } | ||
1131 | |||
1132 | EXPORT_SYMBOL(blk_queue_start_tag); | ||
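The two halves pair up like this in a tagged driver. A sketch with the queue lock held as required; mydrv_issue() is hypothetical:

	static void mydrv_request_fn(struct request_queue *q)
	{
		struct request *rq;

		while ((rq = elv_next_request(q)) != NULL) {
			if (blk_queue_start_tag(q, rq))
				break;			/* tag map full, retry later */
			mydrv_issue(rq, rq->tag);	/* hypothetical hardware submit */
		}
	}

	/* completion side, still under the queue lock */
	blk_queue_end_tag(q, rq);
	end_that_request_last(rq, 1);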
1133 | |||
1134 | /** | ||
1135 | * blk_queue_invalidate_tags - invalidate all pending tags | ||
1136 | * @q: the request queue for the device | ||
1137 | * | ||
1138 | * Description: | ||
1139 | * Hardware conditions may dictate a need to stop all pending requests. | ||
1140 | * In this case, we will safely clear the block side of the tag queue and | ||
1141 | * re-add all requests to the request queue in the right order. | ||
1142 | * | ||
1143 | * Notes: | ||
1144 | * queue lock must be held. | ||
1145 | **/ | ||
1146 | void blk_queue_invalidate_tags(struct request_queue *q) | ||
1147 | { | ||
1148 | struct blk_queue_tag *bqt = q->queue_tags; | ||
1149 | struct list_head *tmp, *n; | ||
1150 | struct request *rq; | ||
1151 | |||
1152 | list_for_each_safe(tmp, n, &bqt->busy_list) { | ||
1153 | rq = list_entry_rq(tmp); | ||
1154 | |||
1155 | if (rq->tag == -1) { | ||
1156 | printk(KERN_ERR | ||
1157 | "%s: bad tag found on list\n", __FUNCTION__); | ||
1158 | list_del_init(&rq->queuelist); | ||
1159 | rq->cmd_flags &= ~REQ_QUEUED; | ||
1160 | } else | ||
1161 | blk_queue_end_tag(q, rq); | ||
1162 | |||
1163 | rq->cmd_flags &= ~REQ_STARTED; | ||
1164 | __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0); | ||
1165 | } | ||
1166 | } | ||
1167 | |||
1168 | EXPORT_SYMBOL(blk_queue_invalidate_tags); | ||
1169 | |||
1170 | void blk_dump_rq_flags(struct request *rq, char *msg) | ||
1171 | { | ||
1172 | int bit; | ||
1173 | |||
1174 | printk("%s: dev %s: type=%x, flags=%x\n", msg, | ||
1175 | rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, | ||
1176 | rq->cmd_flags); | ||
1177 | |||
1178 | printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector, | ||
1179 | rq->nr_sectors, | ||
1180 | rq->current_nr_sectors); | ||
1181 | printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len); | ||
1182 | |||
1183 | if (blk_pc_request(rq)) { | ||
1184 | printk("cdb: "); | ||
1185 | for (bit = 0; bit < sizeof(rq->cmd); bit++) | ||
1186 | printk("%02x ", rq->cmd[bit]); | ||
1187 | printk("\n"); | ||
1188 | } | ||
1189 | } | ||
1190 | |||
1191 | EXPORT_SYMBOL(blk_dump_rq_flags); | ||
1192 | |||
1193 | void blk_recount_segments(struct request_queue *q, struct bio *bio) | ||
1194 | { | ||
1195 | struct request rq; | ||
1196 | struct bio *nxt = bio->bi_next; | ||
1197 | rq.q = q; | ||
1198 | rq.bio = rq.biotail = bio; | ||
1199 | bio->bi_next = NULL; | ||
1200 | blk_recalc_rq_segments(&rq); | ||
1201 | bio->bi_next = nxt; | ||
1202 | bio->bi_phys_segments = rq.nr_phys_segments; | ||
1203 | bio->bi_hw_segments = rq.nr_hw_segments; | ||
1204 | bio->bi_flags |= (1 << BIO_SEG_VALID); | ||
1205 | } | ||
1206 | EXPORT_SYMBOL(blk_recount_segments); | ||
1207 | |||
1208 | static void blk_recalc_rq_segments(struct request *rq) | ||
1209 | { | ||
1210 | int nr_phys_segs; | ||
1211 | int nr_hw_segs; | ||
1212 | unsigned int phys_size; | ||
1213 | unsigned int hw_size; | ||
1214 | struct bio_vec *bv, *bvprv = NULL; | ||
1215 | int seg_size; | ||
1216 | int hw_seg_size; | ||
1217 | int cluster; | ||
1218 | struct req_iterator iter; | ||
1219 | int high, highprv = 1; | ||
1220 | struct request_queue *q = rq->q; | ||
1221 | |||
1222 | if (!rq->bio) | ||
1223 | return; | ||
1224 | |||
1225 | cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER); | ||
1226 | hw_seg_size = seg_size = 0; | ||
1227 | phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0; | ||
1228 | rq_for_each_segment(bv, rq, iter) { | ||
1229 | /* | ||
1230 | * the trick here is making sure that a high page is never | ||
1231 | * considered part of another segment, since that might | ||
1232 | * change with the bounce page. | ||
1233 | */ | ||
1234 | high = page_to_pfn(bv->bv_page) > q->bounce_pfn; | ||
1235 | if (high || highprv) | ||
1236 | goto new_hw_segment; | ||
1237 | if (cluster) { | ||
1238 | if (seg_size + bv->bv_len > q->max_segment_size) | ||
1239 | goto new_segment; | ||
1240 | if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv)) | ||
1241 | goto new_segment; | ||
1242 | if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv)) | ||
1243 | goto new_segment; | ||
1244 | if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) | ||
1245 | goto new_hw_segment; | ||
1246 | |||
1247 | seg_size += bv->bv_len; | ||
1248 | hw_seg_size += bv->bv_len; | ||
1249 | bvprv = bv; | ||
1250 | continue; | ||
1251 | } | ||
1252 | new_segment: | ||
1253 | if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) && | ||
1254 | !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) | ||
1255 | hw_seg_size += bv->bv_len; | ||
1256 | else { | ||
1257 | new_hw_segment: | ||
1258 | if (nr_hw_segs == 1 && | ||
1259 | hw_seg_size > rq->bio->bi_hw_front_size) | ||
1260 | rq->bio->bi_hw_front_size = hw_seg_size; | ||
1261 | hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len; | ||
1262 | nr_hw_segs++; | ||
1263 | } | ||
1264 | |||
1265 | nr_phys_segs++; | ||
1266 | bvprv = bv; | ||
1267 | seg_size = bv->bv_len; | ||
1268 | highprv = high; | ||
1269 | } | ||
1270 | |||
1271 | if (nr_hw_segs == 1 && | ||
1272 | hw_seg_size > rq->bio->bi_hw_front_size) | ||
1273 | rq->bio->bi_hw_front_size = hw_seg_size; | ||
1274 | if (hw_seg_size > rq->biotail->bi_hw_back_size) | ||
1275 | rq->biotail->bi_hw_back_size = hw_seg_size; | ||
1276 | rq->nr_phys_segments = nr_phys_segs; | ||
1277 | rq->nr_hw_segments = nr_hw_segs; | ||
1278 | } | ||
1279 | |||
1280 | static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, | ||
1281 | struct bio *nxt) | ||
1282 | { | ||
1283 | if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER))) | ||
1284 | return 0; | ||
1285 | |||
1286 | if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt))) | ||
1287 | return 0; | ||
1288 | if (bio->bi_size + nxt->bi_size > q->max_segment_size) | ||
1289 | return 0; | ||
1290 | |||
1291 | /* | ||
1292 | * bio and nxt are contiguous in memory, check if the queue allows | ||
1293 | * these two to be merged into one | ||
1294 | */ | ||
1295 | if (BIO_SEG_BOUNDARY(q, bio, nxt)) | ||
1296 | return 1; | ||
1297 | |||
1298 | return 0; | ||
1299 | } | ||
1300 | |||
1301 | static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio, | ||
1302 | struct bio *nxt) | ||
1303 | { | ||
1304 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) | ||
1305 | blk_recount_segments(q, bio); | ||
1306 | if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID))) | ||
1307 | blk_recount_segments(q, nxt); | ||
1308 | if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) || | ||
1309 | BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size)) | ||
1310 | return 0; | ||
1311 | if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size) | ||
1312 | return 0; | ||
1313 | |||
1314 | return 1; | ||
1315 | } | ||
1316 | |||
1317 | /* | ||
1318 | * map a request to scatterlist, return number of sg entries setup. Caller | ||
1319 | * must make sure sg can hold rq->nr_phys_segments entries | ||
1320 | */ | ||
1321 | int blk_rq_map_sg(struct request_queue *q, struct request *rq, | ||
1322 | struct scatterlist *sglist) | ||
1323 | { | ||
1324 | struct bio_vec *bvec, *bvprv; | ||
1325 | struct req_iterator iter; | ||
1326 | struct scatterlist *sg; | ||
1327 | int nsegs, cluster; | ||
1328 | |||
1329 | nsegs = 0; | ||
1330 | cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER); | ||
1331 | |||
1332 | /* | ||
1333 | * for each bio in rq | ||
1334 | */ | ||
1335 | bvprv = NULL; | ||
1336 | sg = NULL; | ||
1337 | rq_for_each_segment(bvec, rq, iter) { | ||
1338 | int nbytes = bvec->bv_len; | ||
1339 | |||
1340 | if (bvprv && cluster) { | ||
1341 | if (sg->length + nbytes > q->max_segment_size) | ||
1342 | goto new_segment; | ||
1343 | |||
1344 | if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) | ||
1345 | goto new_segment; | ||
1346 | if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) | ||
1347 | goto new_segment; | ||
1348 | |||
1349 | sg->length += nbytes; | ||
1350 | } else { | ||
1351 | new_segment: | ||
1352 | if (!sg) | ||
1353 | sg = sglist; | ||
1354 | else { | ||
1355 | /* | ||
1356 | * If the driver previously mapped a shorter | ||
1357 | * list, we could see a termination bit | ||
1358 | * prematurely unless it fully inits the sg | ||
1359 | * table on each mapping. We KNOW that there | ||
1360 | * must be more entries here or the driver | ||
1361 | * would be buggy, so force clear the | ||
1362 | * termination bit to avoid doing a full | ||
1363 | * sg_init_table() in drivers for each command. | ||
1364 | */ | ||
1365 | sg->page_link &= ~0x02; | ||
1366 | sg = sg_next(sg); | ||
1367 | } | ||
1368 | |||
1369 | sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset); | ||
1370 | nsegs++; | ||
1371 | } | ||
1372 | bvprv = bvec; | ||
1373 | } /* segments in rq */ | ||
1374 | |||
1375 | if (sg) | ||
1376 | __sg_mark_end(sg); | ||
1377 | |||
1378 | return nsegs; | ||
1379 | } | ||
1380 | |||
1381 | EXPORT_SYMBOL(blk_rq_map_sg); | ||
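Usage sketch: a driver's issue path maps the request into its own scatterlist (sized for rq->nr_phys_segments) and then hands it to the DMA API; MYDRV_SG_ENTRIES and dev are placeholders:

	struct scatterlist sg[MYDRV_SG_ENTRIES];	/* must hold rq->nr_phys_segments */
	int count;

	sg_init_table(sg, MYDRV_SG_ENTRIES);
	count = blk_rq_map_sg(q, rq, sg);
	count = dma_map_sg(dev, sg, count,
			   rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);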
1382 | |||
1383 | /* | ||
1384 | * the standard queue merge functions, can be overridden with device | ||
1385 | * specific ones if so desired | ||
1386 | */ | ||
1387 | |||
1388 | static inline int ll_new_mergeable(struct request_queue *q, | ||
1389 | struct request *req, | ||
1390 | struct bio *bio) | ||
1391 | { | ||
1392 | int nr_phys_segs = bio_phys_segments(q, bio); | ||
1393 | |||
1394 | if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { | ||
1395 | req->cmd_flags |= REQ_NOMERGE; | ||
1396 | if (req == q->last_merge) | ||
1397 | q->last_merge = NULL; | ||
1398 | return 0; | ||
1399 | } | ||
1400 | |||
1401 | /* | ||
1402 | * A hw segment is just getting larger, bump just the phys | ||
1403 | * counter. | ||
1404 | */ | ||
1405 | req->nr_phys_segments += nr_phys_segs; | ||
1406 | return 1; | ||
1407 | } | ||
1408 | |||
1409 | static inline int ll_new_hw_segment(struct request_queue *q, | ||
1410 | struct request *req, | ||
1411 | struct bio *bio) | ||
1412 | { | ||
1413 | int nr_hw_segs = bio_hw_segments(q, bio); | ||
1414 | int nr_phys_segs = bio_phys_segments(q, bio); | ||
1415 | |||
1416 | if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments | ||
1417 | || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { | ||
1418 | req->cmd_flags |= REQ_NOMERGE; | ||
1419 | if (req == q->last_merge) | ||
1420 | q->last_merge = NULL; | ||
1421 | return 0; | ||
1422 | } | ||
1423 | |||
1424 | /* | ||
1425 | * This will form the start of a new hw segment. Bump both | ||
1426 | * counters. | ||
1427 | */ | ||
1428 | req->nr_hw_segments += nr_hw_segs; | ||
1429 | req->nr_phys_segments += nr_phys_segs; | ||
1430 | return 1; | ||
1431 | } | ||
1432 | |||
1433 | static int ll_back_merge_fn(struct request_queue *q, struct request *req, | ||
1434 | struct bio *bio) | ||
1435 | { | ||
1436 | unsigned short max_sectors; | ||
1437 | int len; | ||
1438 | |||
1439 | if (unlikely(blk_pc_request(req))) | ||
1440 | max_sectors = q->max_hw_sectors; | ||
1441 | else | ||
1442 | max_sectors = q->max_sectors; | ||
1443 | |||
1444 | if (req->nr_sectors + bio_sectors(bio) > max_sectors) { | ||
1445 | req->cmd_flags |= REQ_NOMERGE; | ||
1446 | if (req == q->last_merge) | ||
1447 | q->last_merge = NULL; | ||
1448 | return 0; | ||
1449 | } | ||
1450 | if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID))) | ||
1451 | blk_recount_segments(q, req->biotail); | ||
1452 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) | ||
1453 | blk_recount_segments(q, bio); | ||
1454 | len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size; | ||
1455 | if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) && | ||
1456 | !BIOVEC_VIRT_OVERSIZE(len)) { | ||
1457 | int mergeable = ll_new_mergeable(q, req, bio); | ||
1458 | |||
1459 | if (mergeable) { | ||
1460 | if (req->nr_hw_segments == 1) | ||
1461 | req->bio->bi_hw_front_size = len; | ||
1462 | if (bio->bi_hw_segments == 1) | ||
1463 | bio->bi_hw_back_size = len; | ||
1464 | } | ||
1465 | return mergeable; | ||
1466 | } | ||
1467 | |||
1468 | return ll_new_hw_segment(q, req, bio); | ||
1469 | } | ||
1470 | |||
1471 | static int ll_front_merge_fn(struct request_queue *q, struct request *req, | ||
1472 | struct bio *bio) | ||
1473 | { | ||
1474 | unsigned short max_sectors; | ||
1475 | int len; | ||
1476 | |||
1477 | if (unlikely(blk_pc_request(req))) | ||
1478 | max_sectors = q->max_hw_sectors; | ||
1479 | else | ||
1480 | max_sectors = q->max_sectors; | ||
1481 | |||
1482 | |||
1483 | if (req->nr_sectors + bio_sectors(bio) > max_sectors) { | ||
1484 | req->cmd_flags |= REQ_NOMERGE; | ||
1485 | if (req == q->last_merge) | ||
1486 | q->last_merge = NULL; | ||
1487 | return 0; | ||
1488 | } | ||
1489 | len = bio->bi_hw_back_size + req->bio->bi_hw_front_size; | ||
1490 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) | ||
1491 | blk_recount_segments(q, bio); | ||
1492 | if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID))) | ||
1493 | blk_recount_segments(q, req->bio); | ||
1494 | if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) && | ||
1495 | !BIOVEC_VIRT_OVERSIZE(len)) { | ||
1496 | int mergeable = ll_new_mergeable(q, req, bio); | ||
1497 | |||
1498 | if (mergeable) { | ||
1499 | if (bio->bi_hw_segments == 1) | ||
1500 | bio->bi_hw_front_size = len; | ||
1501 | if (req->nr_hw_segments == 1) | ||
1502 | req->biotail->bi_hw_back_size = len; | ||
1503 | } | ||
1504 | return mergeable; | ||
1505 | } | ||
1506 | |||
1507 | return ll_new_hw_segment(q, req, bio); | ||
1508 | } | ||
1509 | |||
1510 | static int ll_merge_requests_fn(struct request_queue *q, struct request *req, | ||
1511 | struct request *next) | ||
1512 | { | ||
1513 | int total_phys_segments; | ||
1514 | int total_hw_segments; | ||
1515 | |||
1516 | /* | ||
1517 | * First check whether either of the requests is a re-queued | ||
1518 | * request; re-queued requests can't be merged. | ||
1519 | */ | ||
1520 | if (req->special || next->special) | ||
1521 | return 0; | ||
1522 | |||
1523 | /* | ||
1524 | * Will it become too large? | ||
1525 | */ | ||
1526 | if ((req->nr_sectors + next->nr_sectors) > q->max_sectors) | ||
1527 | return 0; | ||
1528 | |||
1529 | total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; | ||
1530 | if (blk_phys_contig_segment(q, req->biotail, next->bio)) | ||
1531 | total_phys_segments--; | ||
1532 | |||
1533 | if (total_phys_segments > q->max_phys_segments) | ||
1534 | return 0; | ||
1535 | |||
1536 | total_hw_segments = req->nr_hw_segments + next->nr_hw_segments; | ||
1537 | if (blk_hw_contig_segment(q, req->biotail, next->bio)) { | ||
1538 | int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size; | ||
1539 | /* | ||
1540 | * propagate the combined length to the end of the requests | ||
1541 | */ | ||
1542 | if (req->nr_hw_segments == 1) | ||
1543 | req->bio->bi_hw_front_size = len; | ||
1544 | if (next->nr_hw_segments == 1) | ||
1545 | next->biotail->bi_hw_back_size = len; | ||
1546 | total_hw_segments--; | ||
1547 | } | ||
1548 | |||
1549 | if (total_hw_segments > q->max_hw_segments) | ||
1550 | return 0; | ||
1551 | |||
1552 | /* Merge is OK... */ | ||
1553 | req->nr_phys_segments = total_phys_segments; | ||
1554 | req->nr_hw_segments = total_hw_segments; | ||
1555 | return 1; | ||
1556 | } | ||
1557 | |||
1558 | /* | ||
1559 | * "plug" the device if there are no outstanding requests: this will | ||
1560 | * force the transfer to start only after we have put all the requests | ||
1561 | * on the list. | ||
1562 | * | ||
1563 | * This is called with interrupts off and no requests on the queue and | ||
1564 | * with the queue lock held. | ||
1565 | */ | ||
1566 | void blk_plug_device(struct request_queue *q) | ||
1567 | { | ||
1568 | WARN_ON(!irqs_disabled()); | ||
1569 | |||
1570 | /* | ||
1571 | * don't plug a stopped queue, it must be paired with blk_start_queue() | ||
1572 | * which will restart the queueing | ||
1573 | */ | ||
1574 | if (blk_queue_stopped(q)) | ||
1575 | return; | ||
1576 | |||
1577 | if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) { | ||
1578 | mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); | ||
1579 | blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG); | ||
1580 | } | ||
1581 | } | ||
1582 | |||
1583 | EXPORT_SYMBOL(blk_plug_device); | ||
1584 | |||
1585 | /* | ||
1586 | * remove the queue from the plugged list, if present. called with | ||
1587 | * queue lock held and interrupts disabled. | ||
1588 | */ | ||
1589 | int blk_remove_plug(struct request_queue *q) | ||
1590 | { | ||
1591 | WARN_ON(!irqs_disabled()); | ||
1592 | |||
1593 | if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) | ||
1594 | return 0; | ||
1595 | |||
1596 | del_timer(&q->unplug_timer); | ||
1597 | return 1; | ||
1598 | } | ||
1599 | |||
1600 | EXPORT_SYMBOL(blk_remove_plug); | ||
1601 | |||
1602 | /* | ||
1603 | * remove the plug and let it rip.. | ||
1604 | */ | ||
1605 | void __generic_unplug_device(struct request_queue *q) | ||
1606 | { | ||
1607 | if (unlikely(blk_queue_stopped(q))) | ||
1608 | return; | ||
1609 | |||
1610 | if (!blk_remove_plug(q)) | ||
1611 | return; | ||
1612 | |||
1613 | q->request_fn(q); | ||
1614 | } | ||
1615 | EXPORT_SYMBOL(__generic_unplug_device); | ||
1616 | |||
1617 | /** | ||
1618 | * generic_unplug_device - fire a request queue | ||
1619 | * @q: The &struct request_queue in question | ||
1620 | * | ||
1621 | * Description: | ||
1622 | * Linux uses plugging to build bigger request queues before letting | ||
1623 | * the device have at them. If a queue is plugged, the I/O scheduler | ||
1624 | * is still adding and merging requests on the queue. Once the queue | ||
1625 | * gets unplugged, the request_fn defined for the queue is invoked and | ||
1626 | * transfers started. | ||
1627 | **/ | ||
1628 | void generic_unplug_device(struct request_queue *q) | ||
1629 | { | ||
1630 | spin_lock_irq(q->queue_lock); | ||
1631 | __generic_unplug_device(q); | ||
1632 | spin_unlock_irq(q->queue_lock); | ||
1633 | } | ||
1634 | EXPORT_SYMBOL(generic_unplug_device); | ||
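Most drivers keep the default set up by blk_init_queue(), which points ->unplug_fn at generic_unplug_device. Stacking drivers built on blk_queue_make_request() sometimes install their own unplug hook to kick the queue of the device below them; a hedged sketch with hypothetical mystack_* names:

/* illustrative sketch: propagate an unplug to the underlying device */
static void mystack_unplug(struct request_queue *q)
{
	struct mystack *s = q->queuedata;

	generic_unplug_device(bdev_get_queue(s->lower_bdev));
}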
1635 | |||
1636 | static void blk_backing_dev_unplug(struct backing_dev_info *bdi, | ||
1637 | struct page *page) | ||
1638 | { | ||
1639 | struct request_queue *q = bdi->unplug_io_data; | ||
1640 | |||
1641 | /* | ||
1642 | * devices don't necessarily have an ->unplug_fn defined | ||
1643 | */ | ||
1644 | if (q->unplug_fn) { | ||
1645 | blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, | ||
1646 | q->rq.count[READ] + q->rq.count[WRITE]); | ||
1647 | |||
1648 | q->unplug_fn(q); | ||
1649 | } | ||
1650 | } | ||
1651 | |||
1652 | static void blk_unplug_work(struct work_struct *work) | ||
1653 | { | ||
1654 | struct request_queue *q = | ||
1655 | container_of(work, struct request_queue, unplug_work); | ||
1656 | |||
1657 | blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, | ||
1658 | q->rq.count[READ] + q->rq.count[WRITE]); | ||
1659 | |||
1660 | q->unplug_fn(q); | ||
1661 | } | ||
1662 | |||
1663 | static void blk_unplug_timeout(unsigned long data) | ||
1664 | { | ||
1665 | struct request_queue *q = (struct request_queue *)data; | ||
1666 | |||
1667 | blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL, | ||
1668 | q->rq.count[READ] + q->rq.count[WRITE]); | ||
1669 | |||
1670 | kblockd_schedule_work(&q->unplug_work); | ||
1671 | } | ||
1672 | |||
1673 | /** | ||
1674 | * blk_start_queue - restart a previously stopped queue | ||
1675 | * @q: The &struct request_queue in question | ||
1676 | * | ||
1677 | * Description: | ||
1678 | * blk_start_queue() will clear the stop flag on the queue, and call | ||
1679 | * the request_fn for the queue if it was in a stopped state when | ||
1680 | * entered. Also see blk_stop_queue(). Queue lock must be held. | ||
1681 | **/ | ||
1682 | void blk_start_queue(struct request_queue *q) | ||
1683 | { | ||
1684 | WARN_ON(!irqs_disabled()); | ||
1685 | |||
1686 | clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); | ||
1687 | |||
1688 | /* | ||
1689 | * one level of recursion is ok and is much faster than kicking | ||
1690 | * the unplug handling | ||
1691 | */ | ||
1692 | if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) { | ||
1693 | q->request_fn(q); | ||
1694 | clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags); | ||
1695 | } else { | ||
1696 | blk_plug_device(q); | ||
1697 | kblockd_schedule_work(&q->unplug_work); | ||
1698 | } | ||
1699 | } | ||
1700 | |||
1701 | EXPORT_SYMBOL(blk_start_queue); | ||
1702 | |||
1703 | /** | ||
1704 | * blk_stop_queue - stop a queue | ||
1705 | * @q: The &struct request_queue in question | ||
1706 | * | ||
1707 | * Description: | ||
1708 | * The Linux block layer assumes that a block driver will consume all | ||
1709 | * entries on the request queue when the request_fn strategy is called. | ||
1710 | * Often this will not happen, because of hardware limitations (queue | ||
1711 | * depth settings). If a device driver gets a 'queue full' response, | ||
1712 | * or if it simply chooses not to queue more I/O at one point, it can | ||
1713 | * call this function to prevent the request_fn from being called until | ||
1714 | * the driver has signalled it's ready to go again. This happens by calling | ||
1715 | * blk_start_queue() to restart queue operations. Queue lock must be held. | ||
1716 | **/ | ||
1717 | void blk_stop_queue(struct request_queue *q) | ||
1718 | { | ||
1719 | blk_remove_plug(q); | ||
1720 | set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); | ||
1721 | } | ||
1722 | EXPORT_SYMBOL(blk_stop_queue); | ||
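blk_stop_queue() and blk_start_queue() together give a driver flow control against its hardware. A minimal sketch of the usual pattern, with hypothetical mydev_* helpers: the request_fn runs under the queue lock, so blk_stop_queue() may be called directly, and the completion interrupt handler (also under the queue lock) would call blk_start_queue() once the device has room again.

/* illustrative sketch: stop the queue while the hardware is full */
static void mydev_request_fn(struct request_queue *q)
{
	struct mydev *dev = q->queuedata;
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (mydev_hw_full(dev)) {
			/* leave rq queued; the completion handler calls
			 * blk_start_queue() when there is room again */
			blk_stop_queue(q);
			break;
		}
		blkdev_dequeue_request(rq);
		mydev_issue(dev, rq);
	}
}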
1723 | |||
1724 | /** | ||
1725 | * blk_sync_queue - cancel any pending callbacks on a queue | ||
1726 | * @q: the queue | ||
1727 | * | ||
1728 | * Description: | ||
1729 | * The block layer may perform asynchronous callback activity | ||
1730 | * on a queue, such as calling the unplug function after a timeout. | ||
1731 | * A block device may call blk_sync_queue to ensure that any | ||
1732 | * such activity is cancelled, thus allowing it to release resources | ||
1733 | * that the callbacks might use. The caller must already have made sure | ||
1734 | * that its ->make_request_fn will not re-add plugging prior to calling | ||
1735 | * this function. | ||
1736 | * | ||
1737 | */ | ||
1738 | void blk_sync_queue(struct request_queue *q) | ||
1739 | { | ||
1740 | del_timer_sync(&q->unplug_timer); | ||
1741 | } | ||
1742 | EXPORT_SYMBOL(blk_sync_queue); | ||
1743 | |||
1744 | /** | ||
1745 | * blk_run_queue - run a single device queue | ||
1746 | * @q: The queue to run | ||
1747 | */ | ||
1748 | void blk_run_queue(struct request_queue *q) | ||
1749 | { | ||
1750 | unsigned long flags; | ||
1751 | |||
1752 | spin_lock_irqsave(q->queue_lock, flags); | ||
1753 | blk_remove_plug(q); | ||
1754 | |||
1755 | /* | ||
1756 | * Only recurse once to avoid overrunning the stack, let the unplug | ||
1757 | * handling reinvoke the handler shortly if we already got there. | ||
1758 | */ | ||
1759 | if (!elv_queue_empty(q)) { | ||
1760 | if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) { | ||
1761 | q->request_fn(q); | ||
1762 | clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags); | ||
1763 | } else { | ||
1764 | blk_plug_device(q); | ||
1765 | kblockd_schedule_work(&q->unplug_work); | ||
1766 | } | ||
1767 | } | ||
1768 | |||
1769 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
1770 | } | ||
1771 | EXPORT_SYMBOL(blk_run_queue); | ||
1772 | |||
1773 | /** | ||
1774 | * blk_cleanup_queue - release a &struct request_queue when it is no longer needed | ||
1775 | * @kobj: the kobject belonging to the request queue to be released | ||
1776 | * | ||
1777 | * Description: | ||
1778 | * blk_cleanup_queue is the pair to blk_init_queue() or | ||
1779 | * blk_queue_make_request(). It should be called when a request queue is | ||
1780 | * being released; typically when a block device is being de-registered. | ||
1781 | * Currently, its primary task is to free all the &struct request | ||
1782 | * structures that were allocated to the queue and the queue itself. | ||
1783 | * | ||
1784 | * Caveat: | ||
1785 | * Hopefully the low level driver will have finished any | ||
1786 | * outstanding requests first... | ||
1787 | **/ | ||
1788 | static void blk_release_queue(struct kobject *kobj) | ||
1789 | { | ||
1790 | struct request_queue *q = | ||
1791 | container_of(kobj, struct request_queue, kobj); | ||
1792 | struct request_list *rl = &q->rq; | ||
1793 | |||
1794 | blk_sync_queue(q); | ||
1795 | |||
1796 | if (rl->rq_pool) | ||
1797 | mempool_destroy(rl->rq_pool); | ||
1798 | |||
1799 | if (q->queue_tags) | ||
1800 | __blk_queue_free_tags(q); | ||
1801 | |||
1802 | blk_trace_shutdown(q); | ||
1803 | |||
1804 | bdi_destroy(&q->backing_dev_info); | ||
1805 | kmem_cache_free(requestq_cachep, q); | ||
1806 | } | ||
1807 | |||
1808 | void blk_put_queue(struct request_queue *q) | ||
1809 | { | ||
1810 | kobject_put(&q->kobj); | ||
1811 | } | ||
1812 | EXPORT_SYMBOL(blk_put_queue); | ||
1813 | |||
1814 | void blk_cleanup_queue(struct request_queue * q) | ||
1815 | { | ||
1816 | mutex_lock(&q->sysfs_lock); | ||
1817 | set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); | ||
1818 | mutex_unlock(&q->sysfs_lock); | ||
1819 | |||
1820 | if (q->elevator) | ||
1821 | elevator_exit(q->elevator); | ||
1822 | |||
1823 | blk_put_queue(q); | ||
1824 | } | ||
1825 | |||
1826 | EXPORT_SYMBOL(blk_cleanup_queue); | ||
1827 | |||
1828 | static int blk_init_free_list(struct request_queue *q) | ||
1829 | { | ||
1830 | struct request_list *rl = &q->rq; | ||
1831 | |||
1832 | rl->count[READ] = rl->count[WRITE] = 0; | ||
1833 | rl->starved[READ] = rl->starved[WRITE] = 0; | ||
1834 | rl->elvpriv = 0; | ||
1835 | init_waitqueue_head(&rl->wait[READ]); | ||
1836 | init_waitqueue_head(&rl->wait[WRITE]); | ||
1837 | |||
1838 | rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, | ||
1839 | mempool_free_slab, request_cachep, q->node); | ||
1840 | |||
1841 | if (!rl->rq_pool) | ||
1842 | return -ENOMEM; | ||
1843 | |||
1844 | return 0; | ||
1845 | } | ||
1846 | |||
1847 | struct request_queue *blk_alloc_queue(gfp_t gfp_mask) | ||
1848 | { | ||
1849 | return blk_alloc_queue_node(gfp_mask, -1); | ||
1850 | } | ||
1851 | EXPORT_SYMBOL(blk_alloc_queue); | ||
1852 | |||
1853 | static struct kobj_type queue_ktype; | ||
1854 | |||
1855 | struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | ||
1856 | { | ||
1857 | struct request_queue *q; | ||
1858 | int err; | ||
1859 | |||
1860 | q = kmem_cache_alloc_node(requestq_cachep, | ||
1861 | gfp_mask | __GFP_ZERO, node_id); | ||
1862 | if (!q) | ||
1863 | return NULL; | ||
1864 | |||
1865 | q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; | ||
1866 | q->backing_dev_info.unplug_io_data = q; | ||
1867 | err = bdi_init(&q->backing_dev_info); | ||
1868 | if (err) { | ||
1869 | kmem_cache_free(requestq_cachep, q); | ||
1870 | return NULL; | ||
1871 | } | ||
1872 | |||
1873 | init_timer(&q->unplug_timer); | ||
1874 | |||
1875 | kobject_set_name(&q->kobj, "%s", "queue"); | ||
1876 | q->kobj.ktype = &queue_ktype; | ||
1877 | kobject_init(&q->kobj); | ||
1878 | |||
1879 | mutex_init(&q->sysfs_lock); | ||
1880 | |||
1881 | return q; | ||
1882 | } | ||
1883 | EXPORT_SYMBOL(blk_alloc_queue_node); | ||
1884 | |||
1885 | /** | ||
1886 | * blk_init_queue - prepare a request queue for use with a block device | ||
1887 | * @rfn: The function to be called to process requests that have been | ||
1888 | * placed on the queue. | ||
1889 | * @lock: Request queue spin lock | ||
1890 | * | ||
1891 | * Description: | ||
1892 | * If a block device wishes to use the standard request handling procedures, | ||
1893 | * which sorts requests and coalesces adjacent requests, then it must | ||
1894 | * call blk_init_queue(). The function @rfn will be called when there | ||
1895 | * are requests on the queue that need to be processed. If the device | ||
1896 | * supports plugging, then @rfn may not be called immediately when requests | ||
1897 | * are available on the queue, but may be called at some time later instead. | ||
1898 | * Plugged queues are generally unplugged when a buffer belonging to one | ||
1899 | * of the requests on the queue is needed, or due to memory pressure. | ||
1900 | * | ||
1901 | * @rfn is not required, or even expected, to remove all requests off the | ||
1902 | * queue, but only as many as it can handle at a time. If it does leave | ||
1903 | * requests on the queue, it is responsible for arranging that the requests | ||
1904 | * get dealt with eventually. | ||
1905 | * | ||
1906 | * The queue spin lock must be held while manipulating the requests on the | ||
1907 | * request queue; this lock will be taken also from interrupt context, so irq | ||
1908 | * disabling is needed for it. | ||
1909 | * | ||
1910 | * Function returns a pointer to the initialized request queue, or NULL if | ||
1911 | * it didn't succeed. | ||
1912 | * | ||
1913 | * Note: | ||
1914 | * blk_init_queue() must be paired with a blk_cleanup_queue() call | ||
1915 | * when the block device is deactivated (such as at module unload). | ||
1916 | **/ | ||
1917 | |||
1918 | struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) | ||
1919 | { | ||
1920 | return blk_init_queue_node(rfn, lock, -1); | ||
1921 | } | ||
1922 | EXPORT_SYMBOL(blk_init_queue); | ||
1923 | |||
1924 | struct request_queue * | ||
1925 | blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | ||
1926 | { | ||
1927 | struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id); | ||
1928 | |||
1929 | if (!q) | ||
1930 | return NULL; | ||
1931 | |||
1932 | q->node = node_id; | ||
1933 | if (blk_init_free_list(q)) { | ||
1934 | kmem_cache_free(requestq_cachep, q); | ||
1935 | return NULL; | ||
1936 | } | ||
1937 | |||
1938 | /* | ||
1939 | * if caller didn't supply a lock, they get per-queue locking with | ||
1940 | * our embedded lock | ||
1941 | */ | ||
1942 | if (!lock) { | ||
1943 | spin_lock_init(&q->__queue_lock); | ||
1944 | lock = &q->__queue_lock; | ||
1945 | } | ||
1946 | |||
1947 | q->request_fn = rfn; | ||
1948 | q->prep_rq_fn = NULL; | ||
1949 | q->unplug_fn = generic_unplug_device; | ||
1950 | q->queue_flags = (1 << QUEUE_FLAG_CLUSTER); | ||
1951 | q->queue_lock = lock; | ||
1952 | |||
1953 | blk_queue_segment_boundary(q, 0xffffffff); | ||
1954 | |||
1955 | blk_queue_make_request(q, __make_request); | ||
1956 | blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE); | ||
1957 | |||
1958 | blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); | ||
1959 | blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); | ||
1960 | |||
1961 | q->sg_reserved_size = INT_MAX; | ||
1962 | |||
1963 | /* | ||
1964 | * all done | ||
1965 | */ | ||
1966 | if (!elevator_init(q, NULL)) { | ||
1967 | blk_queue_congestion_threshold(q); | ||
1968 | return q; | ||
1969 | } | ||
1970 | |||
1971 | blk_put_queue(q); | ||
1972 | return NULL; | ||
1973 | } | ||
1974 | EXPORT_SYMBOL(blk_init_queue_node); | ||
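A sketch of the usual initialization sequence, pairing blk_init_queue() with the blk_queue_* settings helpers; the mydev_* names and the particular limits are illustrative only, and the matching blk_cleanup_queue() call belongs in the driver's teardown path:

/* illustrative sketch: create a queue and describe the hardware limits */
static int mydev_init_queue(struct mydev *dev)
{
	dev->queue = blk_init_queue(mydev_request_fn, &dev->lock);
	if (!dev->queue)
		return -ENOMEM;

	dev->queue->queuedata = dev;

	/* tighten the generous defaults to what the controller supports */
	blk_queue_max_hw_segments(dev->queue, 32);
	blk_queue_max_phys_segments(dev->queue, 32);
	blk_queue_max_segment_size(dev->queue, 64 * 1024);

	return 0;
}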
1975 | |||
1976 | int blk_get_queue(struct request_queue *q) | ||
1977 | { | ||
1978 | if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { | ||
1979 | kobject_get(&q->kobj); | ||
1980 | return 0; | ||
1981 | } | ||
1982 | |||
1983 | return 1; | ||
1984 | } | ||
1985 | |||
1986 | EXPORT_SYMBOL(blk_get_queue); | ||
1987 | |||
1988 | static inline void blk_free_request(struct request_queue *q, struct request *rq) | ||
1989 | { | ||
1990 | if (rq->cmd_flags & REQ_ELVPRIV) | ||
1991 | elv_put_request(q, rq); | ||
1992 | mempool_free(rq, q->rq.rq_pool); | ||
1993 | } | ||
1994 | |||
1995 | static struct request * | ||
1996 | blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask) | ||
1997 | { | ||
1998 | struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); | ||
1999 | |||
2000 | if (!rq) | ||
2001 | return NULL; | ||
2002 | |||
2003 | /* | ||
2004 | * first three bits are identical in rq->cmd_flags and bio->bi_rw, | ||
2005 | * see bio.h and blkdev.h | ||
2006 | */ | ||
2007 | rq->cmd_flags = rw | REQ_ALLOCED; | ||
2008 | |||
2009 | if (priv) { | ||
2010 | if (unlikely(elv_set_request(q, rq, gfp_mask))) { | ||
2011 | mempool_free(rq, q->rq.rq_pool); | ||
2012 | return NULL; | ||
2013 | } | ||
2014 | rq->cmd_flags |= REQ_ELVPRIV; | ||
2015 | } | ||
2016 | |||
2017 | return rq; | ||
2018 | } | ||
2019 | |||
2020 | /* | ||
2021 | * ioc_batching returns true if the ioc is a valid batching request and | ||
2022 | * should be given priority access to a request. | ||
2023 | */ | ||
2024 | static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) | ||
2025 | { | ||
2026 | if (!ioc) | ||
2027 | return 0; | ||
2028 | |||
2029 | /* | ||
2030 | * Make sure the process is able to allocate at least 1 request | ||
2031 | * even if the batch times out, otherwise we could theoretically | ||
2032 | * lose wakeups. | ||
2033 | */ | ||
2034 | return ioc->nr_batch_requests == q->nr_batching || | ||
2035 | (ioc->nr_batch_requests > 0 | ||
2036 | && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); | ||
2037 | } | ||
2038 | |||
2039 | /* | ||
2040 | * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This | ||
2041 | * will cause the process to be a "batcher" on all queues in the system. This | ||
2042 | * is the behaviour we want though - once it gets a wakeup it should be given | ||
2043 | * a nice run. | ||
2044 | */ | ||
2045 | static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) | ||
2046 | { | ||
2047 | if (!ioc || ioc_batching(q, ioc)) | ||
2048 | return; | ||
2049 | |||
2050 | ioc->nr_batch_requests = q->nr_batching; | ||
2051 | ioc->last_waited = jiffies; | ||
2052 | } | ||
2053 | |||
2054 | static void __freed_request(struct request_queue *q, int rw) | ||
2055 | { | ||
2056 | struct request_list *rl = &q->rq; | ||
2057 | |||
2058 | if (rl->count[rw] < queue_congestion_off_threshold(q)) | ||
2059 | blk_clear_queue_congested(q, rw); | ||
2060 | |||
2061 | if (rl->count[rw] + 1 <= q->nr_requests) { | ||
2062 | if (waitqueue_active(&rl->wait[rw])) | ||
2063 | wake_up(&rl->wait[rw]); | ||
2064 | |||
2065 | blk_clear_queue_full(q, rw); | ||
2066 | } | ||
2067 | } | ||
2068 | |||
2069 | /* | ||
2070 | * A request has just been released. Account for it, update the full and | ||
2071 | * congestion status, wake up any waiters. Called under q->queue_lock. | ||
2072 | */ | ||
2073 | static void freed_request(struct request_queue *q, int rw, int priv) | ||
2074 | { | ||
2075 | struct request_list *rl = &q->rq; | ||
2076 | |||
2077 | rl->count[rw]--; | ||
2078 | if (priv) | ||
2079 | rl->elvpriv--; | ||
2080 | |||
2081 | __freed_request(q, rw); | ||
2082 | |||
2083 | if (unlikely(rl->starved[rw ^ 1])) | ||
2084 | __freed_request(q, rw ^ 1); | ||
2085 | } | ||
2086 | |||
2087 | #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist) | ||
2088 | /* | ||
2089 | * Get a free request, queue_lock must be held. | ||
2090 | * Returns NULL on failure, with queue_lock held. | ||
2091 | * Returns !NULL on success, with queue_lock *not held*. | ||
2092 | */ | ||
2093 | static struct request *get_request(struct request_queue *q, int rw_flags, | ||
2094 | struct bio *bio, gfp_t gfp_mask) | ||
2095 | { | ||
2096 | struct request *rq = NULL; | ||
2097 | struct request_list *rl = &q->rq; | ||
2098 | struct io_context *ioc = NULL; | ||
2099 | const int rw = rw_flags & 0x01; | ||
2100 | int may_queue, priv; | ||
2101 | |||
2102 | may_queue = elv_may_queue(q, rw_flags); | ||
2103 | if (may_queue == ELV_MQUEUE_NO) | ||
2104 | goto rq_starved; | ||
2105 | |||
2106 | if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) { | ||
2107 | if (rl->count[rw]+1 >= q->nr_requests) { | ||
2108 | ioc = current_io_context(GFP_ATOMIC, q->node); | ||
2109 | /* | ||
2110 | * The queue will fill after this allocation, so set | ||
2111 | * it as full, and mark this process as "batching". | ||
2112 | * This process will be allowed to complete a batch of | ||
2113 | * requests, others will be blocked. | ||
2114 | */ | ||
2115 | if (!blk_queue_full(q, rw)) { | ||
2116 | ioc_set_batching(q, ioc); | ||
2117 | blk_set_queue_full(q, rw); | ||
2118 | } else { | ||
2119 | if (may_queue != ELV_MQUEUE_MUST | ||
2120 | && !ioc_batching(q, ioc)) { | ||
2121 | /* | ||
2122 | * The queue is full and the allocating | ||
2123 | * process is not a "batcher", and not | ||
2124 | * exempted by the IO scheduler | ||
2125 | */ | ||
2126 | goto out; | ||
2127 | } | ||
2128 | } | ||
2129 | } | ||
2130 | blk_set_queue_congested(q, rw); | ||
2131 | } | ||
2132 | |||
2133 | /* | ||
2134 | * Only allow batching queuers to allocate up to 50% over the defined | ||
2135 | * limit of requests, otherwise we could have thousands of requests | ||
2136 | * allocated with any setting of ->nr_requests | ||
2137 | */ | ||
2138 | if (rl->count[rw] >= (3 * q->nr_requests / 2)) | ||
2139 | goto out; | ||
2140 | |||
2141 | rl->count[rw]++; | ||
2142 | rl->starved[rw] = 0; | ||
2143 | |||
2144 | priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | ||
2145 | if (priv) | ||
2146 | rl->elvpriv++; | ||
2147 | |||
2148 | spin_unlock_irq(q->queue_lock); | ||
2149 | |||
2150 | rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); | ||
2151 | if (unlikely(!rq)) { | ||
2152 | /* | ||
2153 | * Allocation failed presumably due to memory. Undo anything | ||
2154 | * we might have messed up. | ||
2155 | * | ||
2156 | * Allocating task should really be put onto the front of the | ||
2157 | * wait queue, but this is pretty rare. | ||
2158 | */ | ||
2159 | spin_lock_irq(q->queue_lock); | ||
2160 | freed_request(q, rw, priv); | ||
2161 | |||
2162 | /* | ||
2163 | * in the very unlikely event that allocation failed and no | ||
2164 | * requests for this direction were pending, mark us starved | ||
2165 | * so that freeing of a request in the other direction will | ||
2166 | * notice us. Another possible fix would be to split the | ||
2167 | * rq mempool into READ and WRITE | ||
2168 | */ | ||
2169 | rq_starved: | ||
2170 | if (unlikely(rl->count[rw] == 0)) | ||
2171 | rl->starved[rw] = 1; | ||
2172 | |||
2173 | goto out; | ||
2174 | } | ||
2175 | |||
2176 | /* | ||
2177 | * ioc may be NULL here, and ioc_batching will be false. That's | ||
2178 | * OK, if the queue is under the request limit then requests need | ||
2179 | * not count toward the nr_batch_requests limit. There will always | ||
2180 | * be some limit enforced by BLK_BATCH_TIME. | ||
2181 | */ | ||
2182 | if (ioc_batching(q, ioc)) | ||
2183 | ioc->nr_batch_requests--; | ||
2184 | |||
2185 | rq_init(q, rq); | ||
2186 | |||
2187 | blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ); | ||
2188 | out: | ||
2189 | return rq; | ||
2190 | } | ||
2191 | |||
2192 | /* | ||
2193 | * No available requests for this queue, unplug the device and wait for some | ||
2194 | * requests to become available. | ||
2195 | * | ||
2196 | * Called with q->queue_lock held, and returns with it unlocked. | ||
2197 | */ | ||
2198 | static struct request *get_request_wait(struct request_queue *q, int rw_flags, | ||
2199 | struct bio *bio) | ||
2200 | { | ||
2201 | const int rw = rw_flags & 0x01; | ||
2202 | struct request *rq; | ||
2203 | |||
2204 | rq = get_request(q, rw_flags, bio, GFP_NOIO); | ||
2205 | while (!rq) { | ||
2206 | DEFINE_WAIT(wait); | ||
2207 | struct request_list *rl = &q->rq; | ||
2208 | |||
2209 | prepare_to_wait_exclusive(&rl->wait[rw], &wait, | ||
2210 | TASK_UNINTERRUPTIBLE); | ||
2211 | |||
2212 | rq = get_request(q, rw_flags, bio, GFP_NOIO); | ||
2213 | |||
2214 | if (!rq) { | ||
2215 | struct io_context *ioc; | ||
2216 | |||
2217 | blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ); | ||
2218 | |||
2219 | __generic_unplug_device(q); | ||
2220 | spin_unlock_irq(q->queue_lock); | ||
2221 | io_schedule(); | ||
2222 | |||
2223 | /* | ||
2224 | * After sleeping, we become a "batching" process and | ||
2225 | * will be able to allocate at least one request, and | ||
2226 | * up to a big batch of them for a small period of time. | ||
2227 | * See ioc_batching, ioc_set_batching | ||
2228 | */ | ||
2229 | ioc = current_io_context(GFP_NOIO, q->node); | ||
2230 | ioc_set_batching(q, ioc); | ||
2231 | |||
2232 | spin_lock_irq(q->queue_lock); | ||
2233 | } | ||
2234 | finish_wait(&rl->wait[rw], &wait); | ||
2235 | } | ||
2236 | |||
2237 | return rq; | ||
2238 | } | ||
2239 | |||
2240 | struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) | ||
2241 | { | ||
2242 | struct request *rq; | ||
2243 | |||
2244 | BUG_ON(rw != READ && rw != WRITE); | ||
2245 | |||
2246 | spin_lock_irq(q->queue_lock); | ||
2247 | if (gfp_mask & __GFP_WAIT) { | ||
2248 | rq = get_request_wait(q, rw, NULL); | ||
2249 | } else { | ||
2250 | rq = get_request(q, rw, NULL, gfp_mask); | ||
2251 | if (!rq) | ||
2252 | spin_unlock_irq(q->queue_lock); | ||
2253 | } | ||
2254 | /* q->queue_lock is unlocked at this point */ | ||
2255 | |||
2256 | return rq; | ||
2257 | } | ||
2258 | EXPORT_SYMBOL(blk_get_request); | ||
2259 | |||
2260 | /** | ||
2261 | * blk_start_queueing - initiate dispatch of requests to device | ||
2262 | * @q: request queue to kick into gear | ||
2263 | * | ||
2264 | * This is basically a helper to remove the need to know whether a queue | ||
2265 | * is plugged or not if someone just wants to initiate dispatch of requests | ||
2266 | * for this queue. | ||
2267 | * | ||
2268 | * The queue lock must be held with interrupts disabled. | ||
2269 | */ | ||
2270 | void blk_start_queueing(struct request_queue *q) | ||
2271 | { | ||
2272 | if (!blk_queue_plugged(q)) | ||
2273 | q->request_fn(q); | ||
2274 | else | ||
2275 | __generic_unplug_device(q); | ||
2276 | } | ||
2277 | EXPORT_SYMBOL(blk_start_queueing); | ||
2278 | |||
2279 | /** | ||
2280 | * blk_requeue_request - put a request back on queue | ||
2281 | * @q: request queue where request should be inserted | ||
2282 | * @rq: request to be inserted | ||
2283 | * | ||
2284 | * Description: | ||
2285 | * Drivers often keep queueing requests until the hardware cannot accept | ||
2286 | * more, when that condition happens we need to put the request back | ||
2287 | * on the queue. Must be called with queue lock held. | ||
2288 | */ | ||
2289 | void blk_requeue_request(struct request_queue *q, struct request *rq) | ||
2290 | { | ||
2291 | blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); | ||
2292 | |||
2293 | if (blk_rq_tagged(rq)) | ||
2294 | blk_queue_end_tag(q, rq); | ||
2295 | |||
2296 | elv_requeue_request(q, rq); | ||
2297 | } | ||
2298 | |||
2299 | EXPORT_SYMBOL(blk_requeue_request); | ||
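A sketch of the case the description mentions: the driver has already dequeued a request, the hardware then refuses it, and the request goes back on the queue. Queue lock held throughout; the mydev_* names are illustrative:

/* illustrative sketch: hardware rejected a dequeued request, put it back */
static void mydev_submit(struct request_queue *q, struct request *rq)
{
	if (mydev_hw_send(q->queuedata, rq) == -EBUSY) {
		blk_requeue_request(q, rq);
		blk_stop_queue(q);	/* restarted from the completion IRQ */
	}
}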
2300 | |||
2301 | /** | ||
2302 | * blk_insert_request - insert a special request in to a request queue | ||
2303 | * @q: request queue where request should be inserted | ||
2304 | * @rq: request to be inserted | ||
2305 | * @at_head: insert request at head or tail of queue | ||
2306 | * @data: private data | ||
2307 | * | ||
2308 | * Description: | ||
2309 | * Many block devices need to execute commands asynchronously, so they don't | ||
2310 | * block the whole kernel from preemption during request execution. This is | ||
2311 | * accomplished normally by inserting artificial requests tagged as | ||
2312 | * REQ_SPECIAL in to the corresponding request queue, and letting them be | ||
2313 | * scheduled for actual execution by the request queue. | ||
2314 | * | ||
2315 | * We have the option of inserting the head or the tail of the queue. | ||
2316 | * Typically we use the tail for new ioctls and so forth. We use the head | ||
2317 | * of the queue for things like a QUEUE_FULL message from a device, or a | ||
2318 | * host that is unable to accept a particular command. | ||
2319 | */ | ||
2320 | void blk_insert_request(struct request_queue *q, struct request *rq, | ||
2321 | int at_head, void *data) | ||
2322 | { | ||
2323 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; | ||
2324 | unsigned long flags; | ||
2325 | |||
2326 | /* | ||
2327 | * tell I/O scheduler that this isn't a regular read/write (ie it | ||
2328 | * must not attempt merges on this) and that it acts as a soft | ||
2329 | * barrier | ||
2330 | */ | ||
2331 | rq->cmd_type = REQ_TYPE_SPECIAL; | ||
2332 | rq->cmd_flags |= REQ_SOFTBARRIER; | ||
2333 | |||
2334 | rq->special = data; | ||
2335 | |||
2336 | spin_lock_irqsave(q->queue_lock, flags); | ||
2337 | |||
2338 | /* | ||
2339 | * If command is tagged, release the tag | ||
2340 | */ | ||
2341 | if (blk_rq_tagged(rq)) | ||
2342 | blk_queue_end_tag(q, rq); | ||
2343 | |||
2344 | drive_stat_acct(rq, rq->nr_sectors, 1); | ||
2345 | __elv_add_request(q, rq, where, 0); | ||
2346 | blk_start_queueing(q); | ||
2347 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2348 | } | ||
2349 | |||
2350 | EXPORT_SYMBOL(blk_insert_request); | ||
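A sketch of issuing such a special request, carrying a driver-private command in the @data pointer; the mydev_* names are illustrative and a real caller would add error handling to taste:

/* illustrative sketch: queue a driver-private command asynchronously */
static int mydev_send_special(struct request_queue *q, struct mydev_cmd *cmd)
{
	struct request *rq;

	rq = blk_get_request(q, READ, GFP_ATOMIC);
	if (!rq)
		return -ENOMEM;

	/* head insertion, as for urgent device housekeeping */
	blk_insert_request(q, rq, 1, cmd);
	return 0;
}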
2351 | |||
2352 | static int __blk_rq_unmap_user(struct bio *bio) | ||
2353 | { | ||
2354 | int ret = 0; | ||
2355 | |||
2356 | if (bio) { | ||
2357 | if (bio_flagged(bio, BIO_USER_MAPPED)) | ||
2358 | bio_unmap_user(bio); | ||
2359 | else | ||
2360 | ret = bio_uncopy_user(bio); | ||
2361 | } | ||
2362 | |||
2363 | return ret; | ||
2364 | } | ||
2365 | |||
2366 | int blk_rq_append_bio(struct request_queue *q, struct request *rq, | ||
2367 | struct bio *bio) | ||
2368 | { | ||
2369 | if (!rq->bio) | ||
2370 | blk_rq_bio_prep(q, rq, bio); | ||
2371 | else if (!ll_back_merge_fn(q, rq, bio)) | ||
2372 | return -EINVAL; | ||
2373 | else { | ||
2374 | rq->biotail->bi_next = bio; | ||
2375 | rq->biotail = bio; | ||
2376 | |||
2377 | rq->data_len += bio->bi_size; | ||
2378 | } | ||
2379 | return 0; | ||
2380 | } | ||
2381 | EXPORT_SYMBOL(blk_rq_append_bio); | ||
2382 | |||
2383 | static int __blk_rq_map_user(struct request_queue *q, struct request *rq, | ||
2384 | void __user *ubuf, unsigned int len) | ||
2385 | { | ||
2386 | unsigned long uaddr; | ||
2387 | struct bio *bio, *orig_bio; | ||
2388 | int reading, ret; | ||
2389 | |||
2390 | reading = rq_data_dir(rq) == READ; | ||
2391 | |||
2392 | /* | ||
2393 | * if alignment requirement is satisfied, map in user pages for | ||
2394 | * direct dma. else, set up kernel bounce buffers | ||
2395 | */ | ||
2396 | uaddr = (unsigned long) ubuf; | ||
2397 | if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) | ||
2398 | bio = bio_map_user(q, NULL, uaddr, len, reading); | ||
2399 | else | ||
2400 | bio = bio_copy_user(q, uaddr, len, reading); | ||
2401 | |||
2402 | if (IS_ERR(bio)) | ||
2403 | return PTR_ERR(bio); | ||
2404 | |||
2405 | orig_bio = bio; | ||
2406 | blk_queue_bounce(q, &bio); | ||
2407 | |||
2408 | /* | ||
2409 | * We link the bounce buffer in and could have to traverse it | ||
2410 | * later so we have to get a ref to prevent it from being freed | ||
2411 | */ | ||
2412 | bio_get(bio); | ||
2413 | |||
2414 | ret = blk_rq_append_bio(q, rq, bio); | ||
2415 | if (!ret) | ||
2416 | return bio->bi_size; | ||
2417 | |||
2418 | /* if it was bounced we must call the end io function */ | ||
2419 | bio_endio(bio, 0); | ||
2420 | __blk_rq_unmap_user(orig_bio); | ||
2421 | bio_put(bio); | ||
2422 | return ret; | ||
2423 | } | ||
2424 | |||
2425 | /** | ||
2426 | * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage | ||
2427 | * @q: request queue where request should be inserted | ||
2428 | * @rq: request structure to fill | ||
2429 | * @ubuf: the user buffer | ||
2430 | * @len: length of user data | ||
2431 | * | ||
2432 | * Description: | ||
2433 | * Data will be mapped directly for zero copy io, if possible. Otherwise | ||
2434 | * a kernel bounce buffer is used. | ||
2435 | * | ||
2436 | * A matching blk_rq_unmap_user() must be issued at the end of io, while | ||
2437 | * still in process context. | ||
2438 | * | ||
2439 | * Note: The mapped bio may need to be bounced through blk_queue_bounce() | ||
2440 | * before being submitted to the device, as pages mapped may be out of | ||
2441 | * reach. It's the caller's responsibility to make sure this happens. The | ||
2442 | * original bio must be passed back in to blk_rq_unmap_user() for proper | ||
2443 | * unmapping. | ||
2444 | */ | ||
2445 | int blk_rq_map_user(struct request_queue *q, struct request *rq, | ||
2446 | void __user *ubuf, unsigned long len) | ||
2447 | { | ||
2448 | unsigned long bytes_read = 0; | ||
2449 | struct bio *bio = NULL; | ||
2450 | int ret; | ||
2451 | |||
2452 | if (len > (q->max_hw_sectors << 9)) | ||
2453 | return -EINVAL; | ||
2454 | if (!len || !ubuf) | ||
2455 | return -EINVAL; | ||
2456 | |||
2457 | while (bytes_read != len) { | ||
2458 | unsigned long map_len, end, start; | ||
2459 | |||
2460 | map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE); | ||
2461 | end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1) | ||
2462 | >> PAGE_SHIFT; | ||
2463 | start = (unsigned long)ubuf >> PAGE_SHIFT; | ||
2464 | |||
2465 | /* | ||
2466 | * A bad offset could cause us to require BIO_MAX_PAGES + 1 | ||
2467 | * pages. If this happens we just lower the requested | ||
2468 | * mapping len by a page so that we can fit | ||
2469 | */ | ||
2470 | if (end - start > BIO_MAX_PAGES) | ||
2471 | map_len -= PAGE_SIZE; | ||
2472 | |||
2473 | ret = __blk_rq_map_user(q, rq, ubuf, map_len); | ||
2474 | if (ret < 0) | ||
2475 | goto unmap_rq; | ||
2476 | if (!bio) | ||
2477 | bio = rq->bio; | ||
2478 | bytes_read += ret; | ||
2479 | ubuf += ret; | ||
2480 | } | ||
2481 | |||
2482 | rq->buffer = rq->data = NULL; | ||
2483 | return 0; | ||
2484 | unmap_rq: | ||
2485 | blk_rq_unmap_user(bio); | ||
2486 | return ret; | ||
2487 | } | ||
2488 | |||
2489 | EXPORT_SYMBOL(blk_rq_map_user); | ||
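A sketch of the whole map/execute/unmap lifecycle for a pass-through command, in the style of an SG_IO-like ioctl handler; names are illustrative and error handling is trimmed. Note the bio saved before execution, which is what blk_rq_unmap_user() must be given afterwards, exactly as the description above requires:

/* illustrative sketch: map a user buffer, run the request, unmap it */
static int mydev_pc_ioctl(struct request_queue *q, struct gendisk *disk,
			  void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* rq->cmd[] would be filled with the actual device command here */

	err = blk_rq_map_user(q, rq, ubuf, len);
	if (err) {
		blk_put_request(rq);
		return err;
	}

	bio = rq->bio;		/* completion may advance rq->bio */
	err = blk_execute_rq(q, disk, rq, 0);

	if (blk_rq_unmap_user(bio))
		err = -EFAULT;
	blk_put_request(rq);
	return err;
}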
2490 | |||
2491 | /** | ||
2492 | * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage | ||
2493 | * @q: request queue where request should be inserted | ||
2494 | * @rq: request to map data to | ||
2495 | * @iov: pointer to the iovec | ||
2496 | * @iov_count: number of elements in the iovec | ||
2497 | * @len: I/O byte count | ||
2498 | * | ||
2499 | * Description: | ||
2500 | * Data will be mapped directly for zero copy io, if possible. Otherwise | ||
2501 | * a kernel bounce buffer is used. | ||
2502 | * | ||
2503 | * A matching blk_rq_unmap_user() must be issued at the end of io, while | ||
2504 | * still in process context. | ||
2505 | * | ||
2506 | * Note: The mapped bio may need to be bounced through blk_queue_bounce() | ||
2507 | * before being submitted to the device, as pages mapped may be out of | ||
2508 | * reach. It's the caller's responsibility to make sure this happens. The | ||
2509 | * original bio must be passed back in to blk_rq_unmap_user() for proper | ||
2510 | * unmapping. | ||
2511 | */ | ||
2512 | int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, | ||
2513 | struct sg_iovec *iov, int iov_count, unsigned int len) | ||
2514 | { | ||
2515 | struct bio *bio; | ||
2516 | |||
2517 | if (!iov || iov_count <= 0) | ||
2518 | return -EINVAL; | ||
2519 | |||
2520 | /* we don't allow misaligned data like bio_map_user() does. If the | ||
2521 | * user is using sg, they're expected to know the alignment constraints | ||
2522 | * and respect them accordingly */ | ||
2523 | bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ); | ||
2524 | if (IS_ERR(bio)) | ||
2525 | return PTR_ERR(bio); | ||
2526 | |||
2527 | if (bio->bi_size != len) { | ||
2528 | bio_endio(bio, 0); | ||
2529 | bio_unmap_user(bio); | ||
2530 | return -EINVAL; | ||
2531 | } | ||
2532 | |||
2533 | bio_get(bio); | ||
2534 | blk_rq_bio_prep(q, rq, bio); | ||
2535 | rq->buffer = rq->data = NULL; | ||
2536 | return 0; | ||
2537 | } | ||
2538 | |||
2539 | EXPORT_SYMBOL(blk_rq_map_user_iov); | ||
2540 | |||
2541 | /** | ||
2542 | * blk_rq_unmap_user - unmap a request with user data | ||
2543 | * @bio: start of bio list | ||
2544 | * | ||
2545 | * Description: | ||
2546 | * Unmap a rq previously mapped by blk_rq_map_user(). The caller must | ||
2547 | * supply the original rq->bio from the blk_rq_map_user() return, since | ||
2548 | * the io completion may have changed rq->bio. | ||
2549 | */ | ||
2550 | int blk_rq_unmap_user(struct bio *bio) | ||
2551 | { | ||
2552 | struct bio *mapped_bio; | ||
2553 | int ret = 0, ret2; | ||
2554 | |||
2555 | while (bio) { | ||
2556 | mapped_bio = bio; | ||
2557 | if (unlikely(bio_flagged(bio, BIO_BOUNCED))) | ||
2558 | mapped_bio = bio->bi_private; | ||
2559 | |||
2560 | ret2 = __blk_rq_unmap_user(mapped_bio); | ||
2561 | if (ret2 && !ret) | ||
2562 | ret = ret2; | ||
2563 | |||
2564 | mapped_bio = bio; | ||
2565 | bio = bio->bi_next; | ||
2566 | bio_put(mapped_bio); | ||
2567 | } | ||
2568 | |||
2569 | return ret; | ||
2570 | } | ||
2571 | |||
2572 | EXPORT_SYMBOL(blk_rq_unmap_user); | ||
2573 | |||
2574 | /** | ||
2575 | * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage | ||
2576 | * @q: request queue where request should be inserted | ||
2577 | * @rq: request to fill | ||
2578 | * @kbuf: the kernel buffer | ||
2579 | * @len: length of user data | ||
2580 | * @gfp_mask: memory allocation flags | ||
2581 | */ | ||
2582 | int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, | ||
2583 | unsigned int len, gfp_t gfp_mask) | ||
2584 | { | ||
2585 | struct bio *bio; | ||
2586 | |||
2587 | if (len > (q->max_hw_sectors << 9)) | ||
2588 | return -EINVAL; | ||
2589 | if (!len || !kbuf) | ||
2590 | return -EINVAL; | ||
2591 | |||
2592 | bio = bio_map_kern(q, kbuf, len, gfp_mask); | ||
2593 | if (IS_ERR(bio)) | ||
2594 | return PTR_ERR(bio); | ||
2595 | |||
2596 | if (rq_data_dir(rq) == WRITE) | ||
2597 | bio->bi_rw |= (1 << BIO_RW); | ||
2598 | |||
2599 | blk_rq_bio_prep(q, rq, bio); | ||
2600 | blk_queue_bounce(q, &rq->bio); | ||
2601 | rq->buffer = rq->data = NULL; | ||
2602 | return 0; | ||
2603 | } | ||
2604 | |||
2605 | EXPORT_SYMBOL(blk_rq_map_kern); | ||
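The kernel-buffer variant follows the same shape but needs no unmap step; a hedged sketch along the same lines, again with illustrative names:

/* illustrative sketch: attach a kernel buffer to a pass-through request */
static int mydev_kern_cmd(struct request_queue *q, struct gendisk *disk,
			  void *buf, unsigned int len)
{
	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
	int err;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* rq->cmd[] would carry the actual device command */

	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!err)
		err = blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return err;
}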
2606 | |||
2607 | /** | ||
2608 | * blk_execute_rq_nowait - insert a request into queue for execution | ||
2609 | * @q: queue to insert the request in | ||
2610 | * @bd_disk: matching gendisk | ||
2611 | * @rq: request to insert | ||
2612 | * @at_head: insert request at head or tail of queue | ||
2613 | * @done: I/O completion handler | ||
2614 | * | ||
2615 | * Description: | ||
2616 | * Insert a fully prepared request at the back of the io scheduler queue | ||
2617 | * for execution. Don't wait for completion. | ||
2618 | */ | ||
2619 | void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | ||
2620 | struct request *rq, int at_head, | ||
2621 | rq_end_io_fn *done) | ||
2622 | { | ||
2623 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; | ||
2624 | |||
2625 | rq->rq_disk = bd_disk; | ||
2626 | rq->cmd_flags |= REQ_NOMERGE; | ||
2627 | rq->end_io = done; | ||
2628 | WARN_ON(irqs_disabled()); | ||
2629 | spin_lock_irq(q->queue_lock); | ||
2630 | __elv_add_request(q, rq, where, 1); | ||
2631 | __generic_unplug_device(q); | ||
2632 | spin_unlock_irq(q->queue_lock); | ||
2633 | } | ||
2634 | EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); | ||
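When the caller cannot sleep, this nowait variant takes a completion callback instead of blocking; a sketch with an illustrative done routine. The callback runs with the queue lock held, which is why __blk_put_request() rather than blk_put_request() is the right release there, mirroring blk_end_sync_rq() below:

/* illustrative sketch: fire-and-forget execution with a completion hook */
static void mydev_cmd_done(struct request *rq, int error)
{
	/* inspect rq->errors or rq->sense here if needed */
	__blk_put_request(rq->q, rq);
}

static void mydev_send_async(struct request_queue *q, struct gendisk *disk,
			     struct request *rq)
{
	blk_execute_rq_nowait(q, disk, rq, 0, mydev_cmd_done);
}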
2635 | |||
2636 | /** | ||
2637 | * blk_execute_rq - insert a request into queue for execution | ||
2638 | * @q: queue to insert the request in | ||
2639 | * @bd_disk: matching gendisk | ||
2640 | * @rq: request to insert | ||
2641 | * @at_head: insert request at head or tail of queue | ||
2642 | * | ||
2643 | * Description: | ||
2644 | * Insert a fully prepared request at the back of the io scheduler queue | ||
2645 | * for execution and wait for completion. | ||
2646 | */ | ||
2647 | int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk, | ||
2648 | struct request *rq, int at_head) | ||
2649 | { | ||
2650 | DECLARE_COMPLETION_ONSTACK(wait); | ||
2651 | char sense[SCSI_SENSE_BUFFERSIZE]; | ||
2652 | int err = 0; | ||
2653 | |||
2654 | /* | ||
2655 | * we need an extra reference to the request, so we can look at | ||
2656 | * it after io completion | ||
2657 | */ | ||
2658 | rq->ref_count++; | ||
2659 | |||
2660 | if (!rq->sense) { | ||
2661 | memset(sense, 0, sizeof(sense)); | ||
2662 | rq->sense = sense; | ||
2663 | rq->sense_len = 0; | ||
2664 | } | ||
2665 | |||
2666 | rq->end_io_data = &wait; | ||
2667 | blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq); | ||
2668 | wait_for_completion(&wait); | ||
2669 | |||
2670 | if (rq->errors) | ||
2671 | err = -EIO; | ||
2672 | |||
2673 | return err; | ||
2674 | } | ||
2675 | |||
2676 | EXPORT_SYMBOL(blk_execute_rq); | ||
2677 | |||
2678 | static void bio_end_empty_barrier(struct bio *bio, int err) | ||
2679 | { | ||
2680 | if (err) | ||
2681 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | ||
2682 | |||
2683 | complete(bio->bi_private); | ||
2684 | } | ||
2685 | |||
2686 | /** | ||
2687 | * blkdev_issue_flush - queue a flush | ||
2688 | * @bdev: blockdev to issue flush for | ||
2689 | * @error_sector: error sector | ||
2690 | * | ||
2691 | * Description: | ||
2692 | * Issue a flush for the block device in question. Caller can supply | ||
2693 | * room for storing the error offset in case of a flush error, if they | ||
2694 | * wish to. The function itself waits for the flush to complete. | ||
2695 | */ | ||
2696 | int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) | ||
2697 | { | ||
2698 | DECLARE_COMPLETION_ONSTACK(wait); | ||
2699 | struct request_queue *q; | ||
2700 | struct bio *bio; | ||
2701 | int ret; | ||
2702 | |||
2703 | if (bdev->bd_disk == NULL) | ||
2704 | return -ENXIO; | ||
2705 | |||
2706 | q = bdev_get_queue(bdev); | ||
2707 | if (!q) | ||
2708 | return -ENXIO; | ||
2709 | |||
2710 | bio = bio_alloc(GFP_KERNEL, 0); | ||
2711 | if (!bio) | ||
2712 | return -ENOMEM; | ||
2713 | |||
2714 | bio->bi_end_io = bio_end_empty_barrier; | ||
2715 | bio->bi_private = &wait; | ||
2716 | bio->bi_bdev = bdev; | ||
2717 | submit_bio(1 << BIO_RW_BARRIER, bio); | ||
2718 | |||
2719 | wait_for_completion(&wait); | ||
2720 | |||
2721 | /* | ||
2722 | * The driver must store the error location in ->bi_sector, if | ||
2723 | * it supports it. For non-stacked drivers, this should be copied | ||
2724 | * from rq->sector. | ||
2725 | */ | ||
2726 | if (error_sector) | ||
2727 | *error_sector = bio->bi_sector; | ||
2728 | |||
2729 | ret = 0; | ||
2730 | if (!bio_flagged(bio, BIO_UPTODATE)) | ||
2731 | ret = -EIO; | ||
2732 | |||
2733 | bio_put(bio); | ||
2734 | return ret; | ||
2735 | } | ||
2736 | |||
2737 | EXPORT_SYMBOL(blkdev_issue_flush); | ||
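A sketch of a typical caller draining a device's volatile write cache, e.g. from a filesystem's sync path; the error_sector argument is optional room for an error offset, as the description says:

/* illustrative sketch: flush a device's volatile write cache */
static int myfs_flush_device(struct block_device *bdev)
{
	sector_t error_sector;

	/* error_sector is only filled in by drivers that can report it */
	return blkdev_issue_flush(bdev, &error_sector);
}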
2738 | |||
2739 | static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io) | ||
2740 | { | ||
2741 | int rw = rq_data_dir(rq); | ||
2742 | |||
2743 | if (!blk_fs_request(rq) || !rq->rq_disk) | ||
2744 | return; | ||
2745 | |||
2746 | if (!new_io) { | ||
2747 | __disk_stat_inc(rq->rq_disk, merges[rw]); | ||
2748 | } else { | ||
2749 | disk_round_stats(rq->rq_disk); | ||
2750 | rq->rq_disk->in_flight++; | ||
2751 | } | ||
2752 | } | ||
2753 | |||
2754 | /* | ||
2755 | * add-request adds a request to the linked list. | ||
2756 | * queue lock is held and interrupts disabled, as we muck with the | ||
2757 | * request queue list. | ||
2758 | */ | ||
2759 | static inline void add_request(struct request_queue * q, struct request * req) | ||
2760 | { | ||
2761 | drive_stat_acct(req, req->nr_sectors, 1); | ||
2762 | |||
2763 | /* | ||
2764 | * elevator indicated where it wants this request to be | ||
2765 | * inserted at elevator_merge time | ||
2766 | */ | ||
2767 | __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0); | ||
2768 | } | ||
2769 | |||
2770 | /* | ||
2771 | * disk_round_stats() - Round off the performance stats on a struct | ||
2772 | * disk_stats. | ||
2773 | * | ||
2774 | * The average IO queue length and utilisation statistics are maintained | ||
2775 | * by observing the current state of the queue length and the amount of | ||
2776 | * time it has been in this state for. | ||
2777 | * | ||
2778 | * Normally, that accounting is done on IO completion, but that can result | ||
2779 | * in more than a second's worth of IO being accounted for within any one | ||
2780 | * second, leading to >100% utilisation. To deal with that, we call this | ||
2781 | * function to do a round-off before returning the results when reading | ||
2782 | * /proc/diskstats. This accounts immediately for all queue usage up to | ||
2783 | * the current jiffies and restarts the counters again. | ||
2784 | */ | ||
2785 | void disk_round_stats(struct gendisk *disk) | ||
2786 | { | ||
2787 | unsigned long now = jiffies; | ||
2788 | |||
2789 | if (now == disk->stamp) | ||
2790 | return; | ||
2791 | |||
2792 | if (disk->in_flight) { | ||
2793 | __disk_stat_add(disk, time_in_queue, | ||
2794 | disk->in_flight * (now - disk->stamp)); | ||
2795 | __disk_stat_add(disk, io_ticks, (now - disk->stamp)); | ||
2796 | } | ||
2797 | disk->stamp = now; | ||
2798 | } | ||
2799 | |||
2800 | EXPORT_SYMBOL_GPL(disk_round_stats); | ||
2801 | |||
2802 | /* | ||
2803 | * queue lock must be held | ||
2804 | */ | ||
2805 | void __blk_put_request(struct request_queue *q, struct request *req) | ||
2806 | { | ||
2807 | if (unlikely(!q)) | ||
2808 | return; | ||
2809 | if (unlikely(--req->ref_count)) | ||
2810 | return; | ||
2811 | |||
2812 | elv_completed_request(q, req); | ||
2813 | |||
2814 | /* | ||
2815 | * Request may not have originated from ll_rw_blk. If not, | ||
2816 | * it didn't come out of our reserved rq pools | ||
2817 | */ | ||
2818 | if (req->cmd_flags & REQ_ALLOCED) { | ||
2819 | int rw = rq_data_dir(req); | ||
2820 | int priv = req->cmd_flags & REQ_ELVPRIV; | ||
2821 | |||
2822 | BUG_ON(!list_empty(&req->queuelist)); | ||
2823 | BUG_ON(!hlist_unhashed(&req->hash)); | ||
2824 | |||
2825 | blk_free_request(q, req); | ||
2826 | freed_request(q, rw, priv); | ||
2827 | } | ||
2828 | } | ||
2829 | |||
2830 | EXPORT_SYMBOL_GPL(__blk_put_request); | ||
2831 | |||
2832 | void blk_put_request(struct request *req) | ||
2833 | { | ||
2834 | unsigned long flags; | ||
2835 | struct request_queue *q = req->q; | ||
2836 | |||
2837 | /* | ||
2838 | * Gee, IDE calls in w/ NULL q. Fix IDE and remove the | ||
2839 | * following if (q) test. | ||
2840 | */ | ||
2841 | if (q) { | ||
2842 | spin_lock_irqsave(q->queue_lock, flags); | ||
2843 | __blk_put_request(q, req); | ||
2844 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
2845 | } | ||
2846 | } | ||
2847 | |||
2848 | EXPORT_SYMBOL(blk_put_request); | ||
2849 | |||
2850 | /** | ||
2851 | * blk_end_sync_rq - executes a completion event on a request | ||
2852 | * @rq: request to complete | ||
2853 | * @error: end io status of the request | ||
2854 | */ | ||
2855 | void blk_end_sync_rq(struct request *rq, int error) | ||
2856 | { | ||
2857 | struct completion *waiting = rq->end_io_data; | ||
2858 | |||
2859 | rq->end_io_data = NULL; | ||
2860 | __blk_put_request(rq->q, rq); | ||
2861 | |||
2862 | /* | ||
2863 | * complete last, if this is a stack request the process (and thus | ||
2864 | * the rq pointer) could be invalid right after this complete() | ||
2865 | */ | ||
2866 | complete(waiting); | ||
2867 | } | ||
2868 | EXPORT_SYMBOL(blk_end_sync_rq); | ||
2869 | |||
2870 | /* | ||
2871 | * Has to be called with the request spinlock acquired | ||
2872 | */ | ||
2873 | static int attempt_merge(struct request_queue *q, struct request *req, | ||
2874 | struct request *next) | ||
2875 | { | ||
2876 | if (!rq_mergeable(req) || !rq_mergeable(next)) | ||
2877 | return 0; | ||
2878 | |||
2879 | /* | ||
2880 | * not contiguous | ||
2881 | */ | ||
2882 | if (req->sector + req->nr_sectors != next->sector) | ||
2883 | return 0; | ||
2884 | |||
2885 | if (rq_data_dir(req) != rq_data_dir(next) | ||
2886 | || req->rq_disk != next->rq_disk | ||
2887 | || next->special) | ||
2888 | return 0; | ||
2889 | |||
2890 | /* | ||
2891 | * If we are allowed to merge, then append bio list | ||
2892 | * from next to rq and release next. merge_requests_fn | ||
2893 | * will have updated segment counts, update sector | ||
2894 | * counts here. | ||
2895 | */ | ||
2896 | if (!ll_merge_requests_fn(q, req, next)) | ||
2897 | return 0; | ||
2898 | |||
2899 | /* | ||
2900 | * At this point we have either done a back merge | ||
2901 | * or front merge. We need the smaller start_time of | ||
2902 | * the merged requests to be the current request | ||
2903 | * for accounting purposes. | ||
2904 | */ | ||
2905 | if (time_after(req->start_time, next->start_time)) | ||
2906 | req->start_time = next->start_time; | ||
2907 | |||
2908 | req->biotail->bi_next = next->bio; | ||
2909 | req->biotail = next->biotail; | ||
2910 | |||
2911 | req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors; | ||
2912 | |||
2913 | elv_merge_requests(q, req, next); | ||
2914 | |||
2915 | if (req->rq_disk) { | ||
2916 | disk_round_stats(req->rq_disk); | ||
2917 | req->rq_disk->in_flight--; | ||
2918 | } | ||
2919 | |||
2920 | req->ioprio = ioprio_best(req->ioprio, next->ioprio); | ||
2921 | |||
2922 | __blk_put_request(q, next); | ||
2923 | return 1; | ||
2924 | } | ||
2925 | |||
2926 | static inline int attempt_back_merge(struct request_queue *q, | ||
2927 | struct request *rq) | ||
2928 | { | ||
2929 | struct request *next = elv_latter_request(q, rq); | ||
2930 | |||
2931 | if (next) | ||
2932 | return attempt_merge(q, rq, next); | ||
2933 | |||
2934 | return 0; | ||
2935 | } | ||
2936 | |||
2937 | static inline int attempt_front_merge(struct request_queue *q, | ||
2938 | struct request *rq) | ||
2939 | { | ||
2940 | struct request *prev = elv_former_request(q, rq); | ||
2941 | |||
2942 | if (prev) | ||
2943 | return attempt_merge(q, prev, rq); | ||
2944 | |||
2945 | return 0; | ||
2946 | } | ||
2947 | |||
2948 | static void init_request_from_bio(struct request *req, struct bio *bio) | ||
2949 | { | ||
2950 | req->cmd_type = REQ_TYPE_FS; | ||
2951 | |||
2952 | /* | ||
2953 | * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST) | ||
2954 | */ | ||
2955 | if (bio_rw_ahead(bio) || bio_failfast(bio)) | ||
2956 | req->cmd_flags |= REQ_FAILFAST; | ||
2957 | |||
2958 | /* | ||
2959 | * REQ_BARRIER implies no merging, but let's make it explicit | ||
2960 | */ | ||
2961 | if (unlikely(bio_barrier(bio))) | ||
2962 | req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE); | ||
2963 | |||
2964 | if (bio_sync(bio)) | ||
2965 | req->cmd_flags |= REQ_RW_SYNC; | ||
2966 | if (bio_rw_meta(bio)) | ||
2967 | req->cmd_flags |= REQ_RW_META; | ||
2968 | |||
2969 | req->errors = 0; | ||
2970 | req->hard_sector = req->sector = bio->bi_sector; | ||
2971 | req->ioprio = bio_prio(bio); | ||
2972 | req->start_time = jiffies; | ||
2973 | blk_rq_bio_prep(req->q, req, bio); | ||
2974 | } | ||
2975 | |||
2976 | static int __make_request(struct request_queue *q, struct bio *bio) | ||
2977 | { | ||
2978 | struct request *req; | ||
2979 | int el_ret, nr_sectors, barrier, err; | ||
2980 | const unsigned short prio = bio_prio(bio); | ||
2981 | const int sync = bio_sync(bio); | ||
2982 | int rw_flags; | ||
2983 | |||
2984 | nr_sectors = bio_sectors(bio); | ||
2985 | |||
2986 | /* | ||
2987 | * low level driver can indicate that it wants pages above a | ||
2988 | * certain limit bounced to low memory (ie for highmem, or even | ||
2989 | * ISA dma in theory) | ||
2990 | */ | ||
2991 | blk_queue_bounce(q, &bio); | ||
2992 | |||
2993 | barrier = bio_barrier(bio); | ||
2994 | if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) { | ||
2995 | err = -EOPNOTSUPP; | ||
2996 | goto end_io; | ||
2997 | } | ||
2998 | |||
2999 | spin_lock_irq(q->queue_lock); | ||
3000 | |||
3001 | if (unlikely(barrier) || elv_queue_empty(q)) | ||
3002 | goto get_rq; | ||
3003 | |||
3004 | el_ret = elv_merge(q, &req, bio); | ||
3005 | switch (el_ret) { | ||
3006 | case ELEVATOR_BACK_MERGE: | ||
3007 | BUG_ON(!rq_mergeable(req)); | ||
3008 | |||
3009 | if (!ll_back_merge_fn(q, req, bio)) | ||
3010 | break; | ||
3011 | |||
3012 | blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); | ||
3013 | |||
3014 | req->biotail->bi_next = bio; | ||
3015 | req->biotail = bio; | ||
3016 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; | ||
3017 | req->ioprio = ioprio_best(req->ioprio, prio); | ||
3018 | drive_stat_acct(req, nr_sectors, 0); | ||
3019 | if (!attempt_back_merge(q, req)) | ||
3020 | elv_merged_request(q, req, el_ret); | ||
3021 | goto out; | ||
3022 | |||
3023 | case ELEVATOR_FRONT_MERGE: | ||
3024 | BUG_ON(!rq_mergeable(req)); | ||
3025 | |||
3026 | if (!ll_front_merge_fn(q, req, bio)) | ||
3027 | break; | ||
3028 | |||
3029 | blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); | ||
3030 | |||
3031 | bio->bi_next = req->bio; | ||
3032 | req->bio = bio; | ||
3033 | |||
3034 | /* | ||
3035 | * may not be valid: if the low-level driver said | ||
3036 | * it didn't need a bounce buffer, then it had better | ||
3037 | * not touch req->buffer either... | ||
3038 | */ | ||
3039 | req->buffer = bio_data(bio); | ||
3040 | req->current_nr_sectors = bio_cur_sectors(bio); | ||
3041 | req->hard_cur_sectors = req->current_nr_sectors; | ||
3042 | req->sector = req->hard_sector = bio->bi_sector; | ||
3043 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; | ||
3044 | req->ioprio = ioprio_best(req->ioprio, prio); | ||
3045 | drive_stat_acct(req, nr_sectors, 0); | ||
3046 | if (!attempt_front_merge(q, req)) | ||
3047 | elv_merged_request(q, req, el_ret); | ||
3048 | goto out; | ||
3049 | |||
3050 | /* ELEVATOR_NO_MERGE: the elevator says don't/can't merge. */ | ||
3051 | default: | ||
3052 | ; | ||
3053 | } | ||
3054 | |||
3055 | get_rq: | ||
3056 | /* | ||
3057 | * This sync check and mask will be re-done in init_request_from_bio(), | ||
3058 | * but we need to set it earlier to expose the sync flag to the | ||
3059 | * rq allocator and io schedulers. | ||
3060 | */ | ||
3061 | rw_flags = bio_data_dir(bio); | ||
3062 | if (sync) | ||
3063 | rw_flags |= REQ_RW_SYNC; | ||
3064 | |||
3065 | /* | ||
3066 | * Grab a free request. This may sleep, but cannot fail. | ||
3067 | * Returns with the queue unlocked. | ||
3068 | */ | ||
3069 | req = get_request_wait(q, rw_flags, bio); | ||
3070 | |||
3071 | /* | ||
3072 | * After dropping the lock and possibly sleeping here, our request | ||
3073 | * may now be mergeable after it had proven unmergeable (above). | ||
3074 | * We don't worry about that case for efficiency. It won't happen | ||
3075 | * often, and the elevators are able to handle it. | ||
3076 | */ | ||
3077 | init_request_from_bio(req, bio); | ||
3078 | |||
3079 | spin_lock_irq(q->queue_lock); | ||
3080 | if (elv_queue_empty(q)) | ||
3081 | blk_plug_device(q); | ||
3082 | add_request(q, req); | ||
3083 | out: | ||
3084 | if (sync) | ||
3085 | __generic_unplug_device(q); | ||
3086 | |||
3087 | spin_unlock_irq(q->queue_lock); | ||
3088 | return 0; | ||
3089 | |||
3090 | end_io: | ||
3091 | bio_endio(bio, err); | ||
3092 | return 0; | ||
3093 | } | ||
3094 | |||
3095 | /* | ||
3096 | * If bio->bi_bdev is a partition, remap the location | ||
3097 | */ | ||
3098 | static inline void blk_partition_remap(struct bio *bio) | ||
3099 | { | ||
3100 | struct block_device *bdev = bio->bi_bdev; | ||
3101 | |||
3102 | if (bio_sectors(bio) && bdev != bdev->bd_contains) { | ||
3103 | struct hd_struct *p = bdev->bd_part; | ||
3104 | const int rw = bio_data_dir(bio); | ||
3105 | |||
3106 | p->sectors[rw] += bio_sectors(bio); | ||
3107 | p->ios[rw]++; | ||
3108 | |||
3109 | bio->bi_sector += p->start_sect; | ||
3110 | bio->bi_bdev = bdev->bd_contains; | ||
3111 | |||
3112 | blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio, | ||
3113 | bdev->bd_dev, bio->bi_sector, | ||
3114 | bio->bi_sector - p->start_sect); | ||
3115 | } | ||
3116 | } | ||
3117 | |||
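The remap above is plain offset arithmetic: block n of partition p becomes block n + start(p) of the whole disk, and the bio is retargeted at the whole-disk block_device so that lower layers never see the partition. A minimal sketch of the same calculation, with made-up numbers:

	/* Hypothetical: the partition starts at sector 63 of the disk,
	 * and the bio is aimed at sector 100 within that partition. */
	sector_t start_sect = 63;	/* p->start_sect */
	sector_t bi_sector = 100;	/* partition-relative sector */

	bi_sector += start_sect;	/* 163: absolute disk sector */
	/* bio->bi_bdev is switched to bdev->bd_contains in the same
	 * step, so the whole-disk queue is used from here on. */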
3118 | static void handle_bad_sector(struct bio *bio) | ||
3119 | { | ||
3120 | char b[BDEVNAME_SIZE]; | ||
3121 | |||
3122 | printk(KERN_INFO "attempt to access beyond end of device\n"); | ||
3123 | printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", | ||
3124 | bdevname(bio->bi_bdev, b), | ||
3125 | bio->bi_rw, | ||
3126 | (unsigned long long)bio->bi_sector + bio_sectors(bio), | ||
3127 | (long long)(bio->bi_bdev->bd_inode->i_size >> 9)); | ||
3128 | |||
3129 | set_bit(BIO_EOF, &bio->bi_flags); | ||
3130 | } | ||
3131 | |||
3132 | #ifdef CONFIG_FAIL_MAKE_REQUEST | ||
3133 | |||
3134 | static DECLARE_FAULT_ATTR(fail_make_request); | ||
3135 | |||
3136 | static int __init setup_fail_make_request(char *str) | ||
3137 | { | ||
3138 | return setup_fault_attr(&fail_make_request, str); | ||
3139 | } | ||
3140 | __setup("fail_make_request=", setup_fail_make_request); | ||
3141 | |||
3142 | static int should_fail_request(struct bio *bio) | ||
3143 | { | ||
3144 | if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) || | ||
3145 | (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail)) | ||
3146 | return should_fail(&fail_make_request, bio->bi_size); | ||
3147 | |||
3148 | return 0; | ||
3149 | } | ||
3150 | |||
3151 | static int __init fail_make_request_debugfs(void) | ||
3152 | { | ||
3153 | return init_fault_attr_dentries(&fail_make_request, | ||
3154 | "fail_make_request"); | ||
3155 | } | ||
3156 | |||
3157 | late_initcall(fail_make_request_debugfs); | ||
3158 | |||
3159 | #else /* CONFIG_FAIL_MAKE_REQUEST */ | ||
3160 | |||
3161 | static inline int should_fail_request(struct bio *bio) | ||
3162 | { | ||
3163 | return 0; | ||
3164 | } | ||
3165 | |||
3166 | #endif /* CONFIG_FAIL_MAKE_REQUEST */ | ||
3167 | |||
3168 | /* | ||
3169 | * Check whether this bio extends beyond the end of the device. | ||
3170 | */ | ||
3171 | static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) | ||
3172 | { | ||
3173 | sector_t maxsector; | ||
3174 | |||
3175 | if (!nr_sectors) | ||
3176 | return 0; | ||
3177 | |||
3178 | /* Test device or partition size, when known. */ | ||
3179 | maxsector = bio->bi_bdev->bd_inode->i_size >> 9; | ||
3180 | if (maxsector) { | ||
3181 | sector_t sector = bio->bi_sector; | ||
3182 | |||
3183 | if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { | ||
3184 | /* | ||
3185 | * This may well happen - the kernel calls bread() | ||
3186 | * without checking the size of the device, e.g., when | ||
3187 | * mounting a device. | ||
3188 | */ | ||
3189 | handle_bad_sector(bio); | ||
3190 | return 1; | ||
3191 | } | ||
3192 | } | ||
3193 | |||
3194 | return 0; | ||
3195 | } | ||
3196 | |||
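A note on the two-clause check above: the obvious form "sector + nr_sectors > maxsector" could wrap around in sector_t, so the test is rearranged to keep the additions off the left-hand side:

	/* nr_sectors > maxsector:          the bio alone is longer than
	 *                                  the whole device;
	 * maxsector - nr_sectors < sector: the bio starts too close to
	 *                                  the end to fit.
	 * Neither comparison can overflow, since maxsector >= nr_sectors
	 * already holds when the second one is evaluated. */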
3197 | /** | ||
3198 | * generic_make_request - hand a buffer to its device driver for I/O | ||
3199 | * @bio: The bio describing the location in memory and on the device. | ||
3200 | * | ||
3201 | * generic_make_request() is used to make I/O requests of block | ||
3202 | * devices. It is passed a &struct bio, which describes the I/O that needs | ||
3203 | * to be done. | ||
3204 | * | ||
3205 | * generic_make_request() does not return any status. The | ||
3206 | * success/failure status of the request, along with notification of | ||
3207 | * completion, is delivered asynchronously through the bio->bi_end_io | ||
3208 | * callback. | ||
3209 | * | ||
3210 | * The caller of generic_make_request must make sure that bi_io_vec | ||
3211 | * is set to describe the memory buffer, that bi_bdev and bi_sector are | ||
3212 | * set to describe the device address, and that | ||
3213 | * bi_end_io and optionally bi_private are set to describe how | ||
3214 | * completion notification should be signaled. | ||
3215 | * | ||
3216 | * generic_make_request and the drivers it calls may use bi_next if this | ||
3217 | * bio happens to be merged with someone else, and may change bi_dev and | ||
3218 | * bi_sector for remaps as it sees fit. So the values of these fields | ||
3219 | * should NOT be depended on after the call to generic_make_request. | ||
3220 | */ | ||
3221 | static inline void __generic_make_request(struct bio *bio) | ||
3222 | { | ||
3223 | struct request_queue *q; | ||
3224 | sector_t old_sector; | ||
3225 | int ret, nr_sectors = bio_sectors(bio); | ||
3226 | dev_t old_dev; | ||
3227 | |||
3228 | might_sleep(); | ||
3229 | |||
3230 | if (bio_check_eod(bio, nr_sectors)) | ||
3231 | goto end_io; | ||
3232 | |||
3233 | /* | ||
3234 | * Resolve the mapping until finished. (drivers are | ||
3235 | * still free to implement/resolve their own stacking | ||
3236 | * by explicitly returning 0) | ||
3237 | * | ||
3238 | * NOTE: we don't repeat the blk_size check for each new device. | ||
3239 | * Stacking drivers are expected to know what they are doing. | ||
3240 | */ | ||
3241 | old_sector = -1; | ||
3242 | old_dev = 0; | ||
3243 | do { | ||
3244 | char b[BDEVNAME_SIZE]; | ||
3245 | |||
3246 | q = bdev_get_queue(bio->bi_bdev); | ||
3247 | if (!q) { | ||
3248 | printk(KERN_ERR | ||
3249 | "generic_make_request: Trying to access " | ||
3250 | "nonexistent block-device %s (%Lu)\n", | ||
3251 | bdevname(bio->bi_bdev, b), | ||
3252 | (long long) bio->bi_sector); | ||
3253 | end_io: | ||
3254 | bio_endio(bio, -EIO); | ||
3255 | break; | ||
3256 | } | ||
3257 | |||
3258 | if (unlikely(nr_sectors > q->max_hw_sectors)) { | ||
3259 | printk("bio too big device %s (%u > %u)\n", | ||
3260 | bdevname(bio->bi_bdev, b), | ||
3261 | bio_sectors(bio), | ||
3262 | q->max_hw_sectors); | ||
3263 | goto end_io; | ||
3264 | } | ||
3265 | |||
3266 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) | ||
3267 | goto end_io; | ||
3268 | |||
3269 | if (should_fail_request(bio)) | ||
3270 | goto end_io; | ||
3271 | |||
3272 | /* | ||
3273 | * If this device has partitions, remap block n | ||
3274 | * of partition p to block n+start(p) of the disk. | ||
3275 | */ | ||
3276 | blk_partition_remap(bio); | ||
3277 | |||
3278 | if (old_sector != -1) | ||
3279 | blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, | ||
3280 | old_sector); | ||
3281 | |||
3282 | blk_add_trace_bio(q, bio, BLK_TA_QUEUE); | ||
3283 | |||
3284 | old_sector = bio->bi_sector; | ||
3285 | old_dev = bio->bi_bdev->bd_dev; | ||
3286 | |||
3287 | if (bio_check_eod(bio, nr_sectors)) | ||
3288 | goto end_io; | ||
3289 | |||
3290 | ret = q->make_request_fn(q, bio); | ||
3291 | } while (ret); | ||
3292 | } | ||
3293 | |||
3294 | /* | ||
3295 | * We only want one ->make_request_fn to be active at a time, | ||
3296 | * else stack usage with stacked devices could be a problem. | ||
3297 | * So use current->bio_{list,tail} to keep a list of requests | ||
3298 | * submitted by a make_request_fn function. | ||
3299 | * current->bio_tail is also used as a flag to say if | ||
3300 | * generic_make_request is currently active in this task or not. | ||
3301 | * If it is NULL, then no make_request is active. If it is non-NULL, | ||
3302 | * then a make_request is active, and new requests should be added | ||
3303 | * at the tail. | ||
3304 | */ | ||
3305 | void generic_make_request(struct bio *bio) | ||
3306 | { | ||
3307 | if (current->bio_tail) { | ||
3308 | /* make_request is active */ | ||
3309 | *(current->bio_tail) = bio; | ||
3310 | bio->bi_next = NULL; | ||
3311 | current->bio_tail = &bio->bi_next; | ||
3312 | return; | ||
3313 | } | ||
3314 | /* The following loop may be a bit non-obvious, and so deserves some | ||
3315 | * explanation. | ||
3316 | * Before entering the loop, bio->bi_next is NULL (as all callers | ||
3317 | * ensure that) so we have a list with a single bio. | ||
3318 | * We pretend that we have just taken it off a longer list, so | ||
3319 | * we assign bio_list to the next (which is NULL) and bio_tail | ||
3320 | * to &bio_list, thus initialising the bio_list of new bios to be | ||
3321 | * added. __generic_make_request may indeed add some more bios | ||
3322 | * through a recursive call to generic_make_request. If it | ||
3323 | * did, we find a non-NULL value in bio_list and re-enter the loop | ||
3324 | * from the top. In this case we really did just take the bio | ||
3325 | * off the top of the list (no pretending) and so fix up bio_list and | ||
3326 | * bio_tail or bi_next, and call into __generic_make_request again. | ||
3327 | * | ||
3328 | * The loop was structured like this to make only one call to | ||
3329 | * __generic_make_request (which is important as it is large and | ||
3330 | * inlined) and to keep the structure simple. | ||
3331 | */ | ||
3332 | BUG_ON(bio->bi_next); | ||
3333 | do { | ||
3334 | current->bio_list = bio->bi_next; | ||
3335 | if (bio->bi_next == NULL) | ||
3336 | current->bio_tail = ¤t->bio_list; | ||
3337 | else | ||
3338 | bio->bi_next = NULL; | ||
3339 | __generic_make_request(bio); | ||
3340 | bio = current->bio_list; | ||
3341 | } while (bio); | ||
3342 | current->bio_tail = NULL; /* deactivate */ | ||
3343 | } | ||
3344 | |||
3345 | EXPORT_SYMBOL(generic_make_request); | ||
3346 | |||
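The do/while above over ->make_request_fn is what makes cheap stacking possible: rather than recursing, a remapping driver can rewrite bi_bdev and bi_sector and return non-zero, and the loop re-resolves the queue for the new target (returning 0 ends the loop, for drivers that handle stacking themselves). A hedged sketch of such a driver; struct my_target and its fields are hypothetical:

static int my_linear_make_request(struct request_queue *q, struct bio *bio)
{
	/* hypothetical 1:1 linear target, stored in the queue's
	 * private data at setup time */
	struct my_target *t = q->queuedata;

	bio->bi_bdev = t->backing_bdev;		/* new destination device */
	bio->bi_sector += t->start_sector;	/* linear remap */

	/* non-zero: __generic_make_request loops and re-resolves
	 * the queue for the remapped bi_bdev */
	return 1;
}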
3347 | /** | ||
3348 | * submit_bio - submit a bio to the block device layer for I/O | ||
3349 | * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) | ||
3350 | * @bio: The &struct bio which describes the I/O | ||
3351 | * | ||
3352 | * submit_bio() is very similar in purpose to generic_make_request(), and | ||
3353 | * uses that function to do most of the work. Both are fairly rough | ||
3354 | * interfaces; @bio must be fully set up and ready for I/O. | ||
3355 | * | ||
3356 | */ | ||
3357 | void submit_bio(int rw, struct bio *bio) | ||
3358 | { | ||
3359 | int count = bio_sectors(bio); | ||
3360 | |||
3361 | bio->bi_rw |= rw; | ||
3362 | |||
3363 | /* | ||
3364 | * If it's a regular read/write or a barrier with data attached, | ||
3365 | * go through the normal accounting stuff before submission. | ||
3366 | */ | ||
3367 | if (!bio_empty_barrier(bio)) { | ||
3368 | |||
3369 | BIO_BUG_ON(!bio->bi_size); | ||
3370 | BIO_BUG_ON(!bio->bi_io_vec); | ||
3371 | |||
3372 | if (rw & WRITE) { | ||
3373 | count_vm_events(PGPGOUT, count); | ||
3374 | } else { | ||
3375 | task_io_account_read(bio->bi_size); | ||
3376 | count_vm_events(PGPGIN, count); | ||
3377 | } | ||
3378 | |||
3379 | if (unlikely(block_dump)) { | ||
3380 | char b[BDEVNAME_SIZE]; | ||
3381 | printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n", | ||
3382 | current->comm, task_pid_nr(current), | ||
3383 | (rw & WRITE) ? "WRITE" : "READ", | ||
3384 | (unsigned long long)bio->bi_sector, | ||
3385 | bdevname(bio->bi_bdev,b)); | ||
3386 | } | ||
3387 | } | ||
3388 | |||
3389 | generic_make_request(bio); | ||
3390 | } | ||
3391 | |||
3392 | EXPORT_SYMBOL(submit_bio); | ||
3393 | |||
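As the comment says, @bio must be fully set up before submission. A minimal sketch of a caller reading one page; my_end_io and read_one_page are illustrative names, and all error handling is omitted:

static void my_end_io(struct bio *bio, int err)
{
	/* completion runs asynchronously; just drop our reference */
	bio_put(bio);
}

static void read_one_page(struct block_device *bdev, sector_t sector,
			  struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = bdev;			/* device address */
	bio->bi_sector = sector;		/* location on it */
	bio_add_page(bio, page, PAGE_SIZE, 0);	/* memory buffer */
	bio->bi_end_io = my_end_io;		/* completion callback */

	submit_bio(READ, bio);
}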
3394 | static void blk_recalc_rq_sectors(struct request *rq, int nsect) | ||
3395 | { | ||
3396 | if (blk_fs_request(rq)) { | ||
3397 | rq->hard_sector += nsect; | ||
3398 | rq->hard_nr_sectors -= nsect; | ||
3399 | |||
3400 | /* | ||
3401 | * Move the I/O submission pointers ahead if required. | ||
3402 | */ | ||
3403 | if ((rq->nr_sectors >= rq->hard_nr_sectors) && | ||
3404 | (rq->sector <= rq->hard_sector)) { | ||
3405 | rq->sector = rq->hard_sector; | ||
3406 | rq->nr_sectors = rq->hard_nr_sectors; | ||
3407 | rq->hard_cur_sectors = bio_cur_sectors(rq->bio); | ||
3408 | rq->current_nr_sectors = rq->hard_cur_sectors; | ||
3409 | rq->buffer = bio_data(rq->bio); | ||
3410 | } | ||
3411 | |||
3412 | /* | ||
3413 | * If the total number of sectors is less than the first segment | ||
3414 | * size, something has gone terribly wrong. | ||
3415 | */ | ||
3416 | if (rq->nr_sectors < rq->current_nr_sectors) { | ||
3417 | printk(KERN_ERR "blk: request botched\n"); | ||
3418 | rq->nr_sectors = rq->current_nr_sectors; | ||
3419 | } | ||
3420 | } | ||
3421 | } | ||
3422 | |||
3423 | static int __end_that_request_first(struct request *req, int uptodate, | ||
3424 | int nr_bytes) | ||
3425 | { | ||
3426 | int total_bytes, bio_nbytes, error, next_idx = 0; | ||
3427 | struct bio *bio; | ||
3428 | |||
3429 | blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE); | ||
3430 | |||
3431 | /* | ||
3432 | * extend the uptodate bool to allow a < 0 value to signal a direct I/O error | ||
3433 | */ | ||
3434 | error = 0; | ||
3435 | if (end_io_error(uptodate)) | ||
3436 | error = !uptodate ? -EIO : uptodate; | ||
3437 | |||
3438 | /* | ||
3439 | * for a REQ_BLOCK_PC request, we want to carry any eventual | ||
3440 | * sense key with us all the way through | ||
3441 | */ | ||
3442 | if (!blk_pc_request(req)) | ||
3443 | req->errors = 0; | ||
3444 | |||
3445 | if (!uptodate) { | ||
3446 | if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET)) | ||
3447 | printk("end_request: I/O error, dev %s, sector %llu\n", | ||
3448 | req->rq_disk ? req->rq_disk->disk_name : "?", | ||
3449 | (unsigned long long)req->sector); | ||
3450 | } | ||
3451 | |||
3452 | if (blk_fs_request(req) && req->rq_disk) { | ||
3453 | const int rw = rq_data_dir(req); | ||
3454 | |||
3455 | disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9); | ||
3456 | } | ||
3457 | |||
3458 | total_bytes = bio_nbytes = 0; | ||
3459 | while ((bio = req->bio) != NULL) { | ||
3460 | int nbytes; | ||
3461 | |||
3462 | /* | ||
3463 | * For an empty barrier request, the low level driver must | ||
3464 | * store a potential error location in ->sector. We pass | ||
3465 | * that back up in ->bi_sector. | ||
3466 | */ | ||
3467 | if (blk_empty_barrier(req)) | ||
3468 | bio->bi_sector = req->sector; | ||
3469 | |||
3470 | if (nr_bytes >= bio->bi_size) { | ||
3471 | req->bio = bio->bi_next; | ||
3472 | nbytes = bio->bi_size; | ||
3473 | req_bio_endio(req, bio, nbytes, error); | ||
3474 | next_idx = 0; | ||
3475 | bio_nbytes = 0; | ||
3476 | } else { | ||
3477 | int idx = bio->bi_idx + next_idx; | ||
3478 | |||
3479 | if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { | ||
3480 | blk_dump_rq_flags(req, "__end_that"); | ||
3481 | printk("%s: bio idx %d >= vcnt %d\n", | ||
3482 | __FUNCTION__, | ||
3483 | bio->bi_idx, bio->bi_vcnt); | ||
3484 | break; | ||
3485 | } | ||
3486 | |||
3487 | nbytes = bio_iovec_idx(bio, idx)->bv_len; | ||
3488 | BIO_BUG_ON(nbytes > bio->bi_size); | ||
3489 | |||
3490 | /* | ||
3491 | * only part of this bvec was completed | ||
3492 | */ | ||
3493 | if (unlikely(nbytes > nr_bytes)) { | ||
3494 | bio_nbytes += nr_bytes; | ||
3495 | total_bytes += nr_bytes; | ||
3496 | break; | ||
3497 | } | ||
3498 | |||
3499 | /* | ||
3500 | * advance to the next vector | ||
3501 | */ | ||
3502 | next_idx++; | ||
3503 | bio_nbytes += nbytes; | ||
3504 | } | ||
3505 | |||
3506 | total_bytes += nbytes; | ||
3507 | nr_bytes -= nbytes; | ||
3508 | |||
3509 | if ((bio = req->bio)) { | ||
3510 | /* | ||
3511 | * end more in this run, or just return 'not-done' | ||
3512 | */ | ||
3513 | if (unlikely(nr_bytes <= 0)) | ||
3514 | break; | ||
3515 | } | ||
3516 | } | ||
3517 | |||
3518 | /* | ||
3519 | * completely done | ||
3520 | */ | ||
3521 | if (!req->bio) | ||
3522 | return 0; | ||
3523 | |||
3524 | /* | ||
3525 | * if the request wasn't completed, update state | ||
3526 | */ | ||
3527 | if (bio_nbytes) { | ||
3528 | req_bio_endio(req, bio, bio_nbytes, error); | ||
3529 | bio->bi_idx += next_idx; | ||
3530 | bio_iovec(bio)->bv_offset += nr_bytes; | ||
3531 | bio_iovec(bio)->bv_len -= nr_bytes; | ||
3532 | } | ||
3533 | |||
3534 | blk_recalc_rq_sectors(req, total_bytes >> 9); | ||
3535 | blk_recalc_rq_segments(req); | ||
3536 | return 1; | ||
3537 | } | ||
3538 | |||
3539 | /** | ||
3540 | * end_that_request_first - end I/O on a request | ||
3541 | * @req: the request being processed | ||
3542 | * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error | ||
3543 | * @nr_sectors: number of sectors to end I/O on | ||
3544 | * | ||
3545 | * Description: | ||
3546 | * Ends I/O on a number of sectors attached to @req, and sets it up | ||
3547 | * for the next range of segments (if any) in the cluster. | ||
3548 | * | ||
3549 | * Return: | ||
3550 | * 0 - we are done with this request, call end_that_request_last() | ||
3551 | * 1 - still buffers pending for this request | ||
3552 | **/ | ||
3553 | int end_that_request_first(struct request *req, int uptodate, int nr_sectors) | ||
3554 | { | ||
3555 | return __end_that_request_first(req, uptodate, nr_sectors << 9); | ||
3556 | } | ||
3557 | |||
3558 | EXPORT_SYMBOL(end_that_request_first); | ||
3559 | |||
3560 | /** | ||
3561 | * end_that_request_chunk - end I/O on a request | ||
3562 | * @req: the request being processed | ||
3563 | * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error | ||
3564 | * @nr_bytes: number of bytes to complete | ||
3565 | * | ||
3566 | * Description: | ||
3567 | * Ends I/O on a number of bytes attached to @req, and sets it up | ||
3568 | * for the next range of segments (if any). Like end_that_request_first(), | ||
3569 | * but deals with bytes instead of sectors. | ||
3570 | * | ||
3571 | * Return: | ||
3572 | * 0 - we are done with this request, call end_that_request_last() | ||
3573 | * 1 - still buffers pending for this request | ||
3574 | **/ | ||
3575 | int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes) | ||
3576 | { | ||
3577 | return __end_that_request_first(req, uptodate, nr_bytes); | ||
3578 | } | ||
3579 | |||
3580 | EXPORT_SYMBOL(end_that_request_chunk); | ||
3581 | |||
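Taken together, end_that_request_first()/end_that_request_chunk() and end_that_request_last() form the classic two-step completion a driver performs under the queue lock. A sketch of the usual pattern (it mirrors __end_request() below):

/* Complete nsect sectors of req; if that exhausted the request,
 * take it off the queue and finish it. The caller holds
 * q->queue_lock, as end_that_request_last() requires. */
static void my_complete_sectors(struct request *req, int uptodate, int nsect)
{
	if (!end_that_request_first(req, uptodate, nsect)) {
		/* 0: no buffers pending, the request is done */
		blkdev_dequeue_request(req);
		end_that_request_last(req, uptodate);
	}
	/* 1: more segments remain; complete them on a later call */
}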
3582 | /* | ||
3583 | * splice the completion data to a local list, then complete each | ||
3584 | * request via its queue's softirq_done_fn | ||
3585 | */ | ||
3586 | static void blk_done_softirq(struct softirq_action *h) | ||
3587 | { | ||
3588 | struct list_head *cpu_list, local_list; | ||
3589 | |||
3590 | local_irq_disable(); | ||
3591 | cpu_list = &__get_cpu_var(blk_cpu_done); | ||
3592 | list_replace_init(cpu_list, &local_list); | ||
3593 | local_irq_enable(); | ||
3594 | |||
3595 | while (!list_empty(&local_list)) { | ||
3596 | struct request *rq = list_entry(local_list.next, struct request, donelist); | ||
3597 | |||
3598 | list_del_init(&rq->donelist); | ||
3599 | rq->q->softirq_done_fn(rq); | ||
3600 | } | ||
3601 | } | ||
3602 | |||
3603 | static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action, | ||
3604 | void *hcpu) | ||
3605 | { | ||
3606 | /* | ||
3607 | * If a CPU goes away, splice its entries to the current CPU | ||
3608 | * and trigger a run of the softirq | ||
3609 | */ | ||
3610 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { | ||
3611 | int cpu = (unsigned long) hcpu; | ||
3612 | |||
3613 | local_irq_disable(); | ||
3614 | list_splice_init(&per_cpu(blk_cpu_done, cpu), | ||
3615 | &__get_cpu_var(blk_cpu_done)); | ||
3616 | raise_softirq_irqoff(BLOCK_SOFTIRQ); | ||
3617 | local_irq_enable(); | ||
3618 | } | ||
3619 | |||
3620 | return NOTIFY_OK; | ||
3621 | } | ||
3622 | |||
3623 | |||
3624 | static struct notifier_block blk_cpu_notifier __cpuinitdata = { | ||
3625 | .notifier_call = blk_cpu_notify, | ||
3626 | }; | ||
3627 | |||
3628 | /** | ||
3629 | * blk_complete_request - end I/O on a request | ||
3630 | * @req: the request being processed | ||
3631 | * | ||
3632 | * Description: | ||
3633 | * Ends all I/O on a request. It does not handle partial completions, | ||
3634 | * unless the driver actually implements this in its completion callback | ||
3635 | * through requeueing. The actual completion happens out-of-order, | ||
3636 | * through a softirq handler. The user must have registered a completion | ||
3637 | * callback through blk_queue_softirq_done(). | ||
3638 | **/ | ||
3639 | |||
3640 | void blk_complete_request(struct request *req) | ||
3641 | { | ||
3642 | struct list_head *cpu_list; | ||
3643 | unsigned long flags; | ||
3644 | |||
3645 | BUG_ON(!req->q->softirq_done_fn); | ||
3646 | |||
3647 | local_irq_save(flags); | ||
3648 | |||
3649 | cpu_list = &__get_cpu_var(blk_cpu_done); | ||
3650 | list_add_tail(&req->donelist, cpu_list); | ||
3651 | raise_softirq_irqoff(BLOCK_SOFTIRQ); | ||
3652 | |||
3653 | local_irq_restore(flags); | ||
3654 | } | ||
3655 | |||
3656 | EXPORT_SYMBOL(blk_complete_request); | ||
3657 | |||
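A driver opting into softirq completion registers a callback once with blk_queue_softirq_done() and then calls blk_complete_request() from its hardware interrupt handler; the real completion work runs later in blk_done_softirq() above. A sketch, where my_fetch_done() is a hypothetical driver helper and the request is assumed to have been dequeued when the I/O was started:

static void my_softirq_done(struct request *rq)
{
	/* softirq context: safe place for the heavier completion work */
	end_dequeued_request(rq, !rq->errors);
}

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct request *rq = my_fetch_done(dev_id);	/* hypothetical */

	blk_complete_request(rq);	/* only queues it for BLOCK_SOFTIRQ */
	return IRQ_HANDLED;
}

	/* at queue init time: */
	blk_queue_softirq_done(q, my_softirq_done);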
3658 | /* | ||
3659 | * queue lock must be held | ||
3660 | */ | ||
3661 | void end_that_request_last(struct request *req, int uptodate) | ||
3662 | { | ||
3663 | struct gendisk *disk = req->rq_disk; | ||
3664 | int error; | ||
3665 | |||
3666 | /* | ||
3667 | * extend the uptodate bool to allow a < 0 value to signal a direct I/O error | ||
3668 | */ | ||
3669 | error = 0; | ||
3670 | if (end_io_error(uptodate)) | ||
3671 | error = !uptodate ? -EIO : uptodate; | ||
3672 | |||
3673 | if (unlikely(laptop_mode) && blk_fs_request(req)) | ||
3674 | laptop_io_completion(); | ||
3675 | |||
3676 | /* | ||
3677 | * Account for IO completion. bar_rq isn't accounted as normal | ||
3678 | * IO on either queueing or completion. Accounting the containing | ||
3679 | * request is enough. | ||
3680 | */ | ||
3681 | if (disk && blk_fs_request(req) && req != &req->q->bar_rq) { | ||
3682 | unsigned long duration = jiffies - req->start_time; | ||
3683 | const int rw = rq_data_dir(req); | ||
3684 | |||
3685 | __disk_stat_inc(disk, ios[rw]); | ||
3686 | __disk_stat_add(disk, ticks[rw], duration); | ||
3687 | disk_round_stats(disk); | ||
3688 | disk->in_flight--; | ||
3689 | } | ||
3690 | if (req->end_io) | ||
3691 | req->end_io(req, error); | ||
3692 | else | ||
3693 | __blk_put_request(req->q, req); | ||
3694 | } | ||
3695 | |||
3696 | EXPORT_SYMBOL(end_that_request_last); | ||
3697 | |||
3698 | static inline void __end_request(struct request *rq, int uptodate, | ||
3699 | unsigned int nr_bytes, int dequeue) | ||
3700 | { | ||
3701 | if (!end_that_request_chunk(rq, uptodate, nr_bytes)) { | ||
3702 | if (dequeue) | ||
3703 | blkdev_dequeue_request(rq); | ||
3704 | add_disk_randomness(rq->rq_disk); | ||
3705 | end_that_request_last(rq, uptodate); | ||
3706 | } | ||
3707 | } | ||
3708 | |||
3709 | static unsigned int rq_byte_size(struct request *rq) | ||
3710 | { | ||
3711 | if (blk_fs_request(rq)) | ||
3712 | return rq->hard_nr_sectors << 9; | ||
3713 | |||
3714 | return rq->data_len; | ||
3715 | } | ||
3716 | |||
3717 | /** | ||
3718 | * end_queued_request - end all I/O on a queued request | ||
3719 | * @rq: the request being processed | ||
3720 | * @uptodate: error value or 0/1 uptodate flag | ||
3721 | * | ||
3722 | * Description: | ||
3723 | * Ends all I/O on a request, and removes it from the block layer queues. | ||
3724 | * Not suitable for normal IO completion, unless the driver still has | ||
3725 | * the request attached to the block layer. | ||
3726 | * | ||
3727 | **/ | ||
3728 | void end_queued_request(struct request *rq, int uptodate) | ||
3729 | { | ||
3730 | __end_request(rq, uptodate, rq_byte_size(rq), 1); | ||
3731 | } | ||
3732 | EXPORT_SYMBOL(end_queued_request); | ||
3733 | |||
3734 | /** | ||
3735 | * end_dequeued_request - end all I/O on a dequeued request | ||
3736 | * @rq: the request being processed | ||
3737 | * @uptodate: error value or 0/1 uptodate flag | ||
3738 | * | ||
3739 | * Description: | ||
3740 | * Ends all I/O on a request. The request must already have been | ||
3741 | * dequeued using blkdev_dequeue_request(), as is normally the case | ||
3742 | * for most drivers. | ||
3743 | * | ||
3744 | **/ | ||
3745 | void end_dequeued_request(struct request *rq, int uptodate) | ||
3746 | { | ||
3747 | __end_request(rq, uptodate, rq_byte_size(rq), 0); | ||
3748 | } | ||
3749 | EXPORT_SYMBOL(end_dequeued_request); | ||
3750 | |||
3751 | |||
3752 | /** | ||
3753 | * end_request - end I/O on the current segment of the request | ||
3754 | * @req: the request being processed | ||
3755 | * @uptodate: error value or 0/1 uptodate flag | ||
3756 | * | ||
3757 | * Description: | ||
3758 | * Ends I/O on the current segment of a request. If that is the only | ||
3759 | * remaining segment, the request is also completed and freed. | ||
3760 | * | ||
3761 | * This is a remnant of how older block drivers handled IO completions. | ||
3762 | * Modern drivers typically end IO on the full request in one go, unless | ||
3763 | * they have a residual value to account for. For that case this function | ||
3764 | * isn't really useful, unless the residual just happens to be the | ||
3765 | * full current segment. In other words, don't use this function in new | ||
3766 | * code. Either use end_queued_request()/end_dequeued_request(), or | ||
3767 | * end_that_request_chunk() (along with end_that_request_last()) for | ||
3768 | * partial completions. | ||
3769 | * | ||
3770 | **/ | ||
3771 | void end_request(struct request *req, int uptodate) | ||
3772 | { | ||
3773 | __end_request(req, uptodate, req->hard_cur_sectors << 9, 1); | ||
3774 | } | ||
3775 | EXPORT_SYMBOL(end_request); | ||
3776 | |||
3777 | static void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | ||
3778 | struct bio *bio) | ||
3779 | { | ||
3780 | /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ | ||
3781 | rq->cmd_flags |= (bio->bi_rw & 3); | ||
3782 | |||
3783 | rq->nr_phys_segments = bio_phys_segments(q, bio); | ||
3784 | rq->nr_hw_segments = bio_hw_segments(q, bio); | ||
3785 | rq->current_nr_sectors = bio_cur_sectors(bio); | ||
3786 | rq->hard_cur_sectors = rq->current_nr_sectors; | ||
3787 | rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); | ||
3788 | rq->buffer = bio_data(bio); | ||
3789 | rq->data_len = bio->bi_size; | ||
3790 | |||
3791 | rq->bio = rq->biotail = bio; | ||
3792 | |||
3793 | if (bio->bi_bdev) | ||
3794 | rq->rq_disk = bio->bi_bdev->bd_disk; | ||
3795 | } | ||
3796 | |||
3797 | int kblockd_schedule_work(struct work_struct *work) | ||
3798 | { | ||
3799 | return queue_work(kblockd_workqueue, work); | ||
3800 | } | ||
3801 | |||
3802 | EXPORT_SYMBOL(kblockd_schedule_work); | ||
3803 | |||
3804 | void kblockd_flush_work(struct work_struct *work) | ||
3805 | { | ||
3806 | cancel_work_sync(work); | ||
3807 | } | ||
3808 | EXPORT_SYMBOL(kblockd_flush_work); | ||
3809 | |||
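kblockd is the block layer's private workqueue: work that cannot run in the current (possibly atomic) context can be bounced there, and kblockd_flush_work() is the matching teardown-time barrier. A brief sketch with an illustrative work item:

static void my_deferred_fn(struct work_struct *work)
{
	/* runs later in kblockd's process context */
}

static DECLARE_WORK(my_deferred_work, my_deferred_fn);

	/* from atomic context: */
	kblockd_schedule_work(&my_deferred_work);

	/* before freeing whatever the work touches: */
	kblockd_flush_work(&my_deferred_work);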
3810 | int __init blk_dev_init(void) | ||
3811 | { | ||
3812 | int i; | ||
3813 | |||
3814 | kblockd_workqueue = create_workqueue("kblockd"); | ||
3815 | if (!kblockd_workqueue) | ||
3816 | panic("Failed to create kblockd\n"); | ||
3817 | |||
3818 | request_cachep = kmem_cache_create("blkdev_requests", | ||
3819 | sizeof(struct request), 0, SLAB_PANIC, NULL); | ||
3820 | |||
3821 | requestq_cachep = kmem_cache_create("blkdev_queue", | ||
3822 | sizeof(struct request_queue), 0, SLAB_PANIC, NULL); | ||
3823 | |||
3824 | iocontext_cachep = kmem_cache_create("blkdev_ioc", | ||
3825 | sizeof(struct io_context), 0, SLAB_PANIC, NULL); | ||
3826 | |||
3827 | for_each_possible_cpu(i) | ||
3828 | INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); | ||
3829 | |||
3830 | open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL); | ||
3831 | register_hotcpu_notifier(&blk_cpu_notifier); | ||
3832 | |||
3833 | blk_max_low_pfn = max_low_pfn - 1; | ||
3834 | blk_max_pfn = max_pfn - 1; | ||
3835 | |||
3836 | return 0; | ||
3837 | } | ||
3838 | |||
3839 | /* | ||
3840 | * IO Context helper functions | ||
3841 | */ | ||
3842 | void put_io_context(struct io_context *ioc) | ||
3843 | { | ||
3844 | if (ioc == NULL) | ||
3845 | return; | ||
3846 | |||
3847 | BUG_ON(atomic_read(&ioc->refcount) == 0); | ||
3848 | |||
3849 | if (atomic_dec_and_test(&ioc->refcount)) { | ||
3850 | struct cfq_io_context *cic; | ||
3851 | |||
3852 | rcu_read_lock(); | ||
3853 | if (ioc->aic && ioc->aic->dtor) | ||
3854 | ioc->aic->dtor(ioc->aic); | ||
3855 | if (ioc->cic_root.rb_node != NULL) { | ||
3856 | struct rb_node *n = rb_first(&ioc->cic_root); | ||
3857 | |||
3858 | cic = rb_entry(n, struct cfq_io_context, rb_node); | ||
3859 | cic->dtor(ioc); | ||
3860 | } | ||
3861 | rcu_read_unlock(); | ||
3862 | |||
3863 | kmem_cache_free(iocontext_cachep, ioc); | ||
3864 | } | ||
3865 | } | ||
3866 | EXPORT_SYMBOL(put_io_context); | ||
3867 | |||
3868 | /* Called by the exiting task */ | ||
3869 | void exit_io_context(void) | ||
3870 | { | ||
3871 | struct io_context *ioc; | ||
3872 | struct cfq_io_context *cic; | ||
3873 | |||
3874 | task_lock(current); | ||
3875 | ioc = current->io_context; | ||
3876 | current->io_context = NULL; | ||
3877 | task_unlock(current); | ||
3878 | |||
3879 | ioc->task = NULL; | ||
3880 | if (ioc->aic && ioc->aic->exit) | ||
3881 | ioc->aic->exit(ioc->aic); | ||
3882 | if (ioc->cic_root.rb_node != NULL) { | ||
3883 | cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node); | ||
3884 | cic->exit(ioc); | ||
3885 | } | ||
3886 | |||
3887 | put_io_context(ioc); | ||
3888 | } | ||
3889 | |||
3890 | /* | ||
3891 | * If the current task has no IO context then create one and initialise it. | ||
3892 | * Otherwise, return its existing IO context. | ||
3893 | * | ||
3894 | * This returned IO context doesn't have a specifically elevated refcount, | ||
3895 | * but since the current task itself holds a reference, the context can be | ||
3896 | * used in general code, so long as it stays within `current` context. | ||
3897 | */ | ||
3898 | static struct io_context *current_io_context(gfp_t gfp_flags, int node) | ||
3899 | { | ||
3900 | struct task_struct *tsk = current; | ||
3901 | struct io_context *ret; | ||
3902 | |||
3903 | ret = tsk->io_context; | ||
3904 | if (likely(ret)) | ||
3905 | return ret; | ||
3906 | |||
3907 | ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); | ||
3908 | if (ret) { | ||
3909 | atomic_set(&ret->refcount, 1); | ||
3910 | ret->task = current; | ||
3911 | ret->ioprio_changed = 0; | ||
3912 | ret->last_waited = jiffies; /* doesn't matter... */ | ||
3913 | ret->nr_batch_requests = 0; /* because this is 0 */ | ||
3914 | ret->aic = NULL; | ||
3915 | ret->cic_root.rb_node = NULL; | ||
3916 | ret->ioc_data = NULL; | ||
3917 | /* make sure set_task_ioprio() sees the settings above */ | ||
3918 | smp_wmb(); | ||
3919 | tsk->io_context = ret; | ||
3920 | } | ||
3921 | |||
3922 | return ret; | ||
3923 | } | ||
3924 | |||
3925 | /* | ||
3926 | * If the current task has no IO context then create one and initialise it. | ||
3927 | * If it does have a context, take a ref on it. | ||
3928 | * | ||
3929 | * This is always called in the context of the task which submitted the I/O. | ||
3930 | */ | ||
3931 | struct io_context *get_io_context(gfp_t gfp_flags, int node) | ||
3932 | { | ||
3933 | struct io_context *ret; | ||
3934 | ret = current_io_context(gfp_flags, node); | ||
3935 | if (likely(ret)) | ||
3936 | atomic_inc(&ret->refcount); | ||
3937 | return ret; | ||
3938 | } | ||
3939 | EXPORT_SYMBOL(get_io_context); | ||
3940 | |||
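The refcounting contract is the usual get/put pairing: get_io_context() returns the submitting task's context with an extra reference that the caller must eventually drop with put_io_context(), as the request allocator does elsewhere in this file. Sketch:

	struct io_context *ioc;

	ioc = get_io_context(GFP_NOIO, q->node);	/* +1 ref, may allocate */
	if (ioc) {
		/* e.g. stash it in a request, or check ioc->ioprio_changed */
		put_io_context(ioc);			/* drop our reference */
	}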
3941 | void copy_io_context(struct io_context **pdst, struct io_context **psrc) | ||
3942 | { | ||
3943 | struct io_context *src = *psrc; | ||
3944 | struct io_context *dst = *pdst; | ||
3945 | |||
3946 | if (src) { | ||
3947 | BUG_ON(atomic_read(&src->refcount) == 0); | ||
3948 | atomic_inc(&src->refcount); | ||
3949 | put_io_context(dst); | ||
3950 | *pdst = src; | ||
3951 | } | ||
3952 | } | ||
3953 | EXPORT_SYMBOL(copy_io_context); | ||
3954 | |||
3955 | void swap_io_context(struct io_context **ioc1, struct io_context **ioc2) | ||
3956 | { | ||
3957 | struct io_context *temp; | ||
3958 | temp = *ioc1; | ||
3959 | *ioc1 = *ioc2; | ||
3960 | *ioc2 = temp; | ||
3961 | } | ||
3962 | EXPORT_SYMBOL(swap_io_context); | ||
3963 | |||
3964 | /* | ||
3965 | * sysfs parts below | ||
3966 | */ | ||
3967 | struct queue_sysfs_entry { | ||
3968 | struct attribute attr; | ||
3969 | ssize_t (*show)(struct request_queue *, char *); | ||
3970 | ssize_t (*store)(struct request_queue *, const char *, size_t); | ||
3971 | }; | ||
3972 | |||
3973 | static ssize_t | ||
3974 | queue_var_show(unsigned int var, char *page) | ||
3975 | { | ||
3976 | return sprintf(page, "%d\n", var); | ||
3977 | } | ||
3978 | |||
3979 | static ssize_t | ||
3980 | queue_var_store(unsigned long *var, const char *page, size_t count) | ||
3981 | { | ||
3982 | char *p = (char *) page; | ||
3983 | |||
3984 | *var = simple_strtoul(p, &p, 10); | ||
3985 | return count; | ||
3986 | } | ||
3987 | |||
3988 | static ssize_t queue_requests_show(struct request_queue *q, char *page) | ||
3989 | { | ||
3990 | return queue_var_show(q->nr_requests, page); | ||
3991 | } | ||
3992 | |||
3993 | static ssize_t | ||
3994 | queue_requests_store(struct request_queue *q, const char *page, size_t count) | ||
3995 | { | ||
3996 | struct request_list *rl = &q->rq; | ||
3997 | unsigned long nr; | ||
3998 | int ret = queue_var_store(&nr, page, count); | ||
3999 | if (nr < BLKDEV_MIN_RQ) | ||
4000 | nr = BLKDEV_MIN_RQ; | ||
4001 | |||
4002 | spin_lock_irq(q->queue_lock); | ||
4003 | q->nr_requests = nr; | ||
4004 | blk_queue_congestion_threshold(q); | ||
4005 | |||
4006 | if (rl->count[READ] >= queue_congestion_on_threshold(q)) | ||
4007 | blk_set_queue_congested(q, READ); | ||
4008 | else if (rl->count[READ] < queue_congestion_off_threshold(q)) | ||
4009 | blk_clear_queue_congested(q, READ); | ||
4010 | |||
4011 | if (rl->count[WRITE] >= queue_congestion_on_threshold(q)) | ||
4012 | blk_set_queue_congested(q, WRITE); | ||
4013 | else if (rl->count[WRITE] < queue_congestion_off_threshold(q)) | ||
4014 | blk_clear_queue_congested(q, WRITE); | ||
4015 | |||
4016 | if (rl->count[READ] >= q->nr_requests) { | ||
4017 | blk_set_queue_full(q, READ); | ||
4018 | } else if (rl->count[READ]+1 <= q->nr_requests) { | ||
4019 | blk_clear_queue_full(q, READ); | ||
4020 | wake_up(&rl->wait[READ]); | ||
4021 | } | ||
4022 | |||
4023 | if (rl->count[WRITE] >= q->nr_requests) { | ||
4024 | blk_set_queue_full(q, WRITE); | ||
4025 | } else if (rl->count[WRITE]+1 <= q->nr_requests) { | ||
4026 | blk_clear_queue_full(q, WRITE); | ||
4027 | wake_up(&rl->wait[WRITE]); | ||
4028 | } | ||
4029 | spin_unlock_irq(q->queue_lock); | ||
4030 | return ret; | ||
4031 | } | ||
4032 | |||
4033 | static ssize_t queue_ra_show(struct request_queue *q, char *page) | ||
4034 | { | ||
4035 | int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10); | ||
4036 | |||
4037 | return queue_var_show(ra_kb, page); | ||
4038 | } | ||
4039 | |||
4040 | static ssize_t | ||
4041 | queue_ra_store(struct request_queue *q, const char *page, size_t count) | ||
4042 | { | ||
4043 | unsigned long ra_kb; | ||
4044 | ssize_t ret = queue_var_store(&ra_kb, page, count); | ||
4045 | |||
4046 | spin_lock_irq(q->queue_lock); | ||
4047 | q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); | ||
4048 | spin_unlock_irq(q->queue_lock); | ||
4049 | |||
4050 | return ret; | ||
4051 | } | ||
4052 | |||
4053 | static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) | ||
4054 | { | ||
4055 | int max_sectors_kb = q->max_sectors >> 1; | ||
4056 | |||
4057 | return queue_var_show(max_sectors_kb, page); | ||
4058 | } | ||
4059 | |||
4060 | static ssize_t | ||
4061 | queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) | ||
4062 | { | ||
4063 | unsigned long max_sectors_kb, | ||
4064 | max_hw_sectors_kb = q->max_hw_sectors >> 1, | ||
4065 | page_kb = 1 << (PAGE_CACHE_SHIFT - 10); | ||
4066 | ssize_t ret = queue_var_store(&max_sectors_kb, page, count); | ||
4067 | |||
4068 | if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) | ||
4069 | return -EINVAL; | ||
4070 | /* | ||
4071 | * Take the queue lock to update the readahead and max_sectors | ||
4072 | * values synchronously: | ||
4073 | */ | ||
4074 | spin_lock_irq(q->queue_lock); | ||
4075 | q->max_sectors = max_sectors_kb << 1; | ||
4076 | spin_unlock_irq(q->queue_lock); | ||
4077 | |||
4078 | return ret; | ||
4079 | } | ||
4080 | |||
4081 | static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) | ||
4082 | { | ||
4083 | int max_hw_sectors_kb = q->max_hw_sectors >> 1; | ||
4084 | |||
4085 | return queue_var_show(max_hw_sectors_kb, page); | ||
4086 | } | ||
4087 | |||
4088 | static ssize_t queue_max_segments_show(struct request_queue *q, char *page) | ||
4089 | { | ||
4090 | return queue_var_show(q->max_phys_segments, page); | ||
4091 | } | ||
4092 | |||
4093 | static ssize_t queue_max_segments_store(struct request_queue *q, | ||
4094 | const char *page, size_t count) | ||
4095 | { | ||
4096 | unsigned long segments; | ||
4097 | ssize_t ret = queue_var_store(&segments, page, count); | ||
4098 | |||
4099 | spin_lock_irq(q->queue_lock); | ||
4100 | q->max_phys_segments = segments; | ||
4101 | spin_unlock_irq(q->queue_lock); | ||
4102 | |||
4103 | return ret; | ||
4104 | } | ||
4105 | static struct queue_sysfs_entry queue_requests_entry = { | ||
4106 | .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, | ||
4107 | .show = queue_requests_show, | ||
4108 | .store = queue_requests_store, | ||
4109 | }; | ||
4110 | |||
4111 | static struct queue_sysfs_entry queue_ra_entry = { | ||
4112 | .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR }, | ||
4113 | .show = queue_ra_show, | ||
4114 | .store = queue_ra_store, | ||
4115 | }; | ||
4116 | |||
4117 | static struct queue_sysfs_entry queue_max_sectors_entry = { | ||
4118 | .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR }, | ||
4119 | .show = queue_max_sectors_show, | ||
4120 | .store = queue_max_sectors_store, | ||
4121 | }; | ||
4122 | |||
4123 | static struct queue_sysfs_entry queue_max_hw_sectors_entry = { | ||
4124 | .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO }, | ||
4125 | .show = queue_max_hw_sectors_show, | ||
4126 | }; | ||
4127 | |||
4128 | static struct queue_sysfs_entry queue_max_segments_entry = { | ||
4129 | .attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR }, | ||
4130 | .show = queue_max_segments_show, | ||
4131 | .store = queue_max_segments_store, | ||
4132 | }; | ||
4133 | |||
4134 | static struct queue_sysfs_entry queue_iosched_entry = { | ||
4135 | .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR }, | ||
4136 | .show = elv_iosched_show, | ||
4137 | .store = elv_iosched_store, | ||
4138 | }; | ||
4139 | |||
4140 | static struct attribute *default_attrs[] = { | ||
4141 | &queue_requests_entry.attr, | ||
4142 | &queue_ra_entry.attr, | ||
4143 | &queue_max_hw_sectors_entry.attr, | ||
4144 | &queue_max_sectors_entry.attr, | ||
4145 | &queue_max_segments_entry.attr, | ||
4146 | &queue_iosched_entry.attr, | ||
4147 | NULL, | ||
4148 | }; | ||
4149 | |||
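Adding a queue attribute is mechanical: one show (and optionally store) function, one queue_sysfs_entry, and one slot in default_attrs[] above, after which it appears as /sys/block/<dev>/queue/<name>. A hypothetical read-only example:

/* hypothetical attribute exposing the congestion-on threshold */
static ssize_t queue_congest_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_congestion_on_threshold(q), page);
}

static struct queue_sysfs_entry queue_congest_entry = {
	.attr = {.name = "congestion_on_threshold", .mode = S_IRUGO },
	.show = queue_congest_show,
};

/* ... and add &queue_congest_entry.attr to default_attrs[] above. */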
4150 | #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr) | ||
4151 | |||
4152 | static ssize_t | ||
4153 | queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
4154 | { | ||
4155 | struct queue_sysfs_entry *entry = to_queue(attr); | ||
4156 | struct request_queue *q = | ||
4157 | container_of(kobj, struct request_queue, kobj); | ||
4158 | ssize_t res; | ||
4159 | |||
4160 | if (!entry->show) | ||
4161 | return -EIO; | ||
4162 | mutex_lock(&q->sysfs_lock); | ||
4163 | if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { | ||
4164 | mutex_unlock(&q->sysfs_lock); | ||
4165 | return -ENOENT; | ||
4166 | } | ||
4167 | res = entry->show(q, page); | ||
4168 | mutex_unlock(&q->sysfs_lock); | ||
4169 | return res; | ||
4170 | } | ||
4171 | |||
4172 | static ssize_t | ||
4173 | queue_attr_store(struct kobject *kobj, struct attribute *attr, | ||
4174 | const char *page, size_t length) | ||
4175 | { | ||
4176 | struct queue_sysfs_entry *entry = to_queue(attr); | ||
4177 | struct request_queue *q = container_of(kobj, struct request_queue, kobj); | ||
4178 | |||
4179 | ssize_t res; | ||
4180 | |||
4181 | if (!entry->store) | ||
4182 | return -EIO; | ||
4183 | mutex_lock(&q->sysfs_lock); | ||
4184 | if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { | ||
4185 | mutex_unlock(&q->sysfs_lock); | ||
4186 | return -ENOENT; | ||
4187 | } | ||
4188 | res = entry->store(q, page, length); | ||
4189 | mutex_unlock(&q->sysfs_lock); | ||
4190 | return res; | ||
4191 | } | ||
4192 | |||
4193 | static struct sysfs_ops queue_sysfs_ops = { | ||
4194 | .show = queue_attr_show, | ||
4195 | .store = queue_attr_store, | ||
4196 | }; | ||
4197 | |||
4198 | static struct kobj_type queue_ktype = { | ||
4199 | .sysfs_ops = &queue_sysfs_ops, | ||
4200 | .default_attrs = default_attrs, | ||
4201 | .release = blk_release_queue, | ||
4202 | }; | ||
4203 | |||
4204 | int blk_register_queue(struct gendisk *disk) | ||
4205 | { | ||
4206 | int ret; | ||
4207 | |||
4208 | struct request_queue *q = disk->queue; | ||
4209 | |||
4210 | if (!q || !q->request_fn) | ||
4211 | return -ENXIO; | ||
4212 | |||
4213 | q->kobj.parent = kobject_get(&disk->kobj); | ||
4214 | |||
4215 | ret = kobject_add(&q->kobj); | ||
4216 | if (ret < 0) | ||
4217 | return ret; | ||
4218 | |||
4219 | kobject_uevent(&q->kobj, KOBJ_ADD); | ||
4220 | |||
4221 | ret = elv_register_queue(q); | ||
4222 | if (ret) { | ||
4223 | kobject_uevent(&q->kobj, KOBJ_REMOVE); | ||
4224 | kobject_del(&q->kobj); | ||
4225 | return ret; | ||
4226 | } | ||
4227 | |||
4228 | return 0; | ||
4229 | } | ||
4230 | |||
4231 | void blk_unregister_queue(struct gendisk *disk) | ||
4232 | { | ||
4233 | struct request_queue *q = disk->queue; | ||
4234 | |||
4235 | if (q && q->request_fn) { | ||
4236 | elv_unregister_queue(q); | ||
4237 | |||
4238 | kobject_uevent(&q->kobj, KOBJ_REMOVE); | ||
4239 | kobject_del(&q->kobj); | ||
4240 | kobject_put(&disk->kobj); | ||
4241 | } | ||
4242 | } | ||
diff --git a/block/noop-iosched.c b/block/noop-iosched.c index 7563d8aa3944..c23e02969650 100644 --- a/block/noop-iosched.c +++ b/block/noop-iosched.c | |||
@@ -101,7 +101,9 @@ static struct elevator_type elevator_noop = { | |||
101 | 101 | ||
102 | static int __init noop_init(void) | 102 | static int __init noop_init(void) |
103 | { | 103 | { |
104 | return elv_register(&elevator_noop); | 104 | elv_register(&elevator_noop); |
105 | |||
106 | return 0; | ||
105 | } | 107 | } |
106 | 108 | ||
107 | static void __exit noop_exit(void) | 109 | static void __exit noop_exit(void) |
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 91c73224f4c6..9675b34638d4 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c | |||
@@ -230,7 +230,7 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, | |||
230 | rq->cmd_len = hdr->cmd_len; | 230 | rq->cmd_len = hdr->cmd_len; |
231 | rq->cmd_type = REQ_TYPE_BLOCK_PC; | 231 | rq->cmd_type = REQ_TYPE_BLOCK_PC; |
232 | 232 | ||
233 | rq->timeout = (hdr->timeout * HZ) / 1000; | 233 | rq->timeout = msecs_to_jiffies(hdr->timeout); |
234 | if (!rq->timeout) | 234 | if (!rq->timeout) |
235 | rq->timeout = q->sg_timeout; | 235 | rq->timeout = q->sg_timeout; |
236 | if (!rq->timeout) | 236 | if (!rq->timeout) |
@@ -366,7 +366,7 @@ static int sg_io(struct file *file, struct request_queue *q, | |||
366 | */ | 366 | */ |
367 | blk_execute_rq(q, bd_disk, rq, 0); | 367 | blk_execute_rq(q, bd_disk, rq, 0); |
368 | 368 | ||
369 | hdr->duration = ((jiffies - start_time) * 1000) / HZ; | 369 | hdr->duration = jiffies_to_msecs(jiffies - start_time); |
370 | 370 | ||
371 | return blk_complete_sghdr_rq(rq, hdr, bio); | 371 | return blk_complete_sghdr_rq(rq, hdr, bio); |
372 | out: | 372 | out: |