author     Paul Mackerras <paulus@samba.org>   2005-10-30 21:37:12 -0500
committer  Paul Mackerras <paulus@samba.org>   2005-10-30 21:37:12 -0500
commit     23fd07750a789a66fe88cf173d52a18f1a387da4
tree       06fdd6df35fdb835abdaa9b754d62f6b84b97250   /drivers/block
parent     bd787d438a59266af3c9f6351644c85ef1dd21fe
parent     ed28f96ac1960f30f818374d65be71d2fdf811b0

Merge ../linux-2.6 by hand
Diffstat (limited to 'drivers/block')

 -rw-r--r--  drivers/block/Kconfig.iosched     |  28
 -rw-r--r--  drivers/block/aoe/aoe.h           |   2
 -rw-r--r--  drivers/block/aoe/aoechr.c        |   2
 -rw-r--r--  drivers/block/aoe/aoecmd.c        |  15
 -rw-r--r--  drivers/block/as-iosched.c        | 330
 -rw-r--r--  drivers/block/cciss_scsi.c        |  10
 -rw-r--r--  drivers/block/cfq-iosched.c       | 394
 -rw-r--r--  drivers/block/deadline-iosched.c  | 125
 -rw-r--r--  drivers/block/elevator.c          | 391
 -rw-r--r--  drivers/block/genhd.c             |  25
 -rw-r--r--  drivers/block/ll_rw_blk.c         | 193
 -rw-r--r--  drivers/block/loop.c              |   2
 -rw-r--r--  drivers/block/noop-iosched.c      |  48
 -rw-r--r--  drivers/block/paride/paride.c     |   1
 -rw-r--r--  drivers/block/paride/pf.c         |   4
 -rw-r--r--  drivers/block/paride/pg.c         |   4
 -rw-r--r--  drivers/block/paride/pt.c         |   5
 -rw-r--r--  drivers/block/rd.c                |   2
 -rw-r--r--  drivers/block/sx8.c               |  51
 -rw-r--r--  drivers/block/ub.c                |   4

 20 files changed, 613 insertions, 1023 deletions
diff --git a/drivers/block/Kconfig.iosched b/drivers/block/Kconfig.iosched
index 6070a480600b..5b90d2fa63b8 100644
--- a/drivers/block/Kconfig.iosched
+++ b/drivers/block/Kconfig.iosched
@@ -38,4 +38,32 @@ config IOSCHED_CFQ
 	  among all processes in the system. It should provide a fair
 	  working environment, suitable for desktop systems.
 
+choice
+	prompt "Default I/O scheduler"
+	default DEFAULT_AS
+	help
+	  Select the I/O scheduler which will be used by default for all
+	  block devices.
+
+	config DEFAULT_AS
+		bool "Anticipatory" if IOSCHED_AS
+
+	config DEFAULT_DEADLINE
+		bool "Deadline" if IOSCHED_DEADLINE
+
+	config DEFAULT_CFQ
+		bool "CFQ" if IOSCHED_CFQ
+
+	config DEFAULT_NOOP
+		bool "No-op"
+
+endchoice
+
+config DEFAULT_IOSCHED
+	string
+	default "anticipatory" if DEFAULT_AS
+	default "deadline" if DEFAULT_DEADLINE
+	default "cfq" if DEFAULT_CFQ
+	default "noop" if DEFAULT_NOOP
+
 endmenu
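
Note: the new choice block above only records a scheduler name; whichever DEFAULT_* symbol is selected becomes the string CONFIG_DEFAULT_IOSCHED. The code that consumes it lives in elevator.c, which this commit also touches but which is not shown in this excerpt. The sketch below is purely illustrative of how such a compile-time default is typically reconciled with the "elevator=" boot parameter at queue-initialisation time; the identifiers and flow are assumptions, not quotes from the commit.

/*
 * Illustrative sketch only: elevator.c is not part of this excerpt,
 * so the names and exact flow here are assumptions.
 */
static char chosen_elevator[16];	/* filled in by "elevator=" */

static int __init elevator_setup(char *str)
{
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}
__setup("elevator=", elevator_setup);

static struct elevator_type *default_elevator(void)
{
	struct elevator_type *e = NULL;

	if (chosen_elevator[0])			/* boot parameter wins... */
		e = elevator_get(chosen_elevator);
	if (!e)					/* ...else the Kconfig choice */
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
	return e;
}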
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 0e9e586e9ba3..881c48d941b7 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,5 +1,5 @@
 /* Copyright (c) 2004 Coraid, Inc. See COPYING for GPL terms. */
-#define VERSION "12"
+#define VERSION "14"
 #define AOE_MAJOR 152
 #define DEVICE_NAME "aoe"
 
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 45a243096187..41ae0ede619a 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -224,7 +224,7 @@ aoechr_init(void)
 		return PTR_ERR(aoe_class);
 	}
 	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
-		class_device_create(aoe_class,
+		class_device_create(aoe_class, NULL,
 				MKDEV(AOE_MAJOR, chardevs[i].minor),
 				NULL, chardevs[i].name);
 
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index b5be4b7d7b5b..5c9c7c1a3d4c 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -8,6 +8,7 @@
 #include <linux/blkdev.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
+#include <asm/unaligned.h>
 #include "aoe.h"
 
 #define TIMERTICK (HZ / 10)
@@ -311,16 +312,16 @@ ataid_complete(struct aoedev *d, unsigned char *id)
 	u16 n;
 
 	/* word 83: command set supported */
-	n = le16_to_cpup((__le16 *) &id[83<<1]);
+	n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));
 
 	/* word 86: command set/feature enabled */
-	n |= le16_to_cpup((__le16 *) &id[86<<1]);
+	n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));
 
 	if (n & (1<<10)) {	/* bit 10: LBA 48 */
 		d->flags |= DEVFL_EXT;
 
 		/* word 100: number lba48 sectors */
-		ssize = le64_to_cpup((__le64 *) &id[100<<1]);
+		ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));
 
 		/* set as in ide-disk.c:init_idedisk_capacity */
 		d->geo.cylinders = ssize;
@@ -331,12 +332,12 @@ ataid_complete(struct aoedev *d, unsigned char *id)
 		d->flags &= ~DEVFL_EXT;
 
 		/* number lba28 sectors */
-		ssize = le32_to_cpup((__le32 *) &id[60<<1]);
+		ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));
 
 		/* NOTE: obsolete in ATA 6 */
-		d->geo.cylinders = le16_to_cpup((__le16 *) &id[54<<1]);
-		d->geo.heads = le16_to_cpup((__le16 *) &id[55<<1]);
-		d->geo.sectors = le16_to_cpup((__le16 *) &id[56<<1]);
+		d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
+		d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
+		d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
 	}
 	d->ssize = ssize;
 	d->geo.start = 0;
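
Note: the hunks above replace le16/le32/le64_to_cpup() on pointers into the raw ATA IDENTIFY buffer with get_unaligned() plus the corresponding le*_to_cpu(). The interesting words sit at arbitrary byte offsets of a byte array, so dereferencing a cast pointer directly can fault or silently misread on architectures that require natural alignment. The pattern being adopted, as a small self-contained sketch (the helper name is made up for illustration):

#include <asm/byteorder.h>
#include <asm/unaligned.h>

/* Hypothetical helper showing the same access pattern: fetch ATA
 * IDENTIFY word `word` from the 512-byte response `id` without
 * assuming the 16-bit value is naturally aligned in memory.
 */
static inline u16 ata_id_word(const unsigned char *id, int word)
{
	/* get_unaligned() performs a byte-safe load; le16_to_cpu()
	 * corrects byte order on big-endian hosts.
	 */
	return le16_to_cpu(get_unaligned((__le16 *) &id[word << 1]));
}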
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index 95c0a3690b0f..c6744ff38294 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -98,7 +98,6 @@ struct as_data { | |||
98 | 98 | ||
99 | struct as_rq *next_arq[2]; /* next in sort order */ | 99 | struct as_rq *next_arq[2]; /* next in sort order */ |
100 | sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */ | 100 | sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */ |
101 | struct list_head *dispatch; /* driver dispatch queue */ | ||
102 | struct list_head *hash; /* request hash */ | 101 | struct list_head *hash; /* request hash */ |
103 | 102 | ||
104 | unsigned long exit_prob; /* probability a task will exit while | 103 | unsigned long exit_prob; /* probability a task will exit while |
@@ -239,6 +238,25 @@ static struct io_context *as_get_io_context(void) | |||
239 | return ioc; | 238 | return ioc; |
240 | } | 239 | } |
241 | 240 | ||
241 | static void as_put_io_context(struct as_rq *arq) | ||
242 | { | ||
243 | struct as_io_context *aic; | ||
244 | |||
245 | if (unlikely(!arq->io_context)) | ||
246 | return; | ||
247 | |||
248 | aic = arq->io_context->aic; | ||
249 | |||
250 | if (arq->is_sync == REQ_SYNC && aic) { | ||
251 | spin_lock(&aic->lock); | ||
252 | set_bit(AS_TASK_IORUNNING, &aic->state); | ||
253 | aic->last_end_request = jiffies; | ||
254 | spin_unlock(&aic->lock); | ||
255 | } | ||
256 | |||
257 | put_io_context(arq->io_context); | ||
258 | } | ||
259 | |||
242 | /* | 260 | /* |
243 | * the back merge hash support functions | 261 | * the back merge hash support functions |
244 | */ | 262 | */ |
@@ -261,14 +279,6 @@ static inline void as_del_arq_hash(struct as_rq *arq) | |||
261 | __as_del_arq_hash(arq); | 279 | __as_del_arq_hash(arq); |
262 | } | 280 | } |
263 | 281 | ||
264 | static void as_remove_merge_hints(request_queue_t *q, struct as_rq *arq) | ||
265 | { | ||
266 | as_del_arq_hash(arq); | ||
267 | |||
268 | if (q->last_merge == arq->request) | ||
269 | q->last_merge = NULL; | ||
270 | } | ||
271 | |||
272 | static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq) | 282 | static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq) |
273 | { | 283 | { |
274 | struct request *rq = arq->request; | 284 | struct request *rq = arq->request; |
@@ -312,7 +322,7 @@ static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset) | |||
312 | BUG_ON(!arq->on_hash); | 322 | BUG_ON(!arq->on_hash); |
313 | 323 | ||
314 | if (!rq_mergeable(__rq)) { | 324 | if (!rq_mergeable(__rq)) { |
315 | as_remove_merge_hints(ad->q, arq); | 325 | as_del_arq_hash(arq); |
316 | continue; | 326 | continue; |
317 | } | 327 | } |
318 | 328 | ||
@@ -950,23 +960,12 @@ static void as_completed_request(request_queue_t *q, struct request *rq) | |||
950 | 960 | ||
951 | WARN_ON(!list_empty(&rq->queuelist)); | 961 | WARN_ON(!list_empty(&rq->queuelist)); |
952 | 962 | ||
953 | if (arq->state == AS_RQ_PRESCHED) { | ||
954 | WARN_ON(arq->io_context); | ||
955 | goto out; | ||
956 | } | ||
957 | |||
958 | if (arq->state == AS_RQ_MERGED) | ||
959 | goto out_ioc; | ||
960 | |||
961 | if (arq->state != AS_RQ_REMOVED) { | 963 | if (arq->state != AS_RQ_REMOVED) { |
962 | printk("arq->state %d\n", arq->state); | 964 | printk("arq->state %d\n", arq->state); |
963 | WARN_ON(1); | 965 | WARN_ON(1); |
964 | goto out; | 966 | goto out; |
965 | } | 967 | } |
966 | 968 | ||
967 | if (!blk_fs_request(rq)) | ||
968 | goto out; | ||
969 | |||
970 | if (ad->changed_batch && ad->nr_dispatched == 1) { | 969 | if (ad->changed_batch && ad->nr_dispatched == 1) { |
971 | kblockd_schedule_work(&ad->antic_work); | 970 | kblockd_schedule_work(&ad->antic_work); |
972 | ad->changed_batch = 0; | 971 | ad->changed_batch = 0; |
@@ -1001,21 +1000,7 @@ static void as_completed_request(request_queue_t *q, struct request *rq) | |||
1001 | } | 1000 | } |
1002 | } | 1001 | } |
1003 | 1002 | ||
1004 | out_ioc: | 1003 | as_put_io_context(arq); |
1005 | if (!arq->io_context) | ||
1006 | goto out; | ||
1007 | |||
1008 | if (arq->is_sync == REQ_SYNC) { | ||
1009 | struct as_io_context *aic = arq->io_context->aic; | ||
1010 | if (aic) { | ||
1011 | spin_lock(&aic->lock); | ||
1012 | set_bit(AS_TASK_IORUNNING, &aic->state); | ||
1013 | aic->last_end_request = jiffies; | ||
1014 | spin_unlock(&aic->lock); | ||
1015 | } | ||
1016 | } | ||
1017 | |||
1018 | put_io_context(arq->io_context); | ||
1019 | out: | 1004 | out: |
1020 | arq->state = AS_RQ_POSTSCHED; | 1005 | arq->state = AS_RQ_POSTSCHED; |
1021 | } | 1006 | } |
@@ -1047,73 +1032,11 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq) | |||
1047 | ad->next_arq[data_dir] = as_find_next_arq(ad, arq); | 1032 | ad->next_arq[data_dir] = as_find_next_arq(ad, arq); |
1048 | 1033 | ||
1049 | list_del_init(&arq->fifo); | 1034 | list_del_init(&arq->fifo); |
1050 | as_remove_merge_hints(q, arq); | 1035 | as_del_arq_hash(arq); |
1051 | as_del_arq_rb(ad, arq); | 1036 | as_del_arq_rb(ad, arq); |
1052 | } | 1037 | } |
1053 | 1038 | ||
1054 | /* | 1039 | /* |
1055 | * as_remove_dispatched_request is called to remove a request which has gone | ||
1056 | * to the dispatch list. | ||
1057 | */ | ||
1058 | static void as_remove_dispatched_request(request_queue_t *q, struct request *rq) | ||
1059 | { | ||
1060 | struct as_rq *arq = RQ_DATA(rq); | ||
1061 | struct as_io_context *aic; | ||
1062 | |||
1063 | if (!arq) { | ||
1064 | WARN_ON(1); | ||
1065 | return; | ||
1066 | } | ||
1067 | |||
1068 | WARN_ON(arq->state != AS_RQ_DISPATCHED); | ||
1069 | WARN_ON(ON_RB(&arq->rb_node)); | ||
1070 | if (arq->io_context && arq->io_context->aic) { | ||
1071 | aic = arq->io_context->aic; | ||
1072 | if (aic) { | ||
1073 | WARN_ON(!atomic_read(&aic->nr_dispatched)); | ||
1074 | atomic_dec(&aic->nr_dispatched); | ||
1075 | } | ||
1076 | } | ||
1077 | } | ||
1078 | |||
1079 | /* | ||
1080 | * as_remove_request is called when a driver has finished with a request. | ||
1081 | * This should be only called for dispatched requests, but for some reason | ||
1082 | * a POWER4 box running hwscan it does not. | ||
1083 | */ | ||
1084 | static void as_remove_request(request_queue_t *q, struct request *rq) | ||
1085 | { | ||
1086 | struct as_rq *arq = RQ_DATA(rq); | ||
1087 | |||
1088 | if (unlikely(arq->state == AS_RQ_NEW)) | ||
1089 | goto out; | ||
1090 | |||
1091 | if (ON_RB(&arq->rb_node)) { | ||
1092 | if (arq->state != AS_RQ_QUEUED) { | ||
1093 | printk("arq->state %d\n", arq->state); | ||
1094 | WARN_ON(1); | ||
1095 | goto out; | ||
1096 | } | ||
1097 | /* | ||
1098 | * We'll lose the aliased request(s) here. I don't think this | ||
1099 | * will ever happen, but if it does, hopefully someone will | ||
1100 | * report it. | ||
1101 | */ | ||
1102 | WARN_ON(!list_empty(&rq->queuelist)); | ||
1103 | as_remove_queued_request(q, rq); | ||
1104 | } else { | ||
1105 | if (arq->state != AS_RQ_DISPATCHED) { | ||
1106 | printk("arq->state %d\n", arq->state); | ||
1107 | WARN_ON(1); | ||
1108 | goto out; | ||
1109 | } | ||
1110 | as_remove_dispatched_request(q, rq); | ||
1111 | } | ||
1112 | out: | ||
1113 | arq->state = AS_RQ_REMOVED; | ||
1114 | } | ||
1115 | |||
1116 | /* | ||
1117 | * as_fifo_expired returns 0 if there are no expired reads on the fifo, | 1040 | * as_fifo_expired returns 0 if there are no expired reads on the fifo, |
1118 | * 1 otherwise. It is ratelimited so that we only perform the check once per | 1041 | * 1 otherwise. It is ratelimited so that we only perform the check once per |
1119 | * `fifo_expire' interval. Otherwise a large number of expired requests | 1042 | * `fifo_expire' interval. Otherwise a large number of expired requests |
@@ -1165,7 +1088,6 @@ static inline int as_batch_expired(struct as_data *ad) | |||
1165 | static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) | 1088 | static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) |
1166 | { | 1089 | { |
1167 | struct request *rq = arq->request; | 1090 | struct request *rq = arq->request; |
1168 | struct list_head *insert; | ||
1169 | const int data_dir = arq->is_sync; | 1091 | const int data_dir = arq->is_sync; |
1170 | 1092 | ||
1171 | BUG_ON(!ON_RB(&arq->rb_node)); | 1093 | BUG_ON(!ON_RB(&arq->rb_node)); |
@@ -1198,13 +1120,13 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) | |||
1198 | /* | 1120 | /* |
1199 | * take it off the sort and fifo list, add to dispatch queue | 1121 | * take it off the sort and fifo list, add to dispatch queue |
1200 | */ | 1122 | */ |
1201 | insert = ad->dispatch->prev; | ||
1202 | |||
1203 | while (!list_empty(&rq->queuelist)) { | 1123 | while (!list_empty(&rq->queuelist)) { |
1204 | struct request *__rq = list_entry_rq(rq->queuelist.next); | 1124 | struct request *__rq = list_entry_rq(rq->queuelist.next); |
1205 | struct as_rq *__arq = RQ_DATA(__rq); | 1125 | struct as_rq *__arq = RQ_DATA(__rq); |
1206 | 1126 | ||
1207 | list_move_tail(&__rq->queuelist, ad->dispatch); | 1127 | list_del(&__rq->queuelist); |
1128 | |||
1129 | elv_dispatch_add_tail(ad->q, __rq); | ||
1208 | 1130 | ||
1209 | if (__arq->io_context && __arq->io_context->aic) | 1131 | if (__arq->io_context && __arq->io_context->aic) |
1210 | atomic_inc(&__arq->io_context->aic->nr_dispatched); | 1132 | atomic_inc(&__arq->io_context->aic->nr_dispatched); |
@@ -1218,7 +1140,8 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) | |||
1218 | as_remove_queued_request(ad->q, rq); | 1140 | as_remove_queued_request(ad->q, rq); |
1219 | WARN_ON(arq->state != AS_RQ_QUEUED); | 1141 | WARN_ON(arq->state != AS_RQ_QUEUED); |
1220 | 1142 | ||
1221 | list_add(&rq->queuelist, insert); | 1143 | elv_dispatch_sort(ad->q, rq); |
1144 | |||
1222 | arq->state = AS_RQ_DISPATCHED; | 1145 | arq->state = AS_RQ_DISPATCHED; |
1223 | if (arq->io_context && arq->io_context->aic) | 1146 | if (arq->io_context && arq->io_context->aic) |
1224 | atomic_inc(&arq->io_context->aic->nr_dispatched); | 1147 | atomic_inc(&arq->io_context->aic->nr_dispatched); |
@@ -1230,12 +1153,42 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) | |||
1230 | * read/write expire, batch expire, etc, and moves it to the dispatch | 1153 | * read/write expire, batch expire, etc, and moves it to the dispatch |
1231 | * queue. Returns 1 if a request was found, 0 otherwise. | 1154 | * queue. Returns 1 if a request was found, 0 otherwise. |
1232 | */ | 1155 | */ |
1233 | static int as_dispatch_request(struct as_data *ad) | 1156 | static int as_dispatch_request(request_queue_t *q, int force) |
1234 | { | 1157 | { |
1158 | struct as_data *ad = q->elevator->elevator_data; | ||
1235 | struct as_rq *arq; | 1159 | struct as_rq *arq; |
1236 | const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]); | 1160 | const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]); |
1237 | const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]); | 1161 | const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]); |
1238 | 1162 | ||
1163 | if (unlikely(force)) { | ||
1164 | /* | ||
1165 | * Forced dispatch, accounting is useless. Reset | ||
1166 | * accounting states and dump fifo_lists. Note that | ||
1167 | * batch_data_dir is reset to REQ_SYNC to avoid | ||
1168 | * screwing write batch accounting as write batch | ||
1169 | * accounting occurs on W->R transition. | ||
1170 | */ | ||
1171 | int dispatched = 0; | ||
1172 | |||
1173 | ad->batch_data_dir = REQ_SYNC; | ||
1174 | ad->changed_batch = 0; | ||
1175 | ad->new_batch = 0; | ||
1176 | |||
1177 | while (ad->next_arq[REQ_SYNC]) { | ||
1178 | as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]); | ||
1179 | dispatched++; | ||
1180 | } | ||
1181 | ad->last_check_fifo[REQ_SYNC] = jiffies; | ||
1182 | |||
1183 | while (ad->next_arq[REQ_ASYNC]) { | ||
1184 | as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]); | ||
1185 | dispatched++; | ||
1186 | } | ||
1187 | ad->last_check_fifo[REQ_ASYNC] = jiffies; | ||
1188 | |||
1189 | return dispatched; | ||
1190 | } | ||
1191 | |||
1239 | /* Signal that the write batch was uncontended, so we can't time it */ | 1192 | /* Signal that the write batch was uncontended, so we can't time it */ |
1240 | if (ad->batch_data_dir == REQ_ASYNC && !reads) { | 1193 | if (ad->batch_data_dir == REQ_ASYNC && !reads) { |
1241 | if (ad->current_write_count == 0 || !writes) | 1194 | if (ad->current_write_count == 0 || !writes) |
@@ -1359,20 +1312,6 @@ fifo_expired: | |||
1359 | return 1; | 1312 | return 1; |
1360 | } | 1313 | } |
1361 | 1314 | ||
1362 | static struct request *as_next_request(request_queue_t *q) | ||
1363 | { | ||
1364 | struct as_data *ad = q->elevator->elevator_data; | ||
1365 | struct request *rq = NULL; | ||
1366 | |||
1367 | /* | ||
1368 | * if there are still requests on the dispatch queue, grab the first | ||
1369 | */ | ||
1370 | if (!list_empty(ad->dispatch) || as_dispatch_request(ad)) | ||
1371 | rq = list_entry_rq(ad->dispatch->next); | ||
1372 | |||
1373 | return rq; | ||
1374 | } | ||
1375 | |||
1376 | /* | 1315 | /* |
1377 | * Add arq to a list behind alias | 1316 | * Add arq to a list behind alias |
1378 | */ | 1317 | */ |
@@ -1404,17 +1343,26 @@ as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alia | |||
1404 | /* | 1343 | /* |
1405 | * Don't want to have to handle merges. | 1344 | * Don't want to have to handle merges. |
1406 | */ | 1345 | */ |
1407 | as_remove_merge_hints(ad->q, arq); | 1346 | as_del_arq_hash(arq); |
1347 | arq->request->flags |= REQ_NOMERGE; | ||
1408 | } | 1348 | } |
1409 | 1349 | ||
1410 | /* | 1350 | /* |
1411 | * add arq to rbtree and fifo | 1351 | * add arq to rbtree and fifo |
1412 | */ | 1352 | */ |
1413 | static void as_add_request(struct as_data *ad, struct as_rq *arq) | 1353 | static void as_add_request(request_queue_t *q, struct request *rq) |
1414 | { | 1354 | { |
1355 | struct as_data *ad = q->elevator->elevator_data; | ||
1356 | struct as_rq *arq = RQ_DATA(rq); | ||
1415 | struct as_rq *alias; | 1357 | struct as_rq *alias; |
1416 | int data_dir; | 1358 | int data_dir; |
1417 | 1359 | ||
1360 | if (arq->state != AS_RQ_PRESCHED) { | ||
1361 | printk("arq->state: %d\n", arq->state); | ||
1362 | WARN_ON(1); | ||
1363 | } | ||
1364 | arq->state = AS_RQ_NEW; | ||
1365 | |||
1418 | if (rq_data_dir(arq->request) == READ | 1366 | if (rq_data_dir(arq->request) == READ |
1419 | || current->flags&PF_SYNCWRITE) | 1367 | || current->flags&PF_SYNCWRITE) |
1420 | arq->is_sync = 1; | 1368 | arq->is_sync = 1; |
@@ -1437,12 +1385,8 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq) | |||
1437 | arq->expires = jiffies + ad->fifo_expire[data_dir]; | 1385 | arq->expires = jiffies + ad->fifo_expire[data_dir]; |
1438 | list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]); | 1386 | list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]); |
1439 | 1387 | ||
1440 | if (rq_mergeable(arq->request)) { | 1388 | if (rq_mergeable(arq->request)) |
1441 | as_add_arq_hash(ad, arq); | 1389 | as_add_arq_hash(ad, arq); |
1442 | |||
1443 | if (!ad->q->last_merge) | ||
1444 | ad->q->last_merge = arq->request; | ||
1445 | } | ||
1446 | as_update_arq(ad, arq); /* keep state machine up to date */ | 1390 | as_update_arq(ad, arq); /* keep state machine up to date */ |
1447 | 1391 | ||
1448 | } else { | 1392 | } else { |
@@ -1463,96 +1407,24 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq) | |||
1463 | arq->state = AS_RQ_QUEUED; | 1407 | arq->state = AS_RQ_QUEUED; |
1464 | } | 1408 | } |
1465 | 1409 | ||
1466 | static void as_deactivate_request(request_queue_t *q, struct request *rq) | 1410 | static void as_activate_request(request_queue_t *q, struct request *rq) |
1467 | { | 1411 | { |
1468 | struct as_data *ad = q->elevator->elevator_data; | ||
1469 | struct as_rq *arq = RQ_DATA(rq); | 1412 | struct as_rq *arq = RQ_DATA(rq); |
1470 | 1413 | ||
1471 | if (arq) { | 1414 | WARN_ON(arq->state != AS_RQ_DISPATCHED); |
1472 | if (arq->state == AS_RQ_REMOVED) { | 1415 | arq->state = AS_RQ_REMOVED; |
1473 | arq->state = AS_RQ_DISPATCHED; | 1416 | if (arq->io_context && arq->io_context->aic) |
1474 | if (arq->io_context && arq->io_context->aic) | 1417 | atomic_dec(&arq->io_context->aic->nr_dispatched); |
1475 | atomic_inc(&arq->io_context->aic->nr_dispatched); | ||
1476 | } | ||
1477 | } else | ||
1478 | WARN_ON(blk_fs_request(rq) | ||
1479 | && (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) ); | ||
1480 | |||
1481 | /* Stop anticipating - let this request get through */ | ||
1482 | as_antic_stop(ad); | ||
1483 | } | ||
1484 | |||
1485 | /* | ||
1486 | * requeue the request. The request has not been completed, nor is it a | ||
1487 | * new request, so don't touch accounting. | ||
1488 | */ | ||
1489 | static void as_requeue_request(request_queue_t *q, struct request *rq) | ||
1490 | { | ||
1491 | as_deactivate_request(q, rq); | ||
1492 | list_add(&rq->queuelist, &q->queue_head); | ||
1493 | } | ||
1494 | |||
1495 | /* | ||
1496 | * Account a request that is inserted directly onto the dispatch queue. | ||
1497 | * arq->io_context->aic->nr_dispatched should not need to be incremented | ||
1498 | * because only new requests should come through here: requeues go through | ||
1499 | * our explicit requeue handler. | ||
1500 | */ | ||
1501 | static void as_account_queued_request(struct as_data *ad, struct request *rq) | ||
1502 | { | ||
1503 | if (blk_fs_request(rq)) { | ||
1504 | struct as_rq *arq = RQ_DATA(rq); | ||
1505 | arq->state = AS_RQ_DISPATCHED; | ||
1506 | ad->nr_dispatched++; | ||
1507 | } | ||
1508 | } | 1418 | } |
1509 | 1419 | ||
1510 | static void | 1420 | static void as_deactivate_request(request_queue_t *q, struct request *rq) |
1511 | as_insert_request(request_queue_t *q, struct request *rq, int where) | ||
1512 | { | 1421 | { |
1513 | struct as_data *ad = q->elevator->elevator_data; | ||
1514 | struct as_rq *arq = RQ_DATA(rq); | 1422 | struct as_rq *arq = RQ_DATA(rq); |
1515 | 1423 | ||
1516 | if (arq) { | 1424 | WARN_ON(arq->state != AS_RQ_REMOVED); |
1517 | if (arq->state != AS_RQ_PRESCHED) { | 1425 | arq->state = AS_RQ_DISPATCHED; |
1518 | printk("arq->state: %d\n", arq->state); | 1426 | if (arq->io_context && arq->io_context->aic) |
1519 | WARN_ON(1); | 1427 | atomic_inc(&arq->io_context->aic->nr_dispatched); |
1520 | } | ||
1521 | arq->state = AS_RQ_NEW; | ||
1522 | } | ||
1523 | |||
1524 | /* barriers must flush the reorder queue */ | ||
1525 | if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) | ||
1526 | && where == ELEVATOR_INSERT_SORT)) { | ||
1527 | WARN_ON(1); | ||
1528 | where = ELEVATOR_INSERT_BACK; | ||
1529 | } | ||
1530 | |||
1531 | switch (where) { | ||
1532 | case ELEVATOR_INSERT_BACK: | ||
1533 | while (ad->next_arq[REQ_SYNC]) | ||
1534 | as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]); | ||
1535 | |||
1536 | while (ad->next_arq[REQ_ASYNC]) | ||
1537 | as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]); | ||
1538 | |||
1539 | list_add_tail(&rq->queuelist, ad->dispatch); | ||
1540 | as_account_queued_request(ad, rq); | ||
1541 | as_antic_stop(ad); | ||
1542 | break; | ||
1543 | case ELEVATOR_INSERT_FRONT: | ||
1544 | list_add(&rq->queuelist, ad->dispatch); | ||
1545 | as_account_queued_request(ad, rq); | ||
1546 | as_antic_stop(ad); | ||
1547 | break; | ||
1548 | case ELEVATOR_INSERT_SORT: | ||
1549 | BUG_ON(!blk_fs_request(rq)); | ||
1550 | as_add_request(ad, arq); | ||
1551 | break; | ||
1552 | default: | ||
1553 | BUG(); | ||
1554 | return; | ||
1555 | } | ||
1556 | } | 1428 | } |
1557 | 1429 | ||
1558 | /* | 1430 | /* |
@@ -1565,12 +1437,8 @@ static int as_queue_empty(request_queue_t *q) | |||
1565 | { | 1437 | { |
1566 | struct as_data *ad = q->elevator->elevator_data; | 1438 | struct as_data *ad = q->elevator->elevator_data; |
1567 | 1439 | ||
1568 | if (!list_empty(&ad->fifo_list[REQ_ASYNC]) | 1440 | return list_empty(&ad->fifo_list[REQ_ASYNC]) |
1569 | || !list_empty(&ad->fifo_list[REQ_SYNC]) | 1441 | && list_empty(&ad->fifo_list[REQ_SYNC]); |
1570 | || !list_empty(ad->dispatch)) | ||
1571 | return 0; | ||
1572 | |||
1573 | return 1; | ||
1574 | } | 1442 | } |
1575 | 1443 | ||
1576 | static struct request * | 1444 | static struct request * |
@@ -1608,15 +1476,6 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio) | |||
1608 | int ret; | 1476 | int ret; |
1609 | 1477 | ||
1610 | /* | 1478 | /* |
1611 | * try last_merge to avoid going to hash | ||
1612 | */ | ||
1613 | ret = elv_try_last_merge(q, bio); | ||
1614 | if (ret != ELEVATOR_NO_MERGE) { | ||
1615 | __rq = q->last_merge; | ||
1616 | goto out_insert; | ||
1617 | } | ||
1618 | |||
1619 | /* | ||
1620 | * see if the merge hash can satisfy a back merge | 1479 | * see if the merge hash can satisfy a back merge |
1621 | */ | 1480 | */ |
1622 | __rq = as_find_arq_hash(ad, bio->bi_sector); | 1481 | __rq = as_find_arq_hash(ad, bio->bi_sector); |
@@ -1644,9 +1503,6 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio) | |||
1644 | 1503 | ||
1645 | return ELEVATOR_NO_MERGE; | 1504 | return ELEVATOR_NO_MERGE; |
1646 | out: | 1505 | out: |
1647 | if (rq_mergeable(__rq)) | ||
1648 | q->last_merge = __rq; | ||
1649 | out_insert: | ||
1650 | if (ret) { | 1506 | if (ret) { |
1651 | if (rq_mergeable(__rq)) | 1507 | if (rq_mergeable(__rq)) |
1652 | as_hot_arq_hash(ad, RQ_DATA(__rq)); | 1508 | as_hot_arq_hash(ad, RQ_DATA(__rq)); |
@@ -1693,9 +1549,6 @@ static void as_merged_request(request_queue_t *q, struct request *req) | |||
1693 | * behind the disk head. We currently don't bother adjusting. | 1549 | * behind the disk head. We currently don't bother adjusting. |
1694 | */ | 1550 | */ |
1695 | } | 1551 | } |
1696 | |||
1697 | if (arq->on_hash) | ||
1698 | q->last_merge = req; | ||
1699 | } | 1552 | } |
1700 | 1553 | ||
1701 | static void | 1554 | static void |
@@ -1763,6 +1616,7 @@ as_merged_requests(request_queue_t *q, struct request *req, | |||
1763 | * kill knowledge of next, this one is a goner | 1616 | * kill knowledge of next, this one is a goner |
1764 | */ | 1617 | */ |
1765 | as_remove_queued_request(q, next); | 1618 | as_remove_queued_request(q, next); |
1619 | as_put_io_context(anext); | ||
1766 | 1620 | ||
1767 | anext->state = AS_RQ_MERGED; | 1621 | anext->state = AS_RQ_MERGED; |
1768 | } | 1622 | } |
@@ -1782,7 +1636,7 @@ static void as_work_handler(void *data) | |||
1782 | unsigned long flags; | 1636 | unsigned long flags; |
1783 | 1637 | ||
1784 | spin_lock_irqsave(q->queue_lock, flags); | 1638 | spin_lock_irqsave(q->queue_lock, flags); |
1785 | if (as_next_request(q)) | 1639 | if (!as_queue_empty(q)) |
1786 | q->request_fn(q); | 1640 | q->request_fn(q); |
1787 | spin_unlock_irqrestore(q->queue_lock, flags); | 1641 | spin_unlock_irqrestore(q->queue_lock, flags); |
1788 | } | 1642 | } |
@@ -1797,7 +1651,9 @@ static void as_put_request(request_queue_t *q, struct request *rq) | |||
1797 | return; | 1651 | return; |
1798 | } | 1652 | } |
1799 | 1653 | ||
1800 | if (arq->state != AS_RQ_POSTSCHED && arq->state != AS_RQ_PRESCHED) { | 1654 | if (unlikely(arq->state != AS_RQ_POSTSCHED && |
1655 | arq->state != AS_RQ_PRESCHED && | ||
1656 | arq->state != AS_RQ_MERGED)) { | ||
1801 | printk("arq->state %d\n", arq->state); | 1657 | printk("arq->state %d\n", arq->state); |
1802 | WARN_ON(1); | 1658 | WARN_ON(1); |
1803 | } | 1659 | } |
@@ -1807,7 +1663,7 @@ static void as_put_request(request_queue_t *q, struct request *rq) | |||
1807 | } | 1663 | } |
1808 | 1664 | ||
1809 | static int as_set_request(request_queue_t *q, struct request *rq, | 1665 | static int as_set_request(request_queue_t *q, struct request *rq, |
1810 | struct bio *bio, int gfp_mask) | 1666 | struct bio *bio, gfp_t gfp_mask) |
1811 | { | 1667 | { |
1812 | struct as_data *ad = q->elevator->elevator_data; | 1668 | struct as_data *ad = q->elevator->elevator_data; |
1813 | struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask); | 1669 | struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask); |
@@ -1907,7 +1763,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e) | |||
1907 | INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); | 1763 | INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); |
1908 | ad->sort_list[REQ_SYNC] = RB_ROOT; | 1764 | ad->sort_list[REQ_SYNC] = RB_ROOT; |
1909 | ad->sort_list[REQ_ASYNC] = RB_ROOT; | 1765 | ad->sort_list[REQ_ASYNC] = RB_ROOT; |
1910 | ad->dispatch = &q->queue_head; | ||
1911 | ad->fifo_expire[REQ_SYNC] = default_read_expire; | 1766 | ad->fifo_expire[REQ_SYNC] = default_read_expire; |
1912 | ad->fifo_expire[REQ_ASYNC] = default_write_expire; | 1767 | ad->fifo_expire[REQ_ASYNC] = default_write_expire; |
1913 | ad->antic_expire = default_antic_expire; | 1768 | ad->antic_expire = default_antic_expire; |
@@ -2072,10 +1927,9 @@ static struct elevator_type iosched_as = { | |||
2072 | .elevator_merge_fn = as_merge, | 1927 | .elevator_merge_fn = as_merge, |
2073 | .elevator_merged_fn = as_merged_request, | 1928 | .elevator_merged_fn = as_merged_request, |
2074 | .elevator_merge_req_fn = as_merged_requests, | 1929 | .elevator_merge_req_fn = as_merged_requests, |
2075 | .elevator_next_req_fn = as_next_request, | 1930 | .elevator_dispatch_fn = as_dispatch_request, |
2076 | .elevator_add_req_fn = as_insert_request, | 1931 | .elevator_add_req_fn = as_add_request, |
2077 | .elevator_remove_req_fn = as_remove_request, | 1932 | .elevator_activate_req_fn = as_activate_request, |
2078 | .elevator_requeue_req_fn = as_requeue_request, | ||
2079 | .elevator_deactivate_req_fn = as_deactivate_request, | 1933 | .elevator_deactivate_req_fn = as_deactivate_request, |
2080 | .elevator_queue_empty_fn = as_queue_empty, | 1934 | .elevator_queue_empty_fn = as_queue_empty, |
2081 | .elevator_completed_req_fn = as_completed_request, | 1935 | .elevator_completed_req_fn = as_completed_request, |
@@ -2119,8 +1973,8 @@ static int __init as_init(void) | |||
2119 | 1973 | ||
2120 | static void __exit as_exit(void) | 1974 | static void __exit as_exit(void) |
2121 | { | 1975 | { |
2122 | kmem_cache_destroy(arq_pool); | ||
2123 | elv_unregister(&iosched_as); | 1976 | elv_unregister(&iosched_as); |
1977 | kmem_cache_destroy(arq_pool); | ||
2124 | } | 1978 | } |
2125 | 1979 | ||
2126 | module_init(as_init); | 1980 | module_init(as_init); |
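
Note: most of the as-iosched.c churn above is mechanical fallout from the reworked elevator API. The scheduler no longer owns a dispatch list (ad->dispatch and the insert/next/remove/requeue hooks are gone); instead it implements elevator_dispatch_fn, pushes requests onto the block layer's dispatch queue with elv_dispatch_sort()/elv_dispatch_add_tail(), and tracks in-flight requests through the activate/deactivate hooks. Stripped of the anticipatory heuristics, the new contract looks roughly like the no-op scheduler of this era; the sketch below is paraphrased for illustration, since noop-iosched.c itself is not shown in this excerpt.

/* Rough sketch of the new contract, modelled on noop-iosched.c after
 * this conversion; details are paraphrased, not quoted.
 */
struct noop_data {
	struct list_head queue;		/* requests not yet dispatched */
};

static void noop_add_request(request_queue_t *q, struct request *rq)
{
	struct noop_data *nd = q->elevator->elevator_data;

	list_add_tail(&rq->queuelist, &nd->queue);
}

static int noop_dispatch(request_queue_t *q, int force)
{
	struct noop_data *nd = q->elevator->elevator_data;
	struct request *rq;

	if (list_empty(&nd->queue))
		return 0;

	rq = list_entry(nd->queue.next, struct request, queuelist);
	list_del_init(&rq->queuelist);
	elv_dispatch_sort(q, rq);	/* onto q's dispatch list, sorted */
	return 1;
}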
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index e183a3ef7839..ec27976a57da 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -28,13 +28,17 @@
    through the array controller. Note in particular, neither
    physical nor logical disks are presented through the scsi layer. */
 
+#include <linux/timer.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <asm/atomic.h>
+
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
-#include <asm/atomic.h>
-#include <linux/timer.h>
-#include <linux/completion.h>
 
 #include "cciss_scsi.h"
 
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index cd056e7e64ec..5281f8e70510 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -84,7 +84,6 @@ static int cfq_max_depth = 2; | |||
84 | (node)->rb_left = NULL; \ | 84 | (node)->rb_left = NULL; \ |
85 | } while (0) | 85 | } while (0) |
86 | #define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL) | 86 | #define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL) |
87 | #define ON_RB(node) ((node)->rb_color != RB_NONE) | ||
88 | #define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node) | 87 | #define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node) |
89 | #define rq_rb_key(rq) (rq)->sector | 88 | #define rq_rb_key(rq) (rq)->sector |
90 | 89 | ||
@@ -271,10 +270,7 @@ CFQ_CFQQ_FNS(expired); | |||
271 | #undef CFQ_CFQQ_FNS | 270 | #undef CFQ_CFQQ_FNS |
272 | 271 | ||
273 | enum cfq_rq_state_flags { | 272 | enum cfq_rq_state_flags { |
274 | CFQ_CRQ_FLAG_in_flight = 0, | 273 | CFQ_CRQ_FLAG_is_sync = 0, |
275 | CFQ_CRQ_FLAG_in_driver, | ||
276 | CFQ_CRQ_FLAG_is_sync, | ||
277 | CFQ_CRQ_FLAG_requeued, | ||
278 | }; | 274 | }; |
279 | 275 | ||
280 | #define CFQ_CRQ_FNS(name) \ | 276 | #define CFQ_CRQ_FNS(name) \ |
@@ -291,14 +287,11 @@ static inline int cfq_crq_##name(const struct cfq_rq *crq) \ | |||
291 | return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0; \ | 287 | return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0; \ |
292 | } | 288 | } |
293 | 289 | ||
294 | CFQ_CRQ_FNS(in_flight); | ||
295 | CFQ_CRQ_FNS(in_driver); | ||
296 | CFQ_CRQ_FNS(is_sync); | 290 | CFQ_CRQ_FNS(is_sync); |
297 | CFQ_CRQ_FNS(requeued); | ||
298 | #undef CFQ_CRQ_FNS | 291 | #undef CFQ_CRQ_FNS |
299 | 292 | ||
300 | static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short); | 293 | static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short); |
301 | static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *); | 294 | static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *); |
302 | static void cfq_put_cfqd(struct cfq_data *cfqd); | 295 | static void cfq_put_cfqd(struct cfq_data *cfqd); |
303 | 296 | ||
304 | #define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) | 297 | #define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) |
@@ -311,14 +304,6 @@ static inline void cfq_del_crq_hash(struct cfq_rq *crq) | |||
311 | hlist_del_init(&crq->hash); | 304 | hlist_del_init(&crq->hash); |
312 | } | 305 | } |
313 | 306 | ||
314 | static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq) | ||
315 | { | ||
316 | cfq_del_crq_hash(crq); | ||
317 | |||
318 | if (q->last_merge == crq->request) | ||
319 | q->last_merge = NULL; | ||
320 | } | ||
321 | |||
322 | static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq) | 307 | static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq) |
323 | { | 308 | { |
324 | const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request)); | 309 | const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request)); |
@@ -347,18 +332,13 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset) | |||
347 | return NULL; | 332 | return NULL; |
348 | } | 333 | } |
349 | 334 | ||
350 | static inline int cfq_pending_requests(struct cfq_data *cfqd) | ||
351 | { | ||
352 | return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues; | ||
353 | } | ||
354 | |||
355 | /* | 335 | /* |
356 | * scheduler run of queue, if there are requests pending and no one in the | 336 | * scheduler run of queue, if there are requests pending and no one in the |
357 | * driver that will restart queueing | 337 | * driver that will restart queueing |
358 | */ | 338 | */ |
359 | static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) | 339 | static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) |
360 | { | 340 | { |
361 | if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd)) | 341 | if (!cfqd->rq_in_driver && cfqd->busy_queues) |
362 | kblockd_schedule_work(&cfqd->unplug_work); | 342 | kblockd_schedule_work(&cfqd->unplug_work); |
363 | } | 343 | } |
364 | 344 | ||
@@ -366,7 +346,7 @@ static int cfq_queue_empty(request_queue_t *q) | |||
366 | { | 346 | { |
367 | struct cfq_data *cfqd = q->elevator->elevator_data; | 347 | struct cfq_data *cfqd = q->elevator->elevator_data; |
368 | 348 | ||
369 | return !cfq_pending_requests(cfqd); | 349 | return !cfqd->busy_queues; |
370 | } | 350 | } |
371 | 351 | ||
372 | /* | 352 | /* |
@@ -386,11 +366,6 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2) | |||
386 | if (crq2 == NULL) | 366 | if (crq2 == NULL) |
387 | return crq1; | 367 | return crq1; |
388 | 368 | ||
389 | if (cfq_crq_requeued(crq1) && !cfq_crq_requeued(crq2)) | ||
390 | return crq1; | ||
391 | else if (cfq_crq_requeued(crq2) && !cfq_crq_requeued(crq1)) | ||
392 | return crq2; | ||
393 | |||
394 | if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2)) | 369 | if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2)) |
395 | return crq1; | 370 | return crq1; |
396 | else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1)) | 371 | else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1)) |
@@ -461,10 +436,7 @@ cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
461 | struct cfq_rq *crq_next = NULL, *crq_prev = NULL; | 436 | struct cfq_rq *crq_next = NULL, *crq_prev = NULL; |
462 | struct rb_node *rbnext, *rbprev; | 437 | struct rb_node *rbnext, *rbprev; |
463 | 438 | ||
464 | rbnext = NULL; | 439 | if (!(rbnext = rb_next(&last->rb_node))) { |
465 | if (ON_RB(&last->rb_node)) | ||
466 | rbnext = rb_next(&last->rb_node); | ||
467 | if (!rbnext) { | ||
468 | rbnext = rb_first(&cfqq->sort_list); | 440 | rbnext = rb_first(&cfqq->sort_list); |
469 | if (rbnext == &last->rb_node) | 441 | if (rbnext == &last->rb_node) |
470 | rbnext = NULL; | 442 | rbnext = NULL; |
@@ -545,13 +517,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted) | |||
545 | * the pending list according to last request service | 517 | * the pending list according to last request service |
546 | */ | 518 | */ |
547 | static inline void | 519 | static inline void |
548 | cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue) | 520 | cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
549 | { | 521 | { |
550 | BUG_ON(cfq_cfqq_on_rr(cfqq)); | 522 | BUG_ON(cfq_cfqq_on_rr(cfqq)); |
551 | cfq_mark_cfqq_on_rr(cfqq); | 523 | cfq_mark_cfqq_on_rr(cfqq); |
552 | cfqd->busy_queues++; | 524 | cfqd->busy_queues++; |
553 | 525 | ||
554 | cfq_resort_rr_list(cfqq, requeue); | 526 | cfq_resort_rr_list(cfqq, 0); |
555 | } | 527 | } |
556 | 528 | ||
557 | static inline void | 529 | static inline void |
@@ -571,22 +543,19 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
571 | static inline void cfq_del_crq_rb(struct cfq_rq *crq) | 543 | static inline void cfq_del_crq_rb(struct cfq_rq *crq) |
572 | { | 544 | { |
573 | struct cfq_queue *cfqq = crq->cfq_queue; | 545 | struct cfq_queue *cfqq = crq->cfq_queue; |
546 | struct cfq_data *cfqd = cfqq->cfqd; | ||
547 | const int sync = cfq_crq_is_sync(crq); | ||
574 | 548 | ||
575 | if (ON_RB(&crq->rb_node)) { | 549 | BUG_ON(!cfqq->queued[sync]); |
576 | struct cfq_data *cfqd = cfqq->cfqd; | 550 | cfqq->queued[sync]--; |
577 | const int sync = cfq_crq_is_sync(crq); | ||
578 | |||
579 | BUG_ON(!cfqq->queued[sync]); | ||
580 | cfqq->queued[sync]--; | ||
581 | 551 | ||
582 | cfq_update_next_crq(crq); | 552 | cfq_update_next_crq(crq); |
583 | 553 | ||
584 | rb_erase(&crq->rb_node, &cfqq->sort_list); | 554 | rb_erase(&crq->rb_node, &cfqq->sort_list); |
585 | RB_CLEAR_COLOR(&crq->rb_node); | 555 | RB_CLEAR_COLOR(&crq->rb_node); |
586 | 556 | ||
587 | if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list)) | 557 | if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list)) |
588 | cfq_del_cfqq_rr(cfqd, cfqq); | 558 | cfq_del_cfqq_rr(cfqd, cfqq); |
589 | } | ||
590 | } | 559 | } |
591 | 560 | ||
592 | static struct cfq_rq * | 561 | static struct cfq_rq * |
@@ -627,12 +596,12 @@ static void cfq_add_crq_rb(struct cfq_rq *crq) | |||
627 | * if that happens, put the alias on the dispatch list | 596 | * if that happens, put the alias on the dispatch list |
628 | */ | 597 | */ |
629 | while ((__alias = __cfq_add_crq_rb(crq)) != NULL) | 598 | while ((__alias = __cfq_add_crq_rb(crq)) != NULL) |
630 | cfq_dispatch_sort(cfqd->queue, __alias); | 599 | cfq_dispatch_insert(cfqd->queue, __alias); |
631 | 600 | ||
632 | rb_insert_color(&crq->rb_node, &cfqq->sort_list); | 601 | rb_insert_color(&crq->rb_node, &cfqq->sort_list); |
633 | 602 | ||
634 | if (!cfq_cfqq_on_rr(cfqq)) | 603 | if (!cfq_cfqq_on_rr(cfqq)) |
635 | cfq_add_cfqq_rr(cfqd, cfqq, cfq_crq_requeued(crq)); | 604 | cfq_add_cfqq_rr(cfqd, cfqq); |
636 | 605 | ||
637 | /* | 606 | /* |
638 | * check if this request is a better next-serve candidate | 607 | * check if this request is a better next-serve candidate |
@@ -643,10 +612,8 @@ static void cfq_add_crq_rb(struct cfq_rq *crq) | |||
643 | static inline void | 612 | static inline void |
644 | cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq) | 613 | cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq) |
645 | { | 614 | { |
646 | if (ON_RB(&crq->rb_node)) { | 615 | rb_erase(&crq->rb_node, &cfqq->sort_list); |
647 | rb_erase(&crq->rb_node, &cfqq->sort_list); | 616 | cfqq->queued[cfq_crq_is_sync(crq)]--; |
648 | cfqq->queued[cfq_crq_is_sync(crq)]--; | ||
649 | } | ||
650 | 617 | ||
651 | cfq_add_crq_rb(crq); | 618 | cfq_add_crq_rb(crq); |
652 | } | 619 | } |
@@ -676,49 +643,28 @@ out: | |||
676 | return NULL; | 643 | return NULL; |
677 | } | 644 | } |
678 | 645 | ||
679 | static void cfq_deactivate_request(request_queue_t *q, struct request *rq) | 646 | static void cfq_activate_request(request_queue_t *q, struct request *rq) |
680 | { | 647 | { |
681 | struct cfq_data *cfqd = q->elevator->elevator_data; | 648 | struct cfq_data *cfqd = q->elevator->elevator_data; |
682 | struct cfq_rq *crq = RQ_DATA(rq); | ||
683 | |||
684 | if (crq) { | ||
685 | struct cfq_queue *cfqq = crq->cfq_queue; | ||
686 | 649 | ||
687 | if (cfq_crq_in_driver(crq)) { | 650 | cfqd->rq_in_driver++; |
688 | cfq_clear_crq_in_driver(crq); | ||
689 | WARN_ON(!cfqd->rq_in_driver); | ||
690 | cfqd->rq_in_driver--; | ||
691 | } | ||
692 | if (cfq_crq_in_flight(crq)) { | ||
693 | const int sync = cfq_crq_is_sync(crq); | ||
694 | |||
695 | cfq_clear_crq_in_flight(crq); | ||
696 | WARN_ON(!cfqq->on_dispatch[sync]); | ||
697 | cfqq->on_dispatch[sync]--; | ||
698 | } | ||
699 | cfq_mark_crq_requeued(crq); | ||
700 | } | ||
701 | } | 651 | } |
702 | 652 | ||
703 | /* | 653 | static void cfq_deactivate_request(request_queue_t *q, struct request *rq) |
704 | * make sure the service time gets corrected on reissue of this request | ||
705 | */ | ||
706 | static void cfq_requeue_request(request_queue_t *q, struct request *rq) | ||
707 | { | 654 | { |
708 | cfq_deactivate_request(q, rq); | 655 | struct cfq_data *cfqd = q->elevator->elevator_data; |
709 | list_add(&rq->queuelist, &q->queue_head); | 656 | |
657 | WARN_ON(!cfqd->rq_in_driver); | ||
658 | cfqd->rq_in_driver--; | ||
710 | } | 659 | } |
711 | 660 | ||
712 | static void cfq_remove_request(request_queue_t *q, struct request *rq) | 661 | static void cfq_remove_request(struct request *rq) |
713 | { | 662 | { |
714 | struct cfq_rq *crq = RQ_DATA(rq); | 663 | struct cfq_rq *crq = RQ_DATA(rq); |
715 | 664 | ||
716 | if (crq) { | 665 | list_del_init(&rq->queuelist); |
717 | list_del_init(&rq->queuelist); | 666 | cfq_del_crq_rb(crq); |
718 | cfq_del_crq_rb(crq); | 667 | cfq_del_crq_hash(crq); |
719 | cfq_remove_merge_hints(q, crq); | ||
720 | |||
721 | } | ||
722 | } | 668 | } |
723 | 669 | ||
724 | static int | 670 | static int |
@@ -728,12 +674,6 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio) | |||
728 | struct request *__rq; | 674 | struct request *__rq; |
729 | int ret; | 675 | int ret; |
730 | 676 | ||
731 | ret = elv_try_last_merge(q, bio); | ||
732 | if (ret != ELEVATOR_NO_MERGE) { | ||
733 | __rq = q->last_merge; | ||
734 | goto out_insert; | ||
735 | } | ||
736 | |||
737 | __rq = cfq_find_rq_hash(cfqd, bio->bi_sector); | 677 | __rq = cfq_find_rq_hash(cfqd, bio->bi_sector); |
738 | if (__rq && elv_rq_merge_ok(__rq, bio)) { | 678 | if (__rq && elv_rq_merge_ok(__rq, bio)) { |
739 | ret = ELEVATOR_BACK_MERGE; | 679 | ret = ELEVATOR_BACK_MERGE; |
@@ -748,8 +688,6 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio) | |||
748 | 688 | ||
749 | return ELEVATOR_NO_MERGE; | 689 | return ELEVATOR_NO_MERGE; |
750 | out: | 690 | out: |
751 | q->last_merge = __rq; | ||
752 | out_insert: | ||
753 | *req = __rq; | 691 | *req = __rq; |
754 | return ret; | 692 | return ret; |
755 | } | 693 | } |
@@ -762,14 +700,12 @@ static void cfq_merged_request(request_queue_t *q, struct request *req) | |||
762 | cfq_del_crq_hash(crq); | 700 | cfq_del_crq_hash(crq); |
763 | cfq_add_crq_hash(cfqd, crq); | 701 | cfq_add_crq_hash(cfqd, crq); |
764 | 702 | ||
765 | if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) { | 703 | if (rq_rb_key(req) != crq->rb_key) { |
766 | struct cfq_queue *cfqq = crq->cfq_queue; | 704 | struct cfq_queue *cfqq = crq->cfq_queue; |
767 | 705 | ||
768 | cfq_update_next_crq(crq); | 706 | cfq_update_next_crq(crq); |
769 | cfq_reposition_crq_rb(cfqq, crq); | 707 | cfq_reposition_crq_rb(cfqq, crq); |
770 | } | 708 | } |
771 | |||
772 | q->last_merge = req; | ||
773 | } | 709 | } |
774 | 710 | ||
775 | static void | 711 | static void |
@@ -785,7 +721,7 @@ cfq_merged_requests(request_queue_t *q, struct request *rq, | |||
785 | time_before(next->start_time, rq->start_time)) | 721 | time_before(next->start_time, rq->start_time)) |
786 | list_move(&rq->queuelist, &next->queuelist); | 722 | list_move(&rq->queuelist, &next->queuelist); |
787 | 723 | ||
788 | cfq_remove_request(q, next); | 724 | cfq_remove_request(next); |
789 | } | 725 | } |
790 | 726 | ||
791 | static inline void | 727 | static inline void |
@@ -992,53 +928,15 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
992 | return 1; | 928 | return 1; |
993 | } | 929 | } |
994 | 930 | ||
995 | /* | 931 | static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq) |
996 | * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues, | ||
997 | * this function sector sorts the selected request to minimize seeks. we start | ||
998 | * at cfqd->last_sector, not 0. | ||
999 | */ | ||
1000 | static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq) | ||
1001 | { | 932 | { |
1002 | struct cfq_data *cfqd = q->elevator->elevator_data; | 933 | struct cfq_data *cfqd = q->elevator->elevator_data; |
1003 | struct cfq_queue *cfqq = crq->cfq_queue; | 934 | struct cfq_queue *cfqq = crq->cfq_queue; |
1004 | struct list_head *head = &q->queue_head, *entry = head; | ||
1005 | struct request *__rq; | ||
1006 | sector_t last; | ||
1007 | |||
1008 | list_del(&crq->request->queuelist); | ||
1009 | |||
1010 | last = cfqd->last_sector; | ||
1011 | list_for_each_entry_reverse(__rq, head, queuelist) { | ||
1012 | struct cfq_rq *__crq = RQ_DATA(__rq); | ||
1013 | |||
1014 | if (blk_barrier_rq(__rq)) | ||
1015 | break; | ||
1016 | if (!blk_fs_request(__rq)) | ||
1017 | break; | ||
1018 | if (cfq_crq_requeued(__crq)) | ||
1019 | break; | ||
1020 | |||
1021 | if (__rq->sector <= crq->request->sector) | ||
1022 | break; | ||
1023 | if (__rq->sector > last && crq->request->sector < last) { | ||
1024 | last = crq->request->sector + crq->request->nr_sectors; | ||
1025 | break; | ||
1026 | } | ||
1027 | entry = &__rq->queuelist; | ||
1028 | } | ||
1029 | |||
1030 | cfqd->last_sector = last; | ||
1031 | 935 | ||
1032 | cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq); | 936 | cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq); |
1033 | 937 | cfq_remove_request(crq->request); | |
1034 | cfq_del_crq_rb(crq); | ||
1035 | cfq_remove_merge_hints(q, crq); | ||
1036 | |||
1037 | cfq_mark_crq_in_flight(crq); | ||
1038 | cfq_clear_crq_requeued(crq); | ||
1039 | |||
1040 | cfqq->on_dispatch[cfq_crq_is_sync(crq)]++; | 938 | cfqq->on_dispatch[cfq_crq_is_sync(crq)]++; |
1041 | list_add_tail(&crq->request->queuelist, entry); | 939 | elv_dispatch_sort(q, crq->request); |
1042 | } | 940 | } |
1043 | 941 | ||
1044 | /* | 942 | /* |
@@ -1159,7 +1057,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1159 | /* | 1057 | /* |
1160 | * finally, insert request into driver dispatch list | 1058 | * finally, insert request into driver dispatch list |
1161 | */ | 1059 | */ |
1162 | cfq_dispatch_sort(cfqd->queue, crq); | 1060 | cfq_dispatch_insert(cfqd->queue, crq); |
1163 | 1061 | ||
1164 | cfqd->dispatch_slice++; | 1062 | cfqd->dispatch_slice++; |
1165 | dispatched++; | 1063 | dispatched++; |
@@ -1194,7 +1092,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1194 | } | 1092 | } |
1195 | 1093 | ||
1196 | static int | 1094 | static int |
1197 | cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force) | 1095 | cfq_dispatch_requests(request_queue_t *q, int force) |
1198 | { | 1096 | { |
1199 | struct cfq_data *cfqd = q->elevator->elevator_data; | 1097 | struct cfq_data *cfqd = q->elevator->elevator_data; |
1200 | struct cfq_queue *cfqq; | 1098 | struct cfq_queue *cfqq; |
@@ -1204,12 +1102,25 @@ cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force) | |||
1204 | 1102 | ||
1205 | cfqq = cfq_select_queue(cfqd, force); | 1103 | cfqq = cfq_select_queue(cfqd, force); |
1206 | if (cfqq) { | 1104 | if (cfqq) { |
1105 | int max_dispatch; | ||
1106 | |||
1107 | /* | ||
1108 | * if idle window is disabled, allow queue buildup | ||
1109 | */ | ||
1110 | if (!cfq_cfqq_idle_window(cfqq) && | ||
1111 | cfqd->rq_in_driver >= cfqd->cfq_max_depth) | ||
1112 | return 0; | ||
1113 | |||
1207 | cfq_clear_cfqq_must_dispatch(cfqq); | 1114 | cfq_clear_cfqq_must_dispatch(cfqq); |
1208 | cfq_clear_cfqq_wait_request(cfqq); | 1115 | cfq_clear_cfqq_wait_request(cfqq); |
1209 | del_timer(&cfqd->idle_slice_timer); | 1116 | del_timer(&cfqd->idle_slice_timer); |
1210 | 1117 | ||
1211 | if (cfq_class_idle(cfqq)) | 1118 | if (!force) { |
1212 | max_dispatch = 1; | 1119 | max_dispatch = cfqd->cfq_quantum; |
1120 | if (cfq_class_idle(cfqq)) | ||
1121 | max_dispatch = 1; | ||
1122 | } else | ||
1123 | max_dispatch = INT_MAX; | ||
1213 | 1124 | ||
1214 | return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); | 1125 | return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); |
1215 | } | 1126 | } |
@@ -1217,93 +1128,6 @@ cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force) | |||
1217 | return 0; | 1128 | return 0; |
1218 | } | 1129 | } |
1219 | 1130 | ||
1220 | static inline void cfq_account_dispatch(struct cfq_rq *crq) | ||
1221 | { | ||
1222 | struct cfq_queue *cfqq = crq->cfq_queue; | ||
1223 | struct cfq_data *cfqd = cfqq->cfqd; | ||
1224 | |||
1225 | if (unlikely(!blk_fs_request(crq->request))) | ||
1226 | return; | ||
1227 | |||
1228 | /* | ||
1229 | * accounted bit is necessary since some drivers will call | ||
1230 | * elv_next_request() many times for the same request (eg ide) | ||
1231 | */ | ||
1232 | if (cfq_crq_in_driver(crq)) | ||
1233 | return; | ||
1234 | |||
1235 | cfq_mark_crq_in_driver(crq); | ||
1236 | cfqd->rq_in_driver++; | ||
1237 | } | ||
1238 | |||
1239 | static inline void | ||
1240 | cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq) | ||
1241 | { | ||
1242 | struct cfq_data *cfqd = cfqq->cfqd; | ||
1243 | unsigned long now; | ||
1244 | |||
1245 | if (!cfq_crq_in_driver(crq)) | ||
1246 | return; | ||
1247 | |||
1248 | now = jiffies; | ||
1249 | |||
1250 | WARN_ON(!cfqd->rq_in_driver); | ||
1251 | cfqd->rq_in_driver--; | ||
1252 | |||
1253 | if (!cfq_class_idle(cfqq)) | ||
1254 | cfqd->last_end_request = now; | ||
1255 | |||
1256 | if (!cfq_cfqq_dispatched(cfqq)) { | ||
1257 | if (cfq_cfqq_on_rr(cfqq)) { | ||
1258 | cfqq->service_last = now; | ||
1259 | cfq_resort_rr_list(cfqq, 0); | ||
1260 | } | ||
1261 | if (cfq_cfqq_expired(cfqq)) { | ||
1262 | __cfq_slice_expired(cfqd, cfqq, 0); | ||
1263 | cfq_schedule_dispatch(cfqd); | ||
1264 | } | ||
1265 | } | ||
1266 | |||
1267 | if (cfq_crq_is_sync(crq)) | ||
1268 | crq->io_context->last_end_request = now; | ||
1269 | } | ||
1270 | |||
1271 | static struct request *cfq_next_request(request_queue_t *q) | ||
1272 | { | ||
1273 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
1274 | struct request *rq; | ||
1275 | |||
1276 | if (!list_empty(&q->queue_head)) { | ||
1277 | struct cfq_rq *crq; | ||
1278 | dispatch: | ||
1279 | rq = list_entry_rq(q->queue_head.next); | ||
1280 | |||
1281 | crq = RQ_DATA(rq); | ||
1282 | if (crq) { | ||
1283 | struct cfq_queue *cfqq = crq->cfq_queue; | ||
1284 | |||
1285 | /* | ||
1286 | * if idle window is disabled, allow queue buildup | ||
1287 | */ | ||
1288 | if (!cfq_crq_in_driver(crq) && | ||
1289 | !cfq_cfqq_idle_window(cfqq) && | ||
1290 | !blk_barrier_rq(rq) && | ||
1291 | cfqd->rq_in_driver >= cfqd->cfq_max_depth) | ||
1292 | return NULL; | ||
1293 | |||
1294 | cfq_remove_merge_hints(q, crq); | ||
1295 | cfq_account_dispatch(crq); | ||
1296 | } | ||
1297 | |||
1298 | return rq; | ||
1299 | } | ||
1300 | |||
1301 | if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0)) | ||
1302 | goto dispatch; | ||
1303 | |||
1304 | return NULL; | ||
1305 | } | ||
1306 | |||
1307 | /* | 1131 | /* |
1308 | * task holds one reference to the queue, dropped when task exits. each crq | 1132 | * task holds one reference to the queue, dropped when task exits. each crq |
1309 | * in-flight on this queue also holds a reference, dropped when crq is freed. | 1133 | * in-flight on this queue also holds a reference, dropped when crq is freed. |
@@ -1422,7 +1246,7 @@ static void cfq_exit_io_context(struct cfq_io_context *cic) | |||
1422 | } | 1246 | } |
1423 | 1247 | ||
1424 | static struct cfq_io_context * | 1248 | static struct cfq_io_context * |
1425 | cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask) | 1249 | cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) |
1426 | { | 1250 | { |
1427 | struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask); | 1251 | struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask); |
1428 | 1252 | ||
@@ -1517,7 +1341,7 @@ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) | |||
1517 | 1341 | ||
1518 | static struct cfq_queue * | 1342 | static struct cfq_queue * |
1519 | cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio, | 1343 | cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio, |
1520 | int gfp_mask) | 1344 | gfp_t gfp_mask) |
1521 | { | 1345 | { |
1522 | const int hashval = hash_long(key, CFQ_QHASH_SHIFT); | 1346 | const int hashval = hash_long(key, CFQ_QHASH_SHIFT); |
1523 | struct cfq_queue *cfqq, *new_cfqq = NULL; | 1347 | struct cfq_queue *cfqq, *new_cfqq = NULL; |
@@ -1578,7 +1402,7 @@ out: | |||
1578 | * cfqq, so we don't need to worry about it disappearing | 1402 | * cfqq, so we don't need to worry about it disappearing |
1579 | */ | 1403 | */ |
1580 | static struct cfq_io_context * | 1404 | static struct cfq_io_context * |
1581 | cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask) | 1405 | cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) |
1582 | { | 1406 | { |
1583 | struct io_context *ioc = NULL; | 1407 | struct io_context *ioc = NULL; |
1584 | struct cfq_io_context *cic; | 1408 | struct cfq_io_context *cic; |
@@ -1816,8 +1640,9 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1816 | } | 1640 | } |
1817 | } | 1641 | } |
1818 | 1642 | ||
1819 | static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq) | 1643 | static void cfq_insert_request(request_queue_t *q, struct request *rq) |
1820 | { | 1644 | { |
1645 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
1821 | struct cfq_rq *crq = RQ_DATA(rq); | 1646 | struct cfq_rq *crq = RQ_DATA(rq); |
1822 | struct cfq_queue *cfqq = crq->cfq_queue; | 1647 | struct cfq_queue *cfqq = crq->cfq_queue; |
1823 | 1648 | ||
@@ -1827,66 +1652,43 @@ static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq) | |||
1827 | 1652 | ||
1828 | list_add_tail(&rq->queuelist, &cfqq->fifo); | 1653 | list_add_tail(&rq->queuelist, &cfqq->fifo); |
1829 | 1654 | ||
1830 | if (rq_mergeable(rq)) { | 1655 | if (rq_mergeable(rq)) |
1831 | cfq_add_crq_hash(cfqd, crq); | 1656 | cfq_add_crq_hash(cfqd, crq); |
1832 | 1657 | ||
1833 | if (!cfqd->queue->last_merge) | ||
1834 | cfqd->queue->last_merge = rq; | ||
1835 | } | ||
1836 | |||
1837 | cfq_crq_enqueued(cfqd, cfqq, crq); | 1658 | cfq_crq_enqueued(cfqd, cfqq, crq); |
1838 | } | 1659 | } |
1839 | 1660 | ||
1840 | static void | ||
1841 | cfq_insert_request(request_queue_t *q, struct request *rq, int where) | ||
1842 | { | ||
1843 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
1844 | |||
1845 | switch (where) { | ||
1846 | case ELEVATOR_INSERT_BACK: | ||
1847 | while (cfq_dispatch_requests(q, INT_MAX, 1)) | ||
1848 | ; | ||
1849 | list_add_tail(&rq->queuelist, &q->queue_head); | ||
1850 | /* | ||
1851 | * If we were idling with pending requests on | ||
1852 | * inactive cfqqs, force dispatching will | ||
1853 | * remove the idle timer and the queue won't | ||
1854 | * be kicked by __make_request() afterward. | ||
1855 | * Kick it here. | ||
1856 | */ | ||
1857 | cfq_schedule_dispatch(cfqd); | ||
1858 | break; | ||
1859 | case ELEVATOR_INSERT_FRONT: | ||
1860 | list_add(&rq->queuelist, &q->queue_head); | ||
1861 | break; | ||
1862 | case ELEVATOR_INSERT_SORT: | ||
1863 | BUG_ON(!blk_fs_request(rq)); | ||
1864 | cfq_enqueue(cfqd, rq); | ||
1865 | break; | ||
1866 | default: | ||
1867 | printk("%s: bad insert point %d\n", __FUNCTION__,where); | ||
1868 | return; | ||
1869 | } | ||
1870 | } | ||
1871 | |||
1872 | static void cfq_completed_request(request_queue_t *q, struct request *rq) | 1661 | static void cfq_completed_request(request_queue_t *q, struct request *rq) |
1873 | { | 1662 | { |
1874 | struct cfq_rq *crq = RQ_DATA(rq); | 1663 | struct cfq_rq *crq = RQ_DATA(rq); |
1875 | struct cfq_queue *cfqq; | 1664 | struct cfq_queue *cfqq = crq->cfq_queue; |
1665 | struct cfq_data *cfqd = cfqq->cfqd; | ||
1666 | const int sync = cfq_crq_is_sync(crq); | ||
1667 | unsigned long now; | ||
1876 | 1668 | ||
1877 | if (unlikely(!blk_fs_request(rq))) | 1669 | now = jiffies; |
1878 | return; | ||
1879 | 1670 | ||
1880 | cfqq = crq->cfq_queue; | 1671 | WARN_ON(!cfqd->rq_in_driver); |
1672 | WARN_ON(!cfqq->on_dispatch[sync]); | ||
1673 | cfqd->rq_in_driver--; | ||
1674 | cfqq->on_dispatch[sync]--; | ||
1881 | 1675 | ||
1882 | if (cfq_crq_in_flight(crq)) { | 1676 | if (!cfq_class_idle(cfqq)) |
1883 | const int sync = cfq_crq_is_sync(crq); | 1677 | cfqd->last_end_request = now; |
1884 | 1678 | ||
1885 | WARN_ON(!cfqq->on_dispatch[sync]); | 1679 | if (!cfq_cfqq_dispatched(cfqq)) { |
1886 | cfqq->on_dispatch[sync]--; | 1680 | if (cfq_cfqq_on_rr(cfqq)) { |
1681 | cfqq->service_last = now; | ||
1682 | cfq_resort_rr_list(cfqq, 0); | ||
1683 | } | ||
1684 | if (cfq_cfqq_expired(cfqq)) { | ||
1685 | __cfq_slice_expired(cfqd, cfqq, 0); | ||
1686 | cfq_schedule_dispatch(cfqd); | ||
1687 | } | ||
1887 | } | 1688 | } |
1888 | 1689 | ||
1889 | cfq_account_completion(cfqq, crq); | 1690 | if (cfq_crq_is_sync(crq)) |
1691 | crq->io_context->last_end_request = now; | ||
1890 | } | 1692 | } |
1891 | 1693 | ||
1892 | static struct request * | 1694 | static struct request * |
@@ -2075,7 +1877,7 @@ static void cfq_put_request(request_queue_t *q, struct request *rq) | |||
2075 | */ | 1877 | */ |
2076 | static int | 1878 | static int |
2077 | cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | 1879 | cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, |
2078 | int gfp_mask) | 1880 | gfp_t gfp_mask) |
2079 | { | 1881 | { |
2080 | struct cfq_data *cfqd = q->elevator->elevator_data; | 1882 | struct cfq_data *cfqd = q->elevator->elevator_data; |
2081 | struct task_struct *tsk = current; | 1883 | struct task_struct *tsk = current; |
@@ -2118,9 +1920,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | |||
2118 | INIT_HLIST_NODE(&crq->hash); | 1920 | INIT_HLIST_NODE(&crq->hash); |
2119 | crq->cfq_queue = cfqq; | 1921 | crq->cfq_queue = cfqq; |
2120 | crq->io_context = cic; | 1922 | crq->io_context = cic; |
2121 | cfq_clear_crq_in_flight(crq); | ||
2122 | cfq_clear_crq_in_driver(crq); | ||
2123 | cfq_clear_crq_requeued(crq); | ||
2124 | 1923 | ||
2125 | if (rw == READ || process_sync(tsk)) | 1924 | if (rw == READ || process_sync(tsk)) |
2126 | cfq_mark_crq_is_sync(crq); | 1925 | cfq_mark_crq_is_sync(crq); |
@@ -2201,7 +2000,7 @@ static void cfq_idle_slice_timer(unsigned long data) | |||
2201 | * only expire and reinvoke request handler, if there are | 2000 | * only expire and reinvoke request handler, if there are |
2202 | * other queues with pending requests | 2001 | * other queues with pending requests |
2203 | */ | 2002 | */ |
2204 | if (!cfq_pending_requests(cfqd)) { | 2003 | if (!cfqd->busy_queues) { |
2205 | cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end); | 2004 | cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end); |
2206 | add_timer(&cfqd->idle_slice_timer); | 2005 | add_timer(&cfqd->idle_slice_timer); |
2207 | goto out_cont; | 2006 | goto out_cont; |
@@ -2576,10 +2375,9 @@ static struct elevator_type iosched_cfq = { | |||
2576 | .elevator_merge_fn = cfq_merge, | 2375 | .elevator_merge_fn = cfq_merge, |
2577 | .elevator_merged_fn = cfq_merged_request, | 2376 | .elevator_merged_fn = cfq_merged_request, |
2578 | .elevator_merge_req_fn = cfq_merged_requests, | 2377 | .elevator_merge_req_fn = cfq_merged_requests, |
2579 | .elevator_next_req_fn = cfq_next_request, | 2378 | .elevator_dispatch_fn = cfq_dispatch_requests, |
2580 | .elevator_add_req_fn = cfq_insert_request, | 2379 | .elevator_add_req_fn = cfq_insert_request, |
2581 | .elevator_remove_req_fn = cfq_remove_request, | 2380 | .elevator_activate_req_fn = cfq_activate_request, |
2582 | .elevator_requeue_req_fn = cfq_requeue_request, | ||
2583 | .elevator_deactivate_req_fn = cfq_deactivate_request, | 2381 | .elevator_deactivate_req_fn = cfq_deactivate_request, |
2584 | .elevator_queue_empty_fn = cfq_queue_empty, | 2382 | .elevator_queue_empty_fn = cfq_queue_empty, |
2585 | .elevator_completed_req_fn = cfq_completed_request, | 2383 | .elevator_completed_req_fn = cfq_completed_request, |
@@ -2620,28 +2418,8 @@ static int __init cfq_init(void) | |||
2620 | 2418 | ||
2621 | static void __exit cfq_exit(void) | 2419 | static void __exit cfq_exit(void) |
2622 | { | 2420 | { |
2623 | struct task_struct *g, *p; | ||
2624 | unsigned long flags; | ||
2625 | |||
2626 | read_lock_irqsave(&tasklist_lock, flags); | ||
2627 | |||
2628 | /* | ||
2629 | * iterate each process in the system, removing our io_context | ||
2630 | */ | ||
2631 | do_each_thread(g, p) { | ||
2632 | struct io_context *ioc = p->io_context; | ||
2633 | |||
2634 | if (ioc && ioc->cic) { | ||
2635 | ioc->cic->exit(ioc->cic); | ||
2636 | cfq_free_io_context(ioc->cic); | ||
2637 | ioc->cic = NULL; | ||
2638 | } | ||
2639 | } while_each_thread(g, p); | ||
2640 | |||
2641 | read_unlock_irqrestore(&tasklist_lock, flags); | ||
2642 | |||
2643 | cfq_slab_kill(); | ||
2644 | elv_unregister(&iosched_cfq); | 2421 | elv_unregister(&iosched_cfq); |
2422 | cfq_slab_kill(); | ||
2645 | } | 2423 | } |
2646 | 2424 | ||
2647 | module_init(cfq_init); | 2425 | module_init(cfq_init); |
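The teardown order in the new cfq_exit() is deliberate: elv_unregister() now walks every thread and tears down the per-process io contexts (cic/aic) itself, so an io scheduler module has to unregister before it destroys the slabs those contexts point into. A minimal sketch of that ordering (my_slab_kill() and iosched_mine are placeholder names, not from this tree):

        static void __exit my_iosched_exit(void)
        {
                /* unregister first: elv_unregister() detaches any io
                 * contexts that still reference scheduler-private data ... */
                elv_unregister(&iosched_mine);

                /* ... only then is it safe to destroy the private pools */
                my_slab_kill();
        }

        module_exit(my_iosched_exit);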
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c index 52a3ae5289a0..7929471d7df7 100644 --- a/drivers/block/deadline-iosched.c +++ b/drivers/block/deadline-iosched.c | |||
@@ -50,7 +50,6 @@ struct deadline_data { | |||
50 | * next in sort order. read, write or both are NULL | 50 | * next in sort order. read, write or both are NULL |
51 | */ | 51 | */ |
52 | struct deadline_rq *next_drq[2]; | 52 | struct deadline_rq *next_drq[2]; |
53 | struct list_head *dispatch; /* driver dispatch queue */ | ||
54 | struct list_head *hash; /* request hash */ | 53 | struct list_head *hash; /* request hash */ |
55 | unsigned int batching; /* number of sequential requests made */ | 54 | unsigned int batching; /* number of sequential requests made */ |
56 | sector_t last_sector; /* head position */ | 55 | sector_t last_sector; /* head position */ |
@@ -113,15 +112,6 @@ static inline void deadline_del_drq_hash(struct deadline_rq *drq) | |||
113 | __deadline_del_drq_hash(drq); | 112 | __deadline_del_drq_hash(drq); |
114 | } | 113 | } |
115 | 114 | ||
116 | static void | ||
117 | deadline_remove_merge_hints(request_queue_t *q, struct deadline_rq *drq) | ||
118 | { | ||
119 | deadline_del_drq_hash(drq); | ||
120 | |||
121 | if (q->last_merge == drq->request) | ||
122 | q->last_merge = NULL; | ||
123 | } | ||
124 | |||
125 | static inline void | 115 | static inline void |
126 | deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq) | 116 | deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq) |
127 | { | 117 | { |
@@ -239,10 +229,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq) | |||
239 | dd->next_drq[data_dir] = rb_entry_drq(rbnext); | 229 | dd->next_drq[data_dir] = rb_entry_drq(rbnext); |
240 | } | 230 | } |
241 | 231 | ||
242 | if (ON_RB(&drq->rb_node)) { | 232 | BUG_ON(!ON_RB(&drq->rb_node)); |
243 | rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq)); | 233 | rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq)); |
244 | RB_CLEAR(&drq->rb_node); | 234 | RB_CLEAR(&drq->rb_node); |
245 | } | ||
246 | } | 235 | } |
247 | 236 | ||
248 | static struct request * | 237 | static struct request * |
@@ -286,7 +275,7 @@ deadline_find_first_drq(struct deadline_data *dd, int data_dir) | |||
286 | /* | 275 | /* |
287 | * add drq to rbtree and fifo | 276 | * add drq to rbtree and fifo |
288 | */ | 277 | */ |
289 | static inline void | 278 | static void |
290 | deadline_add_request(struct request_queue *q, struct request *rq) | 279 | deadline_add_request(struct request_queue *q, struct request *rq) |
291 | { | 280 | { |
292 | struct deadline_data *dd = q->elevator->elevator_data; | 281 | struct deadline_data *dd = q->elevator->elevator_data; |
@@ -301,12 +290,8 @@ deadline_add_request(struct request_queue *q, struct request *rq) | |||
301 | drq->expires = jiffies + dd->fifo_expire[data_dir]; | 290 | drq->expires = jiffies + dd->fifo_expire[data_dir]; |
302 | list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]); | 291 | list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]); |
303 | 292 | ||
304 | if (rq_mergeable(rq)) { | 293 | if (rq_mergeable(rq)) |
305 | deadline_add_drq_hash(dd, drq); | 294 | deadline_add_drq_hash(dd, drq); |
306 | |||
307 | if (!q->last_merge) | ||
308 | q->last_merge = rq; | ||
309 | } | ||
310 | } | 295 | } |
311 | 296 | ||
312 | /* | 297 | /* |
@@ -315,14 +300,11 @@ deadline_add_request(struct request_queue *q, struct request *rq) | |||
315 | static void deadline_remove_request(request_queue_t *q, struct request *rq) | 300 | static void deadline_remove_request(request_queue_t *q, struct request *rq) |
316 | { | 301 | { |
317 | struct deadline_rq *drq = RQ_DATA(rq); | 302 | struct deadline_rq *drq = RQ_DATA(rq); |
303 | struct deadline_data *dd = q->elevator->elevator_data; | ||
318 | 304 | ||
319 | if (drq) { | 305 | list_del_init(&drq->fifo); |
320 | struct deadline_data *dd = q->elevator->elevator_data; | 306 | deadline_del_drq_rb(dd, drq); |
321 | 307 | deadline_del_drq_hash(drq); | |
322 | list_del_init(&drq->fifo); | ||
323 | deadline_remove_merge_hints(q, drq); | ||
324 | deadline_del_drq_rb(dd, drq); | ||
325 | } | ||
326 | } | 308 | } |
327 | 309 | ||
328 | static int | 310 | static int |
@@ -333,15 +315,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio) | |||
333 | int ret; | 315 | int ret; |
334 | 316 | ||
335 | /* | 317 | /* |
336 | * try last_merge to avoid going to hash | ||
337 | */ | ||
338 | ret = elv_try_last_merge(q, bio); | ||
339 | if (ret != ELEVATOR_NO_MERGE) { | ||
340 | __rq = q->last_merge; | ||
341 | goto out_insert; | ||
342 | } | ||
343 | |||
344 | /* | ||
345 | * see if the merge hash can satisfy a back merge | 318 | * see if the merge hash can satisfy a back merge |
346 | */ | 319 | */ |
347 | __rq = deadline_find_drq_hash(dd, bio->bi_sector); | 320 | __rq = deadline_find_drq_hash(dd, bio->bi_sector); |
@@ -373,8 +346,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio) | |||
373 | 346 | ||
374 | return ELEVATOR_NO_MERGE; | 347 | return ELEVATOR_NO_MERGE; |
375 | out: | 348 | out: |
376 | q->last_merge = __rq; | ||
377 | out_insert: | ||
378 | if (ret) | 349 | if (ret) |
379 | deadline_hot_drq_hash(dd, RQ_DATA(__rq)); | 350 | deadline_hot_drq_hash(dd, RQ_DATA(__rq)); |
380 | *req = __rq; | 351 | *req = __rq; |
@@ -399,8 +370,6 @@ static void deadline_merged_request(request_queue_t *q, struct request *req) | |||
399 | deadline_del_drq_rb(dd, drq); | 370 | deadline_del_drq_rb(dd, drq); |
400 | deadline_add_drq_rb(dd, drq); | 371 | deadline_add_drq_rb(dd, drq); |
401 | } | 372 | } |
402 | |||
403 | q->last_merge = req; | ||
404 | } | 373 | } |
405 | 374 | ||
406 | static void | 375 | static void |
@@ -452,7 +421,7 @@ deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq) | |||
452 | request_queue_t *q = drq->request->q; | 421 | request_queue_t *q = drq->request->q; |
453 | 422 | ||
454 | deadline_remove_request(q, drq->request); | 423 | deadline_remove_request(q, drq->request); |
455 | list_add_tail(&drq->request->queuelist, dd->dispatch); | 424 | elv_dispatch_add_tail(q, drq->request); |
456 | } | 425 | } |
457 | 426 | ||
458 | /* | 427 | /* |
@@ -502,8 +471,9 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir) | |||
502 | * deadline_dispatch_requests selects the best request according to | 471 | * deadline_dispatch_requests selects the best request according to |
503 | * read/write expire, fifo_batch, etc | 472 | * read/write expire, fifo_batch, etc |
504 | */ | 473 | */ |
505 | static int deadline_dispatch_requests(struct deadline_data *dd) | 474 | static int deadline_dispatch_requests(request_queue_t *q, int force) |
506 | { | 475 | { |
476 | struct deadline_data *dd = q->elevator->elevator_data; | ||
507 | const int reads = !list_empty(&dd->fifo_list[READ]); | 477 | const int reads = !list_empty(&dd->fifo_list[READ]); |
508 | const int writes = !list_empty(&dd->fifo_list[WRITE]); | 478 | const int writes = !list_empty(&dd->fifo_list[WRITE]); |
509 | struct deadline_rq *drq; | 479 | struct deadline_rq *drq; |
@@ -597,65 +567,12 @@ dispatch_request: | |||
597 | return 1; | 567 | return 1; |
598 | } | 568 | } |
599 | 569 | ||
600 | static struct request *deadline_next_request(request_queue_t *q) | ||
601 | { | ||
602 | struct deadline_data *dd = q->elevator->elevator_data; | ||
603 | struct request *rq; | ||
604 | |||
605 | /* | ||
606 | * if there are still requests on the dispatch queue, grab the first one | ||
607 | */ | ||
608 | if (!list_empty(dd->dispatch)) { | ||
609 | dispatch: | ||
610 | rq = list_entry_rq(dd->dispatch->next); | ||
611 | return rq; | ||
612 | } | ||
613 | |||
614 | if (deadline_dispatch_requests(dd)) | ||
615 | goto dispatch; | ||
616 | |||
617 | return NULL; | ||
618 | } | ||
619 | |||
620 | static void | ||
621 | deadline_insert_request(request_queue_t *q, struct request *rq, int where) | ||
622 | { | ||
623 | struct deadline_data *dd = q->elevator->elevator_data; | ||
624 | |||
625 | /* barriers must flush the reorder queue */ | ||
626 | if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) | ||
627 | && where == ELEVATOR_INSERT_SORT)) | ||
628 | where = ELEVATOR_INSERT_BACK; | ||
629 | |||
630 | switch (where) { | ||
631 | case ELEVATOR_INSERT_BACK: | ||
632 | while (deadline_dispatch_requests(dd)) | ||
633 | ; | ||
634 | list_add_tail(&rq->queuelist, dd->dispatch); | ||
635 | break; | ||
636 | case ELEVATOR_INSERT_FRONT: | ||
637 | list_add(&rq->queuelist, dd->dispatch); | ||
638 | break; | ||
639 | case ELEVATOR_INSERT_SORT: | ||
640 | BUG_ON(!blk_fs_request(rq)); | ||
641 | deadline_add_request(q, rq); | ||
642 | break; | ||
643 | default: | ||
644 | printk("%s: bad insert point %d\n", __FUNCTION__,where); | ||
645 | return; | ||
646 | } | ||
647 | } | ||
648 | |||
649 | static int deadline_queue_empty(request_queue_t *q) | 570 | static int deadline_queue_empty(request_queue_t *q) |
650 | { | 571 | { |
651 | struct deadline_data *dd = q->elevator->elevator_data; | 572 | struct deadline_data *dd = q->elevator->elevator_data; |
652 | 573 | ||
653 | if (!list_empty(&dd->fifo_list[WRITE]) | 574 | return list_empty(&dd->fifo_list[WRITE]) |
654 | || !list_empty(&dd->fifo_list[READ]) | 575 | && list_empty(&dd->fifo_list[READ]); |
655 | || !list_empty(dd->dispatch)) | ||
656 | return 0; | ||
657 | |||
658 | return 1; | ||
659 | } | 576 | } |
660 | 577 | ||
661 | static struct request * | 578 | static struct request * |
@@ -733,7 +650,6 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e) | |||
733 | INIT_LIST_HEAD(&dd->fifo_list[WRITE]); | 650 | INIT_LIST_HEAD(&dd->fifo_list[WRITE]); |
734 | dd->sort_list[READ] = RB_ROOT; | 651 | dd->sort_list[READ] = RB_ROOT; |
735 | dd->sort_list[WRITE] = RB_ROOT; | 652 | dd->sort_list[WRITE] = RB_ROOT; |
736 | dd->dispatch = &q->queue_head; | ||
737 | dd->fifo_expire[READ] = read_expire; | 653 | dd->fifo_expire[READ] = read_expire; |
738 | dd->fifo_expire[WRITE] = write_expire; | 654 | dd->fifo_expire[WRITE] = write_expire; |
739 | dd->writes_starved = writes_starved; | 655 | dd->writes_starved = writes_starved; |
@@ -748,15 +664,13 @@ static void deadline_put_request(request_queue_t *q, struct request *rq) | |||
748 | struct deadline_data *dd = q->elevator->elevator_data; | 664 | struct deadline_data *dd = q->elevator->elevator_data; |
749 | struct deadline_rq *drq = RQ_DATA(rq); | 665 | struct deadline_rq *drq = RQ_DATA(rq); |
750 | 666 | ||
751 | if (drq) { | 667 | mempool_free(drq, dd->drq_pool); |
752 | mempool_free(drq, dd->drq_pool); | 668 | rq->elevator_private = NULL; |
753 | rq->elevator_private = NULL; | ||
754 | } | ||
755 | } | 669 | } |
756 | 670 | ||
757 | static int | 671 | static int |
758 | deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | 672 | deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio, |
759 | int gfp_mask) | 673 | gfp_t gfp_mask) |
760 | { | 674 | { |
761 | struct deadline_data *dd = q->elevator->elevator_data; | 675 | struct deadline_data *dd = q->elevator->elevator_data; |
762 | struct deadline_rq *drq; | 676 | struct deadline_rq *drq; |
@@ -917,9 +831,8 @@ static struct elevator_type iosched_deadline = { | |||
917 | .elevator_merge_fn = deadline_merge, | 831 | .elevator_merge_fn = deadline_merge, |
918 | .elevator_merged_fn = deadline_merged_request, | 832 | .elevator_merged_fn = deadline_merged_request, |
919 | .elevator_merge_req_fn = deadline_merged_requests, | 833 | .elevator_merge_req_fn = deadline_merged_requests, |
920 | .elevator_next_req_fn = deadline_next_request, | 834 | .elevator_dispatch_fn = deadline_dispatch_requests, |
921 | .elevator_add_req_fn = deadline_insert_request, | 835 | .elevator_add_req_fn = deadline_add_request, |
922 | .elevator_remove_req_fn = deadline_remove_request, | ||
923 | .elevator_queue_empty_fn = deadline_queue_empty, | 836 | .elevator_queue_empty_fn = deadline_queue_empty, |
924 | .elevator_former_req_fn = deadline_former_request, | 837 | .elevator_former_req_fn = deadline_former_request, |
925 | .elevator_latter_req_fn = deadline_latter_request, | 838 | .elevator_latter_req_fn = deadline_latter_request, |
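Both CFQ and deadline now feed requests into the shared q->queue_head through elevator_dispatch_fn instead of owning a private dispatch list. For orientation, a minimal FIFO scheduler written against the reworked interface might look like the sketch below (illustrative only: the hook names come from this diff, everything prefixed sketch_ is invented, and module registration via elv_register()/elv_unregister() is omitted):

        #include <linux/blkdev.h>
        #include <linux/elevator.h>
        #include <linux/module.h>
        #include <linux/slab.h>

        struct sketch_data {
                struct list_head queue;         /* not yet dispatched */
        };

        static void sketch_add_request(request_queue_t *q, struct request *rq)
        {
                struct sketch_data *sd = q->elevator->elevator_data;

                /* only ELEVATOR_INSERT_SORT requests reach the scheduler now;
                 * front/back inserts are handled in __elv_add_request() */
                list_add_tail(&rq->queuelist, &sd->queue);
        }

        static int sketch_dispatch(request_queue_t *q, int force)
        {
                struct sketch_data *sd = q->elevator->elevator_data;
                struct request *rq;

                if (list_empty(&sd->queue))
                        return 0;

                /* rq->queuelist doubles as the dispatch-list link, so take
                 * the request off the private list before handing it over */
                rq = list_entry_rq(sd->queue.next);
                list_del_init(&rq->queuelist);
                elv_dispatch_add_tail(q, rq);
                return 1;
        }

        static int sketch_queue_empty(request_queue_t *q)
        {
                struct sketch_data *sd = q->elevator->elevator_data;

                return list_empty(&sd->queue);
        }

        static int sketch_init_queue(request_queue_t *q, elevator_t *e)
        {
                struct sketch_data *sd = kmalloc(sizeof(*sd), GFP_KERNEL);

                if (!sd)
                        return -ENOMEM;
                INIT_LIST_HEAD(&sd->queue);
                e->elevator_data = sd;
                return 0;
        }

        static void sketch_exit_queue(elevator_t *e)
        {
                kfree(e->elevator_data);
        }

        static struct elevator_type iosched_sketch = {
                .ops = {
                        .elevator_dispatch_fn    = sketch_dispatch,
                        .elevator_add_req_fn     = sketch_add_request,
                        .elevator_queue_empty_fn = sketch_queue_empty,
                        .elevator_init_fn        = sketch_init_queue,
                        .elevator_exit_fn        = sketch_exit_queue,
                },
                .elevator_name  = "sketch",
                .elevator_owner = THIS_MODULE,
        };

With this split the core can requeue, reorder around barriers, or drain the dispatch list without every scheduler re-implementing ELEVATOR_INSERT_FRONT/BACK handling, which is exactly the code the removed deadline_insert_request()/deadline_next_request() pair used to carry.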
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c index 98f0126a2deb..36f1057084b0 100644 --- a/drivers/block/elevator.c +++ b/drivers/block/elevator.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | #include <linux/compiler.h> | 36 | #include <linux/compiler.h> |
37 | #include <linux/delay.h> | ||
37 | 38 | ||
38 | #include <asm/uaccess.h> | 39 | #include <asm/uaccess.h> |
39 | 40 | ||
@@ -83,21 +84,11 @@ inline int elv_try_merge(struct request *__rq, struct bio *bio) | |||
83 | } | 84 | } |
84 | EXPORT_SYMBOL(elv_try_merge); | 85 | EXPORT_SYMBOL(elv_try_merge); |
85 | 86 | ||
86 | inline int elv_try_last_merge(request_queue_t *q, struct bio *bio) | ||
87 | { | ||
88 | if (q->last_merge) | ||
89 | return elv_try_merge(q->last_merge, bio); | ||
90 | |||
91 | return ELEVATOR_NO_MERGE; | ||
92 | } | ||
93 | EXPORT_SYMBOL(elv_try_last_merge); | ||
94 | |||
95 | static struct elevator_type *elevator_find(const char *name) | 87 | static struct elevator_type *elevator_find(const char *name) |
96 | { | 88 | { |
97 | struct elevator_type *e = NULL; | 89 | struct elevator_type *e = NULL; |
98 | struct list_head *entry; | 90 | struct list_head *entry; |
99 | 91 | ||
100 | spin_lock_irq(&elv_list_lock); | ||
101 | list_for_each(entry, &elv_list) { | 92 | list_for_each(entry, &elv_list) { |
102 | struct elevator_type *__e; | 93 | struct elevator_type *__e; |
103 | 94 | ||
@@ -108,7 +99,6 @@ static struct elevator_type *elevator_find(const char *name) | |||
108 | break; | 99 | break; |
109 | } | 100 | } |
110 | } | 101 | } |
111 | spin_unlock_irq(&elv_list_lock); | ||
112 | 102 | ||
113 | return e; | 103 | return e; |
114 | } | 104 | } |
@@ -120,12 +110,15 @@ static void elevator_put(struct elevator_type *e) | |||
120 | 110 | ||
121 | static struct elevator_type *elevator_get(const char *name) | 111 | static struct elevator_type *elevator_get(const char *name) |
122 | { | 112 | { |
123 | struct elevator_type *e = elevator_find(name); | 113 | struct elevator_type *e; |
124 | 114 | ||
125 | if (!e) | 115 | spin_lock_irq(&elv_list_lock); |
126 | return NULL; | 116 | |
127 | if (!try_module_get(e->elevator_owner)) | 117 | e = elevator_find(name); |
128 | return NULL; | 118 | if (e && !try_module_get(e->elevator_owner)) |
119 | e = NULL; | ||
120 | |||
121 | spin_unlock_irq(&elv_list_lock); | ||
129 | 122 | ||
130 | return e; | 123 | return e; |
131 | } | 124 | } |
@@ -139,8 +132,6 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e, | |||
139 | eq->ops = &e->ops; | 132 | eq->ops = &e->ops; |
140 | eq->elevator_type = e; | 133 | eq->elevator_type = e; |
141 | 134 | ||
142 | INIT_LIST_HEAD(&q->queue_head); | ||
143 | q->last_merge = NULL; | ||
144 | q->elevator = eq; | 135 | q->elevator = eq; |
145 | 136 | ||
146 | if (eq->ops->elevator_init_fn) | 137 | if (eq->ops->elevator_init_fn) |
@@ -153,23 +144,20 @@ static char chosen_elevator[16]; | |||
153 | 144 | ||
154 | static void elevator_setup_default(void) | 145 | static void elevator_setup_default(void) |
155 | { | 146 | { |
147 | struct elevator_type *e; | ||
148 | |||
156 | /* | 149 | /* |
157 | * check if default is set and exists | 150 | * If default has not been set, use the compiled-in selection. |
158 | */ | 151 | */ |
159 | if (chosen_elevator[0] && elevator_find(chosen_elevator)) | 152 | if (!chosen_elevator[0]) |
160 | return; | 153 | strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED); |
161 | 154 | ||
162 | #if defined(CONFIG_IOSCHED_AS) | 155 | /* |
163 | strcpy(chosen_elevator, "anticipatory"); | 156 | * If the given scheduler is not available, fall back to no-op. |
164 | #elif defined(CONFIG_IOSCHED_DEADLINE) | 157 | */ |
165 | strcpy(chosen_elevator, "deadline"); | 158 | if (!(e = elevator_find(chosen_elevator))) |
166 | #elif defined(CONFIG_IOSCHED_CFQ) | 159 | strcpy(chosen_elevator, "noop"); |
167 | strcpy(chosen_elevator, "cfq"); | 160 | elevator_put(e); |
168 | #elif defined(CONFIG_IOSCHED_NOOP) | ||
169 | strcpy(chosen_elevator, "noop"); | ||
170 | #else | ||
171 | #error "You must build at least 1 IO scheduler into the kernel" | ||
172 | #endif | ||
173 | } | 161 | } |
174 | 162 | ||
175 | static int __init elevator_setup(char *str) | 163 | static int __init elevator_setup(char *str) |
@@ -186,6 +174,11 @@ int elevator_init(request_queue_t *q, char *name) | |||
186 | struct elevator_queue *eq; | 174 | struct elevator_queue *eq; |
187 | int ret = 0; | 175 | int ret = 0; |
188 | 176 | ||
177 | INIT_LIST_HEAD(&q->queue_head); | ||
178 | q->last_merge = NULL; | ||
179 | q->end_sector = 0; | ||
180 | q->boundary_rq = NULL; | ||
181 | |||
189 | elevator_setup_default(); | 182 | elevator_setup_default(); |
190 | 183 | ||
191 | if (!name) | 184 | if (!name) |
@@ -220,9 +213,52 @@ void elevator_exit(elevator_t *e) | |||
220 | kfree(e); | 213 | kfree(e); |
221 | } | 214 | } |
222 | 215 | ||
216 | /* | ||
217 | * Insert rq into dispatch queue of q. Queue lock must be held on | ||
218 | * entry. If sort != 0, rq is sort-inserted; otherwise, rq will be | ||
219 | * appended to the dispatch queue. To be used by specific elevators. | ||
220 | */ | ||
221 | void elv_dispatch_sort(request_queue_t *q, struct request *rq) | ||
222 | { | ||
223 | sector_t boundary; | ||
224 | struct list_head *entry; | ||
225 | |||
226 | if (q->last_merge == rq) | ||
227 | q->last_merge = NULL; | ||
228 | |||
229 | boundary = q->end_sector; | ||
230 | |||
231 | list_for_each_prev(entry, &q->queue_head) { | ||
232 | struct request *pos = list_entry_rq(entry); | ||
233 | |||
234 | if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED)) | ||
235 | break; | ||
236 | if (rq->sector >= boundary) { | ||
237 | if (pos->sector < boundary) | ||
238 | continue; | ||
239 | } else { | ||
240 | if (pos->sector >= boundary) | ||
241 | break; | ||
242 | } | ||
243 | if (rq->sector >= pos->sector) | ||
244 | break; | ||
245 | } | ||
246 | |||
247 | list_add(&rq->queuelist, entry); | ||
248 | } | ||
249 | |||
223 | int elv_merge(request_queue_t *q, struct request **req, struct bio *bio) | 250 | int elv_merge(request_queue_t *q, struct request **req, struct bio *bio) |
224 | { | 251 | { |
225 | elevator_t *e = q->elevator; | 252 | elevator_t *e = q->elevator; |
253 | int ret; | ||
254 | |||
255 | if (q->last_merge) { | ||
256 | ret = elv_try_merge(q->last_merge, bio); | ||
257 | if (ret != ELEVATOR_NO_MERGE) { | ||
258 | *req = q->last_merge; | ||
259 | return ret; | ||
260 | } | ||
261 | } | ||
226 | 262 | ||
227 | if (e->ops->elevator_merge_fn) | 263 | if (e->ops->elevator_merge_fn) |
228 | return e->ops->elevator_merge_fn(q, req, bio); | 264 | return e->ops->elevator_merge_fn(q, req, bio); |
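elv_dispatch_sort() above keeps the dispatch list in one-way elevator order around q->end_sector: requests at or beyond the boundary come first in ascending sector order, then the ones that wrapped back below it, and the backwards scan never crosses a barrier or an already-started request. A user-space toy applying the same comparisons to bare sector numbers shows the ordering (purely illustrative, nothing here is kernel code):

        #include <stdio.h>

        /* Index at which a new sector would be inserted, using the same
         * boundary tests as elv_dispatch_sort(): scan from the tail and
         * stop at the first entry the new request may not pass. Barrier
         * and REQ_STARTED checks are omitted in this toy. */
        static int sort_pos(const unsigned long *sec, int n,
                            unsigned long rq, unsigned long boundary)
        {
                int i;

                for (i = n - 1; i >= 0; i--) {
                        unsigned long pos = sec[i];

                        if (rq >= boundary) {
                                if (pos < boundary)
                                        continue;
                        } else {
                                if (pos >= boundary)
                                        break;
                        }
                        if (rq >= pos)
                                break;
                }
                return i + 1;                   /* insert after element i */
        }

        int main(void)
        {
                unsigned long q[] = { 1200, 4096, 64, 512 };    /* head..tail */

                /* 2000 is past the boundary: slots between 1200 and 4096 */
                printf("%d\n", sort_pos(q, 4, 2000, 1000));     /* prints 1 */
                /* 300 wrapped below it: slots between 64 and 512 */
                printf("%d\n", sort_pos(q, 4, 300, 1000));      /* prints 3 */
                return 0;
        }

If the scan runs off the head (i reaches -1) the request goes first, which matches list_add() against &q->queue_head in the kernel loop.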
@@ -236,6 +272,8 @@ void elv_merged_request(request_queue_t *q, struct request *rq) | |||
236 | 272 | ||
237 | if (e->ops->elevator_merged_fn) | 273 | if (e->ops->elevator_merged_fn) |
238 | e->ops->elevator_merged_fn(q, rq); | 274 | e->ops->elevator_merged_fn(q, rq); |
275 | |||
276 | q->last_merge = rq; | ||
239 | } | 277 | } |
240 | 278 | ||
241 | void elv_merge_requests(request_queue_t *q, struct request *rq, | 279 | void elv_merge_requests(request_queue_t *q, struct request *rq, |
@@ -243,20 +281,13 @@ void elv_merge_requests(request_queue_t *q, struct request *rq, | |||
243 | { | 281 | { |
244 | elevator_t *e = q->elevator; | 282 | elevator_t *e = q->elevator; |
245 | 283 | ||
246 | if (q->last_merge == next) | ||
247 | q->last_merge = NULL; | ||
248 | |||
249 | if (e->ops->elevator_merge_req_fn) | 284 | if (e->ops->elevator_merge_req_fn) |
250 | e->ops->elevator_merge_req_fn(q, rq, next); | 285 | e->ops->elevator_merge_req_fn(q, rq, next); |
286 | |||
287 | q->last_merge = rq; | ||
251 | } | 288 | } |
252 | 289 | ||
253 | /* | 290 | void elv_requeue_request(request_queue_t *q, struct request *rq) |
254 | * For careful internal use by the block layer. Essentially the same as | ||
255 | * a requeue in that it tells the io scheduler that this request is not | ||
256 | * active in the driver or hardware anymore, but we don't want the request | ||
257 | * added back to the scheduler. Function is not exported. | ||
258 | */ | ||
259 | void elv_deactivate_request(request_queue_t *q, struct request *rq) | ||
260 | { | 291 | { |
261 | elevator_t *e = q->elevator; | 292 | elevator_t *e = q->elevator; |
262 | 293 | ||
@@ -264,19 +295,14 @@ void elv_deactivate_request(request_queue_t *q, struct request *rq) | |||
264 | * it already went through dequeue, we need to decrement the | 295 | * it already went through dequeue, we need to decrement the |
265 | * in_flight count again | 296 | * in_flight count again |
266 | */ | 297 | */ |
267 | if (blk_account_rq(rq)) | 298 | if (blk_account_rq(rq)) { |
268 | q->in_flight--; | 299 | q->in_flight--; |
300 | if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn) | ||
301 | e->ops->elevator_deactivate_req_fn(q, rq); | ||
302 | } | ||
269 | 303 | ||
270 | rq->flags &= ~REQ_STARTED; | 304 | rq->flags &= ~REQ_STARTED; |
271 | 305 | ||
272 | if (e->ops->elevator_deactivate_req_fn) | ||
273 | e->ops->elevator_deactivate_req_fn(q, rq); | ||
274 | } | ||
275 | |||
276 | void elv_requeue_request(request_queue_t *q, struct request *rq) | ||
277 | { | ||
278 | elv_deactivate_request(q, rq); | ||
279 | |||
280 | /* | 306 | /* |
281 | * if this is the flush, requeue the original instead and drop the flush | 307 | * if this is the flush, requeue the original instead and drop the flush |
282 | */ | 308 | */ |
@@ -285,31 +311,27 @@ void elv_requeue_request(request_queue_t *q, struct request *rq) | |||
285 | rq = rq->end_io_data; | 311 | rq = rq->end_io_data; |
286 | } | 312 | } |
287 | 313 | ||
288 | /* | 314 | __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0); |
289 | * the request is prepped and may have some resources allocated. | ||
290 | * allowing unprepped requests to pass this one may cause resource | ||
291 | * deadlock. turn on softbarrier. | ||
292 | */ | ||
293 | rq->flags |= REQ_SOFTBARRIER; | ||
294 | |||
295 | /* | ||
296 | * if iosched has an explicit requeue hook, then use that. otherwise | ||
297 | * just put the request at the front of the queue | ||
298 | */ | ||
299 | if (q->elevator->ops->elevator_requeue_req_fn) | ||
300 | q->elevator->ops->elevator_requeue_req_fn(q, rq); | ||
301 | else | ||
302 | __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0); | ||
303 | } | 315 | } |
304 | 316 | ||
305 | void __elv_add_request(request_queue_t *q, struct request *rq, int where, | 317 | void __elv_add_request(request_queue_t *q, struct request *rq, int where, |
306 | int plug) | 318 | int plug) |
307 | { | 319 | { |
308 | /* | 320 | if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) { |
309 | * barriers implicitly indicate back insertion | 321 | /* |
310 | */ | 322 | * barriers implicitly indicate back insertion |
311 | if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) && | 323 | */ |
312 | where == ELEVATOR_INSERT_SORT) | 324 | if (where == ELEVATOR_INSERT_SORT) |
325 | where = ELEVATOR_INSERT_BACK; | ||
326 | |||
327 | /* | ||
328 | * this request is scheduling boundary, update end_sector | ||
329 | */ | ||
330 | if (blk_fs_request(rq)) { | ||
331 | q->end_sector = rq_end_sector(rq); | ||
332 | q->boundary_rq = rq; | ||
333 | } | ||
334 | } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT) | ||
313 | where = ELEVATOR_INSERT_BACK; | 335 | where = ELEVATOR_INSERT_BACK; |
314 | 336 | ||
315 | if (plug) | 337 | if (plug) |
@@ -317,23 +339,54 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where, | |||
317 | 339 | ||
318 | rq->q = q; | 340 | rq->q = q; |
319 | 341 | ||
320 | if (!test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) { | 342 | switch (where) { |
321 | q->elevator->ops->elevator_add_req_fn(q, rq, where); | 343 | case ELEVATOR_INSERT_FRONT: |
344 | rq->flags |= REQ_SOFTBARRIER; | ||
322 | 345 | ||
323 | if (blk_queue_plugged(q)) { | 346 | list_add(&rq->queuelist, &q->queue_head); |
324 | int nrq = q->rq.count[READ] + q->rq.count[WRITE] | 347 | break; |
325 | - q->in_flight; | ||
326 | 348 | ||
327 | if (nrq >= q->unplug_thresh) | 349 | case ELEVATOR_INSERT_BACK: |
328 | __generic_unplug_device(q); | 350 | rq->flags |= REQ_SOFTBARRIER; |
329 | } | 351 | |
330 | } else | 352 | while (q->elevator->ops->elevator_dispatch_fn(q, 1)) |
353 | ; | ||
354 | list_add_tail(&rq->queuelist, &q->queue_head); | ||
331 | /* | 355 | /* |
332 | * if drain is set, store the request "locally". when the drain | 356 | * We kick the queue here for the following reasons. |
333 | * is finished, the requests will be handed ordered to the io | 357 | * - The elevator might have returned NULL previously |
334 | * scheduler | 358 | * to delay requests and returned them now. As the |
359 | * queue wasn't empty before this request, ll_rw_blk | ||
360 | * won't run the queue on return, resulting in hang. | ||
361 | * - Usually, back inserted requests won't be merged | ||
362 | * with anything. There's no point in delaying queue | ||
363 | * processing. | ||
335 | */ | 364 | */ |
336 | list_add_tail(&rq->queuelist, &q->drain_list); | 365 | blk_remove_plug(q); |
366 | q->request_fn(q); | ||
367 | break; | ||
368 | |||
369 | case ELEVATOR_INSERT_SORT: | ||
370 | BUG_ON(!blk_fs_request(rq)); | ||
371 | rq->flags |= REQ_SORTED; | ||
372 | q->elevator->ops->elevator_add_req_fn(q, rq); | ||
373 | if (q->last_merge == NULL && rq_mergeable(rq)) | ||
374 | q->last_merge = rq; | ||
375 | break; | ||
376 | |||
377 | default: | ||
378 | printk(KERN_ERR "%s: bad insertion point %d\n", | ||
379 | __FUNCTION__, where); | ||
380 | BUG(); | ||
381 | } | ||
382 | |||
383 | if (blk_queue_plugged(q)) { | ||
384 | int nrq = q->rq.count[READ] + q->rq.count[WRITE] | ||
385 | - q->in_flight; | ||
386 | |||
387 | if (nrq >= q->unplug_thresh) | ||
388 | __generic_unplug_device(q); | ||
389 | } | ||
337 | } | 390 | } |
338 | 391 | ||
339 | void elv_add_request(request_queue_t *q, struct request *rq, int where, | 392 | void elv_add_request(request_queue_t *q, struct request *rq, int where, |
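__elv_add_request() expects q->queue_lock to be held by the caller; the exported elv_add_request() whose signature opens the next hunk is, presumably unchanged by this patch, just the irqsave wrapper, sketched here to make that locking contract explicit:

        void elv_add_request(request_queue_t *q, struct request *rq, int where,
                             int plug)
        {
                unsigned long flags;

                spin_lock_irqsave(q->queue_lock, flags);
                __elv_add_request(q, rq, where, plug);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }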
@@ -348,13 +401,19 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where, | |||
348 | 401 | ||
349 | static inline struct request *__elv_next_request(request_queue_t *q) | 402 | static inline struct request *__elv_next_request(request_queue_t *q) |
350 | { | 403 | { |
351 | struct request *rq = q->elevator->ops->elevator_next_req_fn(q); | 404 | struct request *rq; |
405 | |||
406 | if (unlikely(list_empty(&q->queue_head) && | ||
407 | !q->elevator->ops->elevator_dispatch_fn(q, 0))) | ||
408 | return NULL; | ||
409 | |||
410 | rq = list_entry_rq(q->queue_head.next); | ||
352 | 411 | ||
353 | /* | 412 | /* |
354 | * if this is a barrier write and the device has to issue a | 413 | * if this is a barrier write and the device has to issue a |
355 | * flush sequence to support it, check how far we are | 414 | * flush sequence to support it, check how far we are |
356 | */ | 415 | */ |
357 | if (rq && blk_fs_request(rq) && blk_barrier_rq(rq)) { | 416 | if (blk_fs_request(rq) && blk_barrier_rq(rq)) { |
358 | BUG_ON(q->ordered == QUEUE_ORDERED_NONE); | 417 | BUG_ON(q->ordered == QUEUE_ORDERED_NONE); |
359 | 418 | ||
360 | if (q->ordered == QUEUE_ORDERED_FLUSH && | 419 | if (q->ordered == QUEUE_ORDERED_FLUSH && |
@@ -371,15 +430,30 @@ struct request *elv_next_request(request_queue_t *q) | |||
371 | int ret; | 430 | int ret; |
372 | 431 | ||
373 | while ((rq = __elv_next_request(q)) != NULL) { | 432 | while ((rq = __elv_next_request(q)) != NULL) { |
374 | /* | 433 | if (!(rq->flags & REQ_STARTED)) { |
375 | * just mark as started even if we don't start it, a request | 434 | elevator_t *e = q->elevator; |
376 | * that has been delayed should not be passed by new incoming | 435 | |
377 | * requests | 436 | /* |
378 | */ | 437 | * This is the first time the device driver |
379 | rq->flags |= REQ_STARTED; | 438 | * sees this request (possibly after |
439 | * requeueing). Notify IO scheduler. | ||
440 | */ | ||
441 | if (blk_sorted_rq(rq) && | ||
442 | e->ops->elevator_activate_req_fn) | ||
443 | e->ops->elevator_activate_req_fn(q, rq); | ||
444 | |||
445 | /* | ||
446 | * just mark as started even if we don't start | ||
447 | * it, a request that has been delayed should | ||
448 | * not be passed by new incoming requests | ||
449 | */ | ||
450 | rq->flags |= REQ_STARTED; | ||
451 | } | ||
380 | 452 | ||
381 | if (rq == q->last_merge) | 453 | if (!q->boundary_rq || q->boundary_rq == rq) { |
382 | q->last_merge = NULL; | 454 | q->end_sector = rq_end_sector(rq); |
455 | q->boundary_rq = NULL; | ||
456 | } | ||
383 | 457 | ||
384 | if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn) | 458 | if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn) |
385 | break; | 459 | break; |
@@ -391,9 +465,9 @@ struct request *elv_next_request(request_queue_t *q) | |||
391 | /* | 465 | /* |
392 | * the request may have been (partially) prepped. | 466 | * the request may have been (partially) prepped. |
393 | * we need to keep this request in the front to | 467 | * we need to keep this request in the front to |
394 | * avoid resource deadlock. turn on softbarrier. | 468 | * avoid resource deadlock. REQ_STARTED will |
469 | * prevent other fs requests from passing this one. | ||
395 | */ | 470 | */ |
396 | rq->flags |= REQ_SOFTBARRIER; | ||
397 | rq = NULL; | 471 | rq = NULL; |
398 | break; | 472 | break; |
399 | } else if (ret == BLKPREP_KILL) { | 473 | } else if (ret == BLKPREP_KILL) { |
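With elevator_next_req_fn gone, elv_next_request() itself refills q->queue_head through elevator_dispatch_fn and fires elevator_activate_req_fn the first time a driver sees a sorted request; the driver side is unchanged. A typical strategy routine in the style of the simple 2.6 block drivers still looks roughly like this (hedged sketch: my_transfer() and my_dev are placeholders):

        static void my_request_fn(request_queue_t *q)
        {
                struct request *rq;

                /* called with q->queue_lock held */
                while ((rq = elv_next_request(q)) != NULL) {
                        if (!blk_fs_request(rq)) {
                                end_request(rq, 0);     /* fail non-fs requests */
                                continue;
                        }

                        /* REQ_STARTED is already set; the io scheduler has
                         * been told via elevator_activate_req_fn if it cares */
                        my_transfer(my_dev, rq->sector, rq->current_nr_sectors,
                                    rq->buffer, rq_data_dir(rq));
                        end_request(rq, 1);             /* also dequeues when done */
                }
        }

Requests a driver later requeues go back through elv_requeue_request(), which now simply front-inserts them and lets the deactivate hook undo the activate notification.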
@@ -416,42 +490,32 @@ struct request *elv_next_request(request_queue_t *q) | |||
416 | return rq; | 490 | return rq; |
417 | } | 491 | } |
418 | 492 | ||
419 | void elv_remove_request(request_queue_t *q, struct request *rq) | 493 | void elv_dequeue_request(request_queue_t *q, struct request *rq) |
420 | { | 494 | { |
421 | elevator_t *e = q->elevator; | 495 | BUG_ON(list_empty(&rq->queuelist)); |
496 | |||
497 | list_del_init(&rq->queuelist); | ||
422 | 498 | ||
423 | /* | 499 | /* |
424 | * the time frame between a request being removed from the lists | 500 | * the time frame between a request being removed from the lists |
425 | * and to it is freed is accounted as io that is in progress at | 501 | * and to it is freed is accounted as io that is in progress at |
426 | * the driver side. note that we only account requests that the | 502 | * the driver side. |
427 | * driver has seen (REQ_STARTED set), to avoid false accounting | ||
428 | * for request-request merges | ||
429 | */ | 503 | */ |
430 | if (blk_account_rq(rq)) | 504 | if (blk_account_rq(rq)) |
431 | q->in_flight++; | 505 | q->in_flight++; |
432 | |||
433 | /* | ||
434 | * the main clearing point for q->last_merge is on retrieval of | ||
435 | * request by driver (it calls elv_next_request()), but it _can_ | ||
436 | * also happen here if a request is added to the queue but later | ||
437 | * deleted without ever being given to driver (merged with another | ||
438 | * request). | ||
439 | */ | ||
440 | if (rq == q->last_merge) | ||
441 | q->last_merge = NULL; | ||
442 | |||
443 | if (e->ops->elevator_remove_req_fn) | ||
444 | e->ops->elevator_remove_req_fn(q, rq); | ||
445 | } | 506 | } |
446 | 507 | ||
447 | int elv_queue_empty(request_queue_t *q) | 508 | int elv_queue_empty(request_queue_t *q) |
448 | { | 509 | { |
449 | elevator_t *e = q->elevator; | 510 | elevator_t *e = q->elevator; |
450 | 511 | ||
512 | if (!list_empty(&q->queue_head)) | ||
513 | return 0; | ||
514 | |||
451 | if (e->ops->elevator_queue_empty_fn) | 515 | if (e->ops->elevator_queue_empty_fn) |
452 | return e->ops->elevator_queue_empty_fn(q); | 516 | return e->ops->elevator_queue_empty_fn(q); |
453 | 517 | ||
454 | return list_empty(&q->queue_head); | 518 | return 1; |
455 | } | 519 | } |
456 | 520 | ||
457 | struct request *elv_latter_request(request_queue_t *q, struct request *rq) | 521 | struct request *elv_latter_request(request_queue_t *q, struct request *rq) |
@@ -487,7 +551,7 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq) | |||
487 | } | 551 | } |
488 | 552 | ||
489 | int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | 553 | int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio, |
490 | int gfp_mask) | 554 | gfp_t gfp_mask) |
491 | { | 555 | { |
492 | elevator_t *e = q->elevator; | 556 | elevator_t *e = q->elevator; |
493 | 557 | ||
@@ -523,11 +587,11 @@ void elv_completed_request(request_queue_t *q, struct request *rq) | |||
523 | /* | 587 | /* |
524 | * request is released from the driver, io must be done | 588 | * request is released from the driver, io must be done |
525 | */ | 589 | */ |
526 | if (blk_account_rq(rq)) | 590 | if (blk_account_rq(rq)) { |
527 | q->in_flight--; | 591 | q->in_flight--; |
528 | 592 | if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn) | |
529 | if (e->ops->elevator_completed_req_fn) | 593 | e->ops->elevator_completed_req_fn(q, rq); |
530 | e->ops->elevator_completed_req_fn(q, rq); | 594 | } |
531 | } | 595 | } |
532 | 596 | ||
533 | int elv_register_queue(struct request_queue *q) | 597 | int elv_register_queue(struct request_queue *q) |
@@ -555,10 +619,9 @@ void elv_unregister_queue(struct request_queue *q) | |||
555 | 619 | ||
556 | int elv_register(struct elevator_type *e) | 620 | int elv_register(struct elevator_type *e) |
557 | { | 621 | { |
622 | spin_lock_irq(&elv_list_lock); | ||
558 | if (elevator_find(e->elevator_name)) | 623 | if (elevator_find(e->elevator_name)) |
559 | BUG(); | 624 | BUG(); |
560 | |||
561 | spin_lock_irq(&elv_list_lock); | ||
562 | list_add_tail(&e->list, &elv_list); | 625 | list_add_tail(&e->list, &elv_list); |
563 | spin_unlock_irq(&elv_list_lock); | 626 | spin_unlock_irq(&elv_list_lock); |
564 | 627 | ||
@@ -572,6 +635,27 @@ EXPORT_SYMBOL_GPL(elv_register); | |||
572 | 635 | ||
573 | void elv_unregister(struct elevator_type *e) | 636 | void elv_unregister(struct elevator_type *e) |
574 | { | 637 | { |
638 | struct task_struct *g, *p; | ||
639 | |||
640 | /* | ||
641 | * Iterate every thread in the process to remove the io contexts. | ||
642 | */ | ||
643 | read_lock(&tasklist_lock); | ||
644 | do_each_thread(g, p) { | ||
645 | struct io_context *ioc = p->io_context; | ||
646 | if (ioc && ioc->cic) { | ||
647 | ioc->cic->exit(ioc->cic); | ||
648 | ioc->cic->dtor(ioc->cic); | ||
649 | ioc->cic = NULL; | ||
650 | } | ||
651 | if (ioc && ioc->aic) { | ||
652 | ioc->aic->exit(ioc->aic); | ||
653 | ioc->aic->dtor(ioc->aic); | ||
654 | ioc->aic = NULL; | ||
655 | } | ||
656 | } while_each_thread(g, p); | ||
657 | read_unlock(&tasklist_lock); | ||
658 | |||
575 | spin_lock_irq(&elv_list_lock); | 659 | spin_lock_irq(&elv_list_lock); |
576 | list_del_init(&e->list); | 660 | list_del_init(&e->list); |
577 | spin_unlock_irq(&elv_list_lock); | 661 | spin_unlock_irq(&elv_list_lock); |
@@ -582,25 +666,36 @@ EXPORT_SYMBOL_GPL(elv_unregister); | |||
582 | * switch to new_e io scheduler. be careful not to introduce deadlocks - | 666 | * switch to new_e io scheduler. be careful not to introduce deadlocks - |
583 | * we don't free the old io scheduler, before we have allocated what we | 667 | * we don't free the old io scheduler, before we have allocated what we |
584 | * need for the new one. this way we have a chance of going back to the old | 668 | * need for the new one. this way we have a chance of going back to the old |
585 | * one, if the new one fails init for some reason. we also do an intermediate | 669 | * one, if the new one fails init for some reason. |
586 | * switch to noop to ensure safety with stack-allocated requests, since they | ||
587 | * don't originate from the block layer allocator. noop is safe here, because | ||
588 | * it never needs to touch the elevator itself for completion events. DRAIN | ||
589 | * flags will make sure we don't touch it for additions either. | ||
590 | */ | 670 | */ |
591 | static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) | 671 | static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) |
592 | { | 672 | { |
593 | elevator_t *e = kmalloc(sizeof(elevator_t), GFP_KERNEL); | 673 | elevator_t *old_elevator, *e; |
594 | struct elevator_type *noop_elevator = NULL; | ||
595 | elevator_t *old_elevator; | ||
596 | 674 | ||
675 | /* | ||
676 | * Allocate new elevator | ||
677 | */ | ||
678 | e = kmalloc(sizeof(elevator_t), GFP_KERNEL); | ||
597 | if (!e) | 679 | if (!e) |
598 | goto error; | 680 | goto error; |
599 | 681 | ||
600 | /* | 682 | /* |
601 | * first step, drain requests from the block freelist | 683 | * Turn on BYPASS and drain all requests w/ elevator private data |
602 | */ | 684 | */ |
603 | blk_wait_queue_drained(q, 0); | 685 | spin_lock_irq(q->queue_lock); |
686 | |||
687 | set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | ||
688 | |||
689 | while (q->elevator->ops->elevator_dispatch_fn(q, 1)) | ||
690 | ; | ||
691 | |||
692 | while (q->rq.elvpriv) { | ||
693 | spin_unlock_irq(q->queue_lock); | ||
694 | msleep(10); | ||
695 | spin_lock_irq(q->queue_lock); | ||
696 | } | ||
697 | |||
698 | spin_unlock_irq(q->queue_lock); | ||
604 | 699 | ||
605 | /* | 700 | /* |
606 | * unregister old elevator data | 701 | * unregister old elevator data |
@@ -609,18 +704,6 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) | |||
609 | old_elevator = q->elevator; | 704 | old_elevator = q->elevator; |
610 | 705 | ||
611 | /* | 706 | /* |
612 | * next step, switch to noop since it uses no private rq structures | ||
613 | * and doesn't allocate any memory for anything. then wait for any | ||
614 | * non-fs requests in-flight | ||
615 | */ | ||
616 | noop_elevator = elevator_get("noop"); | ||
617 | spin_lock_irq(q->queue_lock); | ||
618 | elevator_attach(q, noop_elevator, e); | ||
619 | spin_unlock_irq(q->queue_lock); | ||
620 | |||
621 | blk_wait_queue_drained(q, 1); | ||
622 | |||
623 | /* | ||
624 | * attach and start new elevator | 707 | * attach and start new elevator |
625 | */ | 708 | */ |
626 | if (elevator_attach(q, new_e, e)) | 709 | if (elevator_attach(q, new_e, e)) |
@@ -630,11 +713,10 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) | |||
630 | goto fail_register; | 713 | goto fail_register; |
631 | 714 | ||
632 | /* | 715 | /* |
633 | * finally exit old elevator and start queue again | 716 | * finally exit old elevator and turn off BYPASS. |
634 | */ | 717 | */ |
635 | elevator_exit(old_elevator); | 718 | elevator_exit(old_elevator); |
636 | blk_finish_queue_drain(q); | 719 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); |
637 | elevator_put(noop_elevator); | ||
638 | return; | 720 | return; |
639 | 721 | ||
640 | fail_register: | 722 | fail_register: |
@@ -643,13 +725,13 @@ fail_register: | |||
643 | * one again (along with re-adding the sysfs dir) | 725 | * one again (along with re-adding the sysfs dir) |
644 | */ | 726 | */ |
645 | elevator_exit(e); | 727 | elevator_exit(e); |
728 | e = NULL; | ||
646 | fail: | 729 | fail: |
647 | q->elevator = old_elevator; | 730 | q->elevator = old_elevator; |
648 | elv_register_queue(q); | 731 | elv_register_queue(q); |
649 | blk_finish_queue_drain(q); | 732 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); |
733 | kfree(e); | ||
650 | error: | 734 | error: |
651 | if (noop_elevator) | ||
652 | elevator_put(noop_elevator); | ||
653 | elevator_put(new_e); | 735 | elevator_put(new_e); |
654 | printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name); | 736 | printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name); |
655 | } | 737 | } |
@@ -671,8 +753,10 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) | |||
671 | return -EINVAL; | 753 | return -EINVAL; |
672 | } | 754 | } |
673 | 755 | ||
674 | if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) | 756 | if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) { |
757 | elevator_put(e); | ||
675 | return count; | 758 | return count; |
759 | } | ||
676 | 760 | ||
677 | elevator_switch(q, e); | 761 | elevator_switch(q, e); |
678 | return count; | 762 | return count; |
@@ -701,11 +785,12 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name) | |||
701 | return len; | 785 | return len; |
702 | } | 786 | } |
703 | 787 | ||
788 | EXPORT_SYMBOL(elv_dispatch_sort); | ||
704 | EXPORT_SYMBOL(elv_add_request); | 789 | EXPORT_SYMBOL(elv_add_request); |
705 | EXPORT_SYMBOL(__elv_add_request); | 790 | EXPORT_SYMBOL(__elv_add_request); |
706 | EXPORT_SYMBOL(elv_requeue_request); | 791 | EXPORT_SYMBOL(elv_requeue_request); |
707 | EXPORT_SYMBOL(elv_next_request); | 792 | EXPORT_SYMBOL(elv_next_request); |
708 | EXPORT_SYMBOL(elv_remove_request); | 793 | EXPORT_SYMBOL(elv_dequeue_request); |
709 | EXPORT_SYMBOL(elv_queue_empty); | 794 | EXPORT_SYMBOL(elv_queue_empty); |
710 | EXPORT_SYMBOL(elv_completed_request); | 795 | EXPORT_SYMBOL(elv_completed_request); |
711 | EXPORT_SYMBOL(elevator_exit); | 796 | EXPORT_SYMBOL(elevator_exit); |
diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c index d42840cc0d1d..486ce1fdeb8c 100644 --- a/drivers/block/genhd.c +++ b/drivers/block/genhd.c | |||
@@ -337,10 +337,30 @@ static ssize_t disk_attr_show(struct kobject *kobj, struct attribute *attr, | |||
337 | return ret; | 337 | return ret; |
338 | } | 338 | } |
339 | 339 | ||
340 | static ssize_t disk_attr_store(struct kobject * kobj, struct attribute * attr, | ||
341 | const char *page, size_t count) | ||
342 | { | ||
343 | struct gendisk *disk = to_disk(kobj); | ||
344 | struct disk_attribute *disk_attr = | ||
345 | container_of(attr,struct disk_attribute,attr); | ||
346 | ssize_t ret = 0; | ||
347 | |||
348 | if (disk_attr->store) | ||
349 | ret = disk_attr->store(disk, page, count); | ||
350 | return ret; | ||
351 | } | ||
352 | |||
340 | static struct sysfs_ops disk_sysfs_ops = { | 353 | static struct sysfs_ops disk_sysfs_ops = { |
341 | .show = &disk_attr_show, | 354 | .show = &disk_attr_show, |
355 | .store = &disk_attr_store, | ||
342 | }; | 356 | }; |
343 | 357 | ||
358 | static ssize_t disk_uevent_store(struct gendisk * disk, | ||
359 | const char *buf, size_t count) | ||
360 | { | ||
361 | kobject_hotplug(&disk->kobj, KOBJ_ADD); | ||
362 | return count; | ||
363 | } | ||
344 | static ssize_t disk_dev_read(struct gendisk * disk, char *page) | 364 | static ssize_t disk_dev_read(struct gendisk * disk, char *page) |
345 | { | 365 | { |
346 | dev_t base = MKDEV(disk->major, disk->first_minor); | 366 | dev_t base = MKDEV(disk->major, disk->first_minor); |
@@ -382,6 +402,10 @@ static ssize_t disk_stats_read(struct gendisk * disk, char *page) | |||
382 | jiffies_to_msecs(disk_stat_read(disk, io_ticks)), | 402 | jiffies_to_msecs(disk_stat_read(disk, io_ticks)), |
383 | jiffies_to_msecs(disk_stat_read(disk, time_in_queue))); | 403 | jiffies_to_msecs(disk_stat_read(disk, time_in_queue))); |
384 | } | 404 | } |
405 | static struct disk_attribute disk_attr_uevent = { | ||
406 | .attr = {.name = "uevent", .mode = S_IWUSR }, | ||
407 | .store = disk_uevent_store | ||
408 | }; | ||
385 | static struct disk_attribute disk_attr_dev = { | 409 | static struct disk_attribute disk_attr_dev = { |
386 | .attr = {.name = "dev", .mode = S_IRUGO }, | 410 | .attr = {.name = "dev", .mode = S_IRUGO }, |
387 | .show = disk_dev_read | 411 | .show = disk_dev_read |
@@ -404,6 +428,7 @@ static struct disk_attribute disk_attr_stat = { | |||
404 | }; | 428 | }; |
405 | 429 | ||
406 | static struct attribute * default_attrs[] = { | 430 | static struct attribute * default_attrs[] = { |
431 | &disk_attr_uevent.attr, | ||
407 | &disk_attr_dev.attr, | 432 | &disk_attr_dev.attr, |
408 | &disk_attr_range.attr, | 433 | &disk_attr_range.attr, |
409 | &disk_attr_removable.attr, | 434 | &disk_attr_removable.attr, |
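With disk_sysfs_ops gaining a .store hook, writable attributes under /sys/block/<disk>/ become possible; the new "uevent" file simply re-emits a KOBJ_ADD hotplug event when written. Further writable attributes would follow the same pattern (sketch only; the attribute name and handler below are invented):

        static ssize_t disk_example_store(struct gendisk *disk,
                                          const char *buf, size_t count)
        {
                /* parse 'buf' and act on the disk; returning 'count'
                 * reports the whole write as consumed */
                return count;
        }

        static struct disk_attribute disk_attr_example = {
                .attr  = { .name = "example", .mode = S_IWUSR },
                .store = disk_example_store,
        };

        /* ...and list &disk_attr_example.attr in default_attrs[] */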
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c index baedac522945..0af73512b9a8 100644 --- a/drivers/block/ll_rw_blk.c +++ b/drivers/block/ll_rw_blk.c | |||
@@ -263,8 +263,6 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) | |||
263 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); | 263 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); |
264 | 264 | ||
265 | blk_queue_activity_fn(q, NULL, NULL); | 265 | blk_queue_activity_fn(q, NULL, NULL); |
266 | |||
267 | INIT_LIST_HEAD(&q->drain_list); | ||
268 | } | 266 | } |
269 | 267 | ||
270 | EXPORT_SYMBOL(blk_queue_make_request); | 268 | EXPORT_SYMBOL(blk_queue_make_request); |
@@ -353,6 +351,8 @@ static void blk_pre_flush_end_io(struct request *flush_rq) | |||
353 | struct request *rq = flush_rq->end_io_data; | 351 | struct request *rq = flush_rq->end_io_data; |
354 | request_queue_t *q = rq->q; | 352 | request_queue_t *q = rq->q; |
355 | 353 | ||
354 | elv_completed_request(q, flush_rq); | ||
355 | |||
356 | rq->flags |= REQ_BAR_PREFLUSH; | 356 | rq->flags |= REQ_BAR_PREFLUSH; |
357 | 357 | ||
358 | if (!flush_rq->errors) | 358 | if (!flush_rq->errors) |
@@ -369,6 +369,8 @@ static void blk_post_flush_end_io(struct request *flush_rq) | |||
369 | struct request *rq = flush_rq->end_io_data; | 369 | struct request *rq = flush_rq->end_io_data; |
370 | request_queue_t *q = rq->q; | 370 | request_queue_t *q = rq->q; |
371 | 371 | ||
372 | elv_completed_request(q, flush_rq); | ||
373 | |||
372 | rq->flags |= REQ_BAR_POSTFLUSH; | 374 | rq->flags |= REQ_BAR_POSTFLUSH; |
373 | 375 | ||
374 | q->end_flush_fn(q, flush_rq); | 376 | q->end_flush_fn(q, flush_rq); |
@@ -408,8 +410,6 @@ struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq) | |||
408 | if (!list_empty(&rq->queuelist)) | 410 | if (!list_empty(&rq->queuelist)) |
409 | blkdev_dequeue_request(rq); | 411 | blkdev_dequeue_request(rq); |
410 | 412 | ||
411 | elv_deactivate_request(q, rq); | ||
412 | |||
413 | flush_rq->end_io_data = rq; | 413 | flush_rq->end_io_data = rq; |
414 | flush_rq->end_io = blk_pre_flush_end_io; | 414 | flush_rq->end_io = blk_pre_flush_end_io; |
415 | 415 | ||
@@ -1040,6 +1040,7 @@ EXPORT_SYMBOL(blk_queue_invalidate_tags); | |||
1040 | static char *rq_flags[] = { | 1040 | static char *rq_flags[] = { |
1041 | "REQ_RW", | 1041 | "REQ_RW", |
1042 | "REQ_FAILFAST", | 1042 | "REQ_FAILFAST", |
1043 | "REQ_SORTED", | ||
1043 | "REQ_SOFTBARRIER", | 1044 | "REQ_SOFTBARRIER", |
1044 | "REQ_HARDBARRIER", | 1045 | "REQ_HARDBARRIER", |
1045 | "REQ_CMD", | 1046 | "REQ_CMD", |
@@ -1047,6 +1048,7 @@ static char *rq_flags[] = { | |||
1047 | "REQ_STARTED", | 1048 | "REQ_STARTED", |
1048 | "REQ_DONTPREP", | 1049 | "REQ_DONTPREP", |
1049 | "REQ_QUEUED", | 1050 | "REQ_QUEUED", |
1051 | "REQ_ELVPRIV", | ||
1050 | "REQ_PC", | 1052 | "REQ_PC", |
1051 | "REQ_BLOCK_PC", | 1053 | "REQ_BLOCK_PC", |
1052 | "REQ_SENSE", | 1054 | "REQ_SENSE", |
@@ -1637,9 +1639,9 @@ static int blk_init_free_list(request_queue_t *q) | |||
1637 | 1639 | ||
1638 | rl->count[READ] = rl->count[WRITE] = 0; | 1640 | rl->count[READ] = rl->count[WRITE] = 0; |
1639 | rl->starved[READ] = rl->starved[WRITE] = 0; | 1641 | rl->starved[READ] = rl->starved[WRITE] = 0; |
1642 | rl->elvpriv = 0; | ||
1640 | init_waitqueue_head(&rl->wait[READ]); | 1643 | init_waitqueue_head(&rl->wait[READ]); |
1641 | init_waitqueue_head(&rl->wait[WRITE]); | 1644 | init_waitqueue_head(&rl->wait[WRITE]); |
1642 | init_waitqueue_head(&rl->drain); | ||
1643 | 1645 | ||
1644 | rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, | 1646 | rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, |
1645 | mempool_free_slab, request_cachep, q->node); | 1647 | mempool_free_slab, request_cachep, q->node); |
@@ -1652,13 +1654,13 @@ static int blk_init_free_list(request_queue_t *q) | |||
1652 | 1654 | ||
1653 | static int __make_request(request_queue_t *, struct bio *); | 1655 | static int __make_request(request_queue_t *, struct bio *); |
1654 | 1656 | ||
1655 | request_queue_t *blk_alloc_queue(int gfp_mask) | 1657 | request_queue_t *blk_alloc_queue(gfp_t gfp_mask) |
1656 | { | 1658 | { |
1657 | return blk_alloc_queue_node(gfp_mask, -1); | 1659 | return blk_alloc_queue_node(gfp_mask, -1); |
1658 | } | 1660 | } |
1659 | EXPORT_SYMBOL(blk_alloc_queue); | 1661 | EXPORT_SYMBOL(blk_alloc_queue); |
1660 | 1662 | ||
1661 | request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id) | 1663 | request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) |
1662 | { | 1664 | { |
1663 | request_queue_t *q; | 1665 | request_queue_t *q; |
1664 | 1666 | ||
@@ -1782,12 +1784,14 @@ EXPORT_SYMBOL(blk_get_queue); | |||
1782 | 1784 | ||
1783 | static inline void blk_free_request(request_queue_t *q, struct request *rq) | 1785 | static inline void blk_free_request(request_queue_t *q, struct request *rq) |
1784 | { | 1786 | { |
1785 | elv_put_request(q, rq); | 1787 | if (rq->flags & REQ_ELVPRIV) |
1788 | elv_put_request(q, rq); | ||
1786 | mempool_free(rq, q->rq.rq_pool); | 1789 | mempool_free(rq, q->rq.rq_pool); |
1787 | } | 1790 | } |
1788 | 1791 | ||
1789 | static inline struct request * | 1792 | static inline struct request * |
1790 | blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask) | 1793 | blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, |
1794 | int priv, gfp_t gfp_mask) | ||
1791 | { | 1795 | { |
1792 | struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); | 1796 | struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); |
1793 | 1797 | ||
@@ -1800,11 +1804,15 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask) | |||
1800 | */ | 1804 | */ |
1801 | rq->flags = rw; | 1805 | rq->flags = rw; |
1802 | 1806 | ||
1803 | if (!elv_set_request(q, rq, bio, gfp_mask)) | 1807 | if (priv) { |
1804 | return rq; | 1808 | if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) { |
1809 | mempool_free(rq, q->rq.rq_pool); | ||
1810 | return NULL; | ||
1811 | } | ||
1812 | rq->flags |= REQ_ELVPRIV; | ||
1813 | } | ||
1805 | 1814 | ||
1806 | mempool_free(rq, q->rq.rq_pool); | 1815 | return rq; |
1807 | return NULL; | ||
1808 | } | 1816 | } |
1809 | 1817 | ||
1810 | /* | 1818 | /* |
@@ -1860,22 +1868,18 @@ static void __freed_request(request_queue_t *q, int rw) | |||
1860 | * A request has just been released. Account for it, update the full and | 1868 | * A request has just been released. Account for it, update the full and |
1861 | * congestion status, wake up any waiters. Called under q->queue_lock. | 1869 | * congestion status, wake up any waiters. Called under q->queue_lock. |
1862 | */ | 1870 | */ |
1863 | static void freed_request(request_queue_t *q, int rw) | 1871 | static void freed_request(request_queue_t *q, int rw, int priv) |
1864 | { | 1872 | { |
1865 | struct request_list *rl = &q->rq; | 1873 | struct request_list *rl = &q->rq; |
1866 | 1874 | ||
1867 | rl->count[rw]--; | 1875 | rl->count[rw]--; |
1876 | if (priv) | ||
1877 | rl->elvpriv--; | ||
1868 | 1878 | ||
1869 | __freed_request(q, rw); | 1879 | __freed_request(q, rw); |
1870 | 1880 | ||
1871 | if (unlikely(rl->starved[rw ^ 1])) | 1881 | if (unlikely(rl->starved[rw ^ 1])) |
1872 | __freed_request(q, rw ^ 1); | 1882 | __freed_request(q, rw ^ 1); |
1873 | |||
1874 | if (!rl->count[READ] && !rl->count[WRITE]) { | ||
1875 | smp_mb(); | ||
1876 | if (unlikely(waitqueue_active(&rl->drain))) | ||
1877 | wake_up(&rl->drain); | ||
1878 | } | ||
1879 | } | 1883 | } |
1880 | 1884 | ||
1881 | #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist) | 1885 | #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist) |
@@ -1885,14 +1889,12 @@ static void freed_request(request_queue_t *q, int rw) | |||
1885 | * Returns !NULL on success, with queue_lock *not held*. | 1889 | * Returns !NULL on success, with queue_lock *not held*. |
1886 | */ | 1890 | */ |
1887 | static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, | 1891 | static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, |
1888 | int gfp_mask) | 1892 | gfp_t gfp_mask) |
1889 | { | 1893 | { |
1890 | struct request *rq = NULL; | 1894 | struct request *rq = NULL; |
1891 | struct request_list *rl = &q->rq; | 1895 | struct request_list *rl = &q->rq; |
1892 | struct io_context *ioc = current_io_context(GFP_ATOMIC); | 1896 | struct io_context *ioc = current_io_context(GFP_ATOMIC); |
1893 | 1897 | int priv; | |
1894 | if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) | ||
1895 | goto out; | ||
1896 | 1898 | ||
1897 | if (rl->count[rw]+1 >= q->nr_requests) { | 1899 | if (rl->count[rw]+1 >= q->nr_requests) { |
1898 | /* | 1900 | /* |
@@ -1937,9 +1939,14 @@ get_rq: | |||
1937 | rl->starved[rw] = 0; | 1939 | rl->starved[rw] = 0; |
1938 | if (rl->count[rw] >= queue_congestion_on_threshold(q)) | 1940 | if (rl->count[rw] >= queue_congestion_on_threshold(q)) |
1939 | set_queue_congested(q, rw); | 1941 | set_queue_congested(q, rw); |
1942 | |||
1943 | priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | ||
1944 | if (priv) | ||
1945 | rl->elvpriv++; | ||
1946 | |||
1940 | spin_unlock_irq(q->queue_lock); | 1947 | spin_unlock_irq(q->queue_lock); |
1941 | 1948 | ||
1942 | rq = blk_alloc_request(q, rw, bio, gfp_mask); | 1949 | rq = blk_alloc_request(q, rw, bio, priv, gfp_mask); |
1943 | if (!rq) { | 1950 | if (!rq) { |
1944 | /* | 1951 | /* |
1945 | * Allocation failed presumably due to memory. Undo anything | 1952 | * Allocation failed presumably due to memory. Undo anything |
@@ -1949,7 +1956,7 @@ get_rq: | |||
1949 | * wait queue, but this is pretty rare. | 1956 | * wait queue, but this is pretty rare. |
1950 | */ | 1957 | */ |
1951 | spin_lock_irq(q->queue_lock); | 1958 | spin_lock_irq(q->queue_lock); |
1952 | freed_request(q, rw); | 1959 | freed_request(q, rw, priv); |
1953 | 1960 | ||
1954 | /* | 1961 | /* |
1955 | * in the very unlikely event that allocation failed and no | 1962 | * in the very unlikely event that allocation failed and no |
@@ -2019,7 +2026,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw, | |||
2019 | return rq; | 2026 | return rq; |
2020 | } | 2027 | } |
2021 | 2028 | ||
2022 | struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask) | 2029 | struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask) |
2023 | { | 2030 | { |
2024 | struct request *rq; | 2031 | struct request *rq; |
2025 | 2032 | ||
@@ -2251,7 +2258,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user); | |||
2251 | * @gfp_mask: memory allocation flags | 2258 | * @gfp_mask: memory allocation flags |
2252 | */ | 2259 | */ |
2253 | int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, | 2260 | int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, |
2254 | unsigned int len, unsigned int gfp_mask) | 2261 | unsigned int len, gfp_t gfp_mask) |
2255 | { | 2262 | { |
2256 | struct bio *bio; | 2263 | struct bio *bio; |
2257 | 2264 | ||
@@ -2433,13 +2440,15 @@ void disk_round_stats(struct gendisk *disk) | |||
2433 | { | 2440 | { |
2434 | unsigned long now = jiffies; | 2441 | unsigned long now = jiffies; |
2435 | 2442 | ||
2436 | __disk_stat_add(disk, time_in_queue, | 2443 | if (now == disk->stamp) |
2437 | disk->in_flight * (now - disk->stamp)); | 2444 | return; |
2438 | disk->stamp = now; | ||
2439 | 2445 | ||
2440 | if (disk->in_flight) | 2446 | if (disk->in_flight) { |
2441 | __disk_stat_add(disk, io_ticks, (now - disk->stamp_idle)); | 2447 | __disk_stat_add(disk, time_in_queue, |
2442 | disk->stamp_idle = now; | 2448 | disk->in_flight * (now - disk->stamp)); |
2449 | __disk_stat_add(disk, io_ticks, (now - disk->stamp)); | ||
2450 | } | ||
2451 | disk->stamp = now; | ||
2443 | } | 2452 | } |
2444 | 2453 | ||
2445 | /* | 2454 | /* |
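The disk_round_stats() rework above changes what the counters mean: time_in_queue and io_ticks now only advance while requests are actually in flight, and a second call within the same jiffy is a no-op, so idle gaps merely move the stamp forward. A small userspace model of the new accounting (struct and function names are illustrative):

    #include <stdio.h>

    struct disk_model {
        unsigned long stamp;            /* last jiffy the stats were folded in */
        unsigned int in_flight;         /* requests currently outstanding */
        unsigned long long time_in_queue;
        unsigned long long io_ticks;
    };

    static void round_stats(struct disk_model *d, unsigned long now)
    {
        if (now == d->stamp)
            return;                     /* same jiffy: nothing elapsed */

        if (d->in_flight) {
            d->time_in_queue += d->in_flight * (now - d->stamp);
            d->io_ticks += now - d->stamp;  /* disk was busy for the whole gap */
        }
        d->stamp = now;                 /* idle gaps just advance the stamp */
    }

    int main(void)
    {
        struct disk_model d = { .stamp = 100, .in_flight = 2 };

        round_stats(&d, 110);           /* two requests outstanding for 10 ticks */
        printf("time_in_queue=%llu io_ticks=%llu\n", d.time_in_queue, d.io_ticks);
        /* prints time_in_queue=20 io_ticks=10 */
        return 0;
    }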
@@ -2454,6 +2463,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req) | |||
2454 | if (unlikely(--req->ref_count)) | 2463 | if (unlikely(--req->ref_count)) |
2455 | return; | 2464 | return; |
2456 | 2465 | ||
2466 | elv_completed_request(q, req); | ||
2467 | |||
2457 | req->rq_status = RQ_INACTIVE; | 2468 | req->rq_status = RQ_INACTIVE; |
2458 | req->rl = NULL; | 2469 | req->rl = NULL; |
2459 | 2470 | ||
@@ -2463,26 +2474,25 @@ static void __blk_put_request(request_queue_t *q, struct request *req) | |||
2463 | */ | 2474 | */ |
2464 | if (rl) { | 2475 | if (rl) { |
2465 | int rw = rq_data_dir(req); | 2476 | int rw = rq_data_dir(req); |
2466 | 2477 | int priv = req->flags & REQ_ELVPRIV; | |
2467 | elv_completed_request(q, req); | ||
2468 | 2478 | ||
2469 | BUG_ON(!list_empty(&req->queuelist)); | 2479 | BUG_ON(!list_empty(&req->queuelist)); |
2470 | 2480 | ||
2471 | blk_free_request(q, req); | 2481 | blk_free_request(q, req); |
2472 | freed_request(q, rw); | 2482 | freed_request(q, rw, priv); |
2473 | } | 2483 | } |
2474 | } | 2484 | } |
2475 | 2485 | ||
2476 | void blk_put_request(struct request *req) | 2486 | void blk_put_request(struct request *req) |
2477 | { | 2487 | { |
2488 | unsigned long flags; | ||
2489 | request_queue_t *q = req->q; | ||
2490 | |||
2478 | /* | 2491 | /* |
2479 | * if req->rl isn't set, this request didnt originate from the | 2492 | * Gee, IDE calls in w/ NULL q. Fix IDE and remove the |
2480 | * block layer, so it's safe to just disregard it | 2493 | * following if (q) test. |
2481 | */ | 2494 | */ |
2482 | if (req->rl) { | 2495 | if (q) { |
2483 | unsigned long flags; | ||
2484 | request_queue_t *q = req->q; | ||
2485 | |||
2486 | spin_lock_irqsave(q->queue_lock, flags); | 2496 | spin_lock_irqsave(q->queue_lock, flags); |
2487 | __blk_put_request(q, req); | 2497 | __blk_put_request(q, req); |
2488 | spin_unlock_irqrestore(q->queue_lock, flags); | 2498 | spin_unlock_irqrestore(q->queue_lock, flags); |
@@ -2797,97 +2807,6 @@ static inline void blk_partition_remap(struct bio *bio) | |||
2797 | } | 2807 | } |
2798 | } | 2808 | } |
2799 | 2809 | ||
2800 | void blk_finish_queue_drain(request_queue_t *q) | ||
2801 | { | ||
2802 | struct request_list *rl = &q->rq; | ||
2803 | struct request *rq; | ||
2804 | int requeued = 0; | ||
2805 | |||
2806 | spin_lock_irq(q->queue_lock); | ||
2807 | clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags); | ||
2808 | |||
2809 | while (!list_empty(&q->drain_list)) { | ||
2810 | rq = list_entry_rq(q->drain_list.next); | ||
2811 | |||
2812 | list_del_init(&rq->queuelist); | ||
2813 | elv_requeue_request(q, rq); | ||
2814 | requeued++; | ||
2815 | } | ||
2816 | |||
2817 | if (requeued) | ||
2818 | q->request_fn(q); | ||
2819 | |||
2820 | spin_unlock_irq(q->queue_lock); | ||
2821 | |||
2822 | wake_up(&rl->wait[0]); | ||
2823 | wake_up(&rl->wait[1]); | ||
2824 | wake_up(&rl->drain); | ||
2825 | } | ||
2826 | |||
2827 | static int wait_drain(request_queue_t *q, struct request_list *rl, int dispatch) | ||
2828 | { | ||
2829 | int wait = rl->count[READ] + rl->count[WRITE]; | ||
2830 | |||
2831 | if (dispatch) | ||
2832 | wait += !list_empty(&q->queue_head); | ||
2833 | |||
2834 | return wait; | ||
2835 | } | ||
2836 | |||
2837 | /* | ||
2838 | * We rely on the fact that only requests allocated through blk_alloc_request() | ||
2839 | * have io scheduler private data structures associated with them. Any other | ||
2840 | * type of request (allocated on stack or through kmalloc()) should not go | ||
2841 | * to the io scheduler core, but be attached to the queue head instead. | ||
2842 | */ | ||
2843 | void blk_wait_queue_drained(request_queue_t *q, int wait_dispatch) | ||
2844 | { | ||
2845 | struct request_list *rl = &q->rq; | ||
2846 | DEFINE_WAIT(wait); | ||
2847 | |||
2848 | spin_lock_irq(q->queue_lock); | ||
2849 | set_bit(QUEUE_FLAG_DRAIN, &q->queue_flags); | ||
2850 | |||
2851 | while (wait_drain(q, rl, wait_dispatch)) { | ||
2852 | prepare_to_wait(&rl->drain, &wait, TASK_UNINTERRUPTIBLE); | ||
2853 | |||
2854 | if (wait_drain(q, rl, wait_dispatch)) { | ||
2855 | __generic_unplug_device(q); | ||
2856 | spin_unlock_irq(q->queue_lock); | ||
2857 | io_schedule(); | ||
2858 | spin_lock_irq(q->queue_lock); | ||
2859 | } | ||
2860 | |||
2861 | finish_wait(&rl->drain, &wait); | ||
2862 | } | ||
2863 | |||
2864 | spin_unlock_irq(q->queue_lock); | ||
2865 | } | ||
2866 | |||
2867 | /* | ||
2868 | * block waiting for the io scheduler being started again. | ||
2869 | */ | ||
2870 | static inline void block_wait_queue_running(request_queue_t *q) | ||
2871 | { | ||
2872 | DEFINE_WAIT(wait); | ||
2873 | |||
2874 | while (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) { | ||
2875 | struct request_list *rl = &q->rq; | ||
2876 | |||
2877 | prepare_to_wait_exclusive(&rl->drain, &wait, | ||
2878 | TASK_UNINTERRUPTIBLE); | ||
2879 | |||
2880 | /* | ||
2881 | * re-check the condition. avoids using prepare_to_wait() | ||
2882 | * in the fast path (queue is running) | ||
2883 | */ | ||
2884 | if (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) | ||
2885 | io_schedule(); | ||
2886 | |||
2887 | finish_wait(&rl->drain, &wait); | ||
2888 | } | ||
2889 | } | ||
2890 | |||
2891 | static void handle_bad_sector(struct bio *bio) | 2810 | static void handle_bad_sector(struct bio *bio) |
2892 | { | 2811 | { |
2893 | char b[BDEVNAME_SIZE]; | 2812 | char b[BDEVNAME_SIZE]; |
@@ -2983,8 +2902,6 @@ end_io: | |||
2983 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) | 2902 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) |
2984 | goto end_io; | 2903 | goto end_io; |
2985 | 2904 | ||
2986 | block_wait_queue_running(q); | ||
2987 | |||
2988 | /* | 2905 | /* |
2989 | * If this device has partitions, remap block n | 2906 | * If this device has partitions, remap block n |
2990 | * of partition p to block n+start(p) of the disk. | 2907 | * of partition p to block n+start(p) of the disk. |
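With the hunks above, the whole drain apparatus (QUEUE_FLAG_DRAIN, drain_list, blk_wait_queue_drained(), block_wait_queue_running()) disappears from the request path. The replacement is per-request bookkeeping: new requests stop getting elevator-private data while a switch is pending, and the switching code, presumably in the elevator core changes elsewhere in this patch, can wait for the rl->elvpriv count of old-style requests to reach zero instead of stalling the whole queue. A rough userspace model of that idea, with illustrative names:

    #include <stdio.h>

    struct queue_model {
        int elvswitch;          /* an io scheduler switch is in progress */
        int elvpriv;            /* in-flight requests owned by the old scheduler */
    };

    /* mirrors the !test_bit(QUEUE_FLAG_ELVSWITCH, ...) check in get_request() */
    static int wants_elevator_data(const struct queue_model *q)
    {
        return !q->elvswitch;
    }

    /* the freed_request()/elv_completed_request() side of the accounting */
    static void complete_request(struct queue_model *q, int had_priv)
    {
        if (had_priv)
            q->elvpriv--;
    }

    static int switch_can_finish(const struct queue_model *q)
    {
        return q->elvpriv == 0; /* old scheduler no longer owns any request */
    }

    int main(void)
    {
        struct queue_model q = { .elvswitch = 1, .elvpriv = 2 };

        complete_request(&q, 1);
        complete_request(&q, 1);
        printf("new rq gets elv data: %d, switch can finish: %d\n",
               wants_elevator_data(&q), switch_can_finish(&q));
        return 0;
    }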
@@ -3393,7 +3310,7 @@ void exit_io_context(void) | |||
3393 | * but since the current task itself holds a reference, the context can be | 3310 | * but since the current task itself holds a reference, the context can be |
3394 | * used in general code, so long as it stays within `current` context. | 3311 | * used in general code, so long as it stays within `current` context. |
3395 | */ | 3312 | */ |
3396 | struct io_context *current_io_context(int gfp_flags) | 3313 | struct io_context *current_io_context(gfp_t gfp_flags) |
3397 | { | 3314 | { |
3398 | struct task_struct *tsk = current; | 3315 | struct task_struct *tsk = current; |
3399 | struct io_context *ret; | 3316 | struct io_context *ret; |
@@ -3424,7 +3341,7 @@ EXPORT_SYMBOL(current_io_context); | |||
3424 | * | 3341 | * |
3425 | * This is always called in the context of the task which submitted the I/O. | 3342 | * This is always called in the context of the task which submitted the I/O. |
3426 | */ | 3343 | */ |
3427 | struct io_context *get_io_context(int gfp_flags) | 3344 | struct io_context *get_io_context(gfp_t gfp_flags) |
3428 | { | 3345 | { |
3429 | struct io_context *ret; | 3346 | struct io_context *ret; |
3430 | ret = current_io_context(gfp_flags); | 3347 | ret = current_io_context(gfp_flags); |
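Several of the remaining hunks in this file (blk_alloc_queue*, get_request, blk_get_request, blk_rq_map_kern, current_io_context, get_io_context) are part of the tree-wide switch from plain int to the gfp_t type for allocation flags, which lets sparse catch int/gfp_t mixups. A minimal sketch of the new convention, assuming the usual kernel headers:

    #include <linux/gfp.h>
    #include <linux/slab.h>

    /* was: static void *example_alloc(size_t len, int gfp_mask) */
    static void *example_alloc(size_t len, gfp_t gfp_mask)
    {
        return kmalloc(len, gfp_mask);
    }

    /* callers are unchanged, e.g. example_alloc(64, GFP_KERNEL) from process
     * context or example_alloc(64, GFP_ATOMIC) where sleeping is not allowed */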
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index b35e08876dd4..96c664af8d06 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -881,7 +881,7 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, | |||
881 | static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) | 881 | static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) |
882 | { | 882 | { |
883 | struct file *filp = lo->lo_backing_file; | 883 | struct file *filp = lo->lo_backing_file; |
884 | int gfp = lo->old_gfp_mask; | 884 | gfp_t gfp = lo->old_gfp_mask; |
885 | 885 | ||
886 | if (lo->lo_state != Lo_bound) | 886 | if (lo->lo_state != Lo_bound) |
887 | return -ENXIO; | 887 | return -ENXIO; |
diff --git a/drivers/block/noop-iosched.c b/drivers/block/noop-iosched.c index b1730b62c37e..f56b8edb06e4 100644 --- a/drivers/block/noop-iosched.c +++ b/drivers/block/noop-iosched.c | |||
@@ -7,57 +7,19 @@ | |||
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | 9 | ||
10 | /* | 10 | static void elevator_noop_add_request(request_queue_t *q, struct request *rq) |
11 | * See if we can find a request that this buffer can be coalesced with. | ||
12 | */ | ||
13 | static int elevator_noop_merge(request_queue_t *q, struct request **req, | ||
14 | struct bio *bio) | ||
15 | { | ||
16 | int ret; | ||
17 | |||
18 | ret = elv_try_last_merge(q, bio); | ||
19 | if (ret != ELEVATOR_NO_MERGE) | ||
20 | *req = q->last_merge; | ||
21 | |||
22 | return ret; | ||
23 | } | ||
24 | |||
25 | static void elevator_noop_merge_requests(request_queue_t *q, struct request *req, | ||
26 | struct request *next) | ||
27 | { | ||
28 | list_del_init(&next->queuelist); | ||
29 | } | ||
30 | |||
31 | static void elevator_noop_add_request(request_queue_t *q, struct request *rq, | ||
32 | int where) | ||
33 | { | 11 | { |
34 | if (where == ELEVATOR_INSERT_FRONT) | 12 | elv_dispatch_add_tail(q, rq); |
35 | list_add(&rq->queuelist, &q->queue_head); | ||
36 | else | ||
37 | list_add_tail(&rq->queuelist, &q->queue_head); | ||
38 | |||
39 | /* | ||
40 | * new merges must not precede this barrier | ||
41 | */ | ||
42 | if (rq->flags & REQ_HARDBARRIER) | ||
43 | q->last_merge = NULL; | ||
44 | else if (!q->last_merge) | ||
45 | q->last_merge = rq; | ||
46 | } | 13 | } |
47 | 14 | ||
48 | static struct request *elevator_noop_next_request(request_queue_t *q) | 15 | static int elevator_noop_dispatch(request_queue_t *q, int force) |
49 | { | 16 | { |
50 | if (!list_empty(&q->queue_head)) | 17 | return 0; |
51 | return list_entry_rq(q->queue_head.next); | ||
52 | |||
53 | return NULL; | ||
54 | } | 18 | } |
55 | 19 | ||
56 | static struct elevator_type elevator_noop = { | 20 | static struct elevator_type elevator_noop = { |
57 | .ops = { | 21 | .ops = { |
58 | .elevator_merge_fn = elevator_noop_merge, | 22 | .elevator_dispatch_fn = elevator_noop_dispatch, |
59 | .elevator_merge_req_fn = elevator_noop_merge_requests, | ||
60 | .elevator_next_req_fn = elevator_noop_next_request, | ||
61 | .elevator_add_req_fn = elevator_noop_add_request, | 23 | .elevator_add_req_fn = elevator_noop_add_request, |
62 | }, | 24 | }, |
63 | .elevator_name = "noop", | 25 | .elevator_name = "noop", |
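Under the new generic dispatch queue, the noop elevator shrinks to two hooks: add_request feeds the shared dispatch list directly via elv_dispatch_add_tail(), and the dispatch hook has nothing left to do. For contrast, a hedged sketch of what a scheduler that keeps its own ordering looks like under this model; the fifo_* names and the private list are illustrative, and the elevator_type registration plus the init hook that would allocate fifo_data are omitted:

    #include <linux/blkdev.h>
    #include <linux/elevator.h>
    #include <linux/list.h>

    struct fifo_data {
        struct list_head queue;         /* requests not yet handed to the core */
    };

    static void fifo_add_request(request_queue_t *q, struct request *rq)
    {
        struct fifo_data *fd = q->elevator->elevator_data;

        list_add_tail(&rq->queuelist, &fd->queue);
    }

    static int fifo_dispatch(request_queue_t *q, int force)
    {
        struct fifo_data *fd = q->elevator->elevator_data;
        struct request *rq;

        if (list_empty(&fd->queue))
            return 0;

        rq = list_entry(fd->queue.next, struct request, queuelist);
        list_del_init(&rq->queuelist);  /* off the private list ... */
        elv_dispatch_add_tail(q, rq);   /* ... onto the generic dispatch list */
        return 1;
    }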
diff --git a/drivers/block/paride/paride.c b/drivers/block/paride/paride.c index 1fef136c0e41..ce94aa11f6a7 100644 --- a/drivers/block/paride/paride.c +++ b/drivers/block/paride/paride.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/string.h> | 29 | #include <linux/string.h> |
30 | #include <linux/spinlock.h> | 30 | #include <linux/spinlock.h> |
31 | #include <linux/wait.h> | 31 | #include <linux/wait.h> |
32 | #include <linux/sched.h> /* TASK_* */ | ||
32 | 33 | ||
33 | #ifdef CONFIG_PARPORT_MODULE | 34 | #ifdef CONFIG_PARPORT_MODULE |
34 | #define CONFIG_PARPORT | 35 | #define CONFIG_PARPORT |
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 94af920465b5..e9746af29b9f 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c | |||
@@ -807,10 +807,6 @@ static int pf_next_buf(void) | |||
807 | return 1; | 807 | return 1; |
808 | spin_lock_irqsave(&pf_spin_lock, saved_flags); | 808 | spin_lock_irqsave(&pf_spin_lock, saved_flags); |
809 | pf_end_request(1); | 809 | pf_end_request(1); |
810 | if (pf_req) { | ||
811 | pf_count = pf_req->current_nr_sectors; | ||
812 | pf_buf = pf_req->buffer; | ||
813 | } | ||
814 | spin_unlock_irqrestore(&pf_spin_lock, saved_flags); | 810 | spin_unlock_irqrestore(&pf_spin_lock, saved_flags); |
815 | return 1; | 811 | return 1; |
816 | } | 812 | } |
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c index b3982395f22b..6f5df0fad703 100644 --- a/drivers/block/paride/pg.c +++ b/drivers/block/paride/pg.c | |||
@@ -162,6 +162,8 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY}; | |||
162 | #include <linux/mtio.h> | 162 | #include <linux/mtio.h> |
163 | #include <linux/pg.h> | 163 | #include <linux/pg.h> |
164 | #include <linux/device.h> | 164 | #include <linux/device.h> |
165 | #include <linux/sched.h> /* current, TASK_* */ | ||
166 | #include <linux/jiffies.h> | ||
165 | 167 | ||
166 | #include <asm/uaccess.h> | 168 | #include <asm/uaccess.h> |
167 | 169 | ||
@@ -674,7 +676,7 @@ static int __init pg_init(void) | |||
674 | for (unit = 0; unit < PG_UNITS; unit++) { | 676 | for (unit = 0; unit < PG_UNITS; unit++) { |
675 | struct pg *dev = &devices[unit]; | 677 | struct pg *dev = &devices[unit]; |
676 | if (dev->present) { | 678 | if (dev->present) { |
677 | class_device_create(pg_class, MKDEV(major, unit), | 679 | class_device_create(pg_class, NULL, MKDEV(major, unit), |
678 | NULL, "pg%u", unit); | 680 | NULL, "pg%u", unit); |
679 | err = devfs_mk_cdev(MKDEV(major, unit), | 681 | err = devfs_mk_cdev(MKDEV(major, unit), |
680 | S_IFCHR | S_IRUSR | S_IWUSR, "pg/%u", | 682 | S_IFCHR | S_IRUSR | S_IWUSR, "pg/%u", |
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c index d8d35233cf49..715ae5dc88fb 100644 --- a/drivers/block/paride/pt.c +++ b/drivers/block/paride/pt.c | |||
@@ -146,6 +146,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3}; | |||
146 | #include <linux/slab.h> | 146 | #include <linux/slab.h> |
147 | #include <linux/mtio.h> | 147 | #include <linux/mtio.h> |
148 | #include <linux/device.h> | 148 | #include <linux/device.h> |
149 | #include <linux/sched.h> /* current, TASK_*, schedule_timeout() */ | ||
149 | 150 | ||
150 | #include <asm/uaccess.h> | 151 | #include <asm/uaccess.h> |
151 | 152 | ||
@@ -971,7 +972,7 @@ static int __init pt_init(void) | |||
971 | devfs_mk_dir("pt"); | 972 | devfs_mk_dir("pt"); |
972 | for (unit = 0; unit < PT_UNITS; unit++) | 973 | for (unit = 0; unit < PT_UNITS; unit++) |
973 | if (pt[unit].present) { | 974 | if (pt[unit].present) { |
974 | class_device_create(pt_class, MKDEV(major, unit), | 975 | class_device_create(pt_class, NULL, MKDEV(major, unit), |
975 | NULL, "pt%d", unit); | 976 | NULL, "pt%d", unit); |
976 | err = devfs_mk_cdev(MKDEV(major, unit), | 977 | err = devfs_mk_cdev(MKDEV(major, unit), |
977 | S_IFCHR | S_IRUSR | S_IWUSR, | 978 | S_IFCHR | S_IRUSR | S_IWUSR, |
@@ -980,7 +981,7 @@ static int __init pt_init(void) | |||
980 | class_device_destroy(pt_class, MKDEV(major, unit)); | 981 | class_device_destroy(pt_class, MKDEV(major, unit)); |
981 | goto out_class; | 982 | goto out_class; |
982 | } | 983 | } |
983 | class_device_create(pt_class, MKDEV(major, unit + 128), | 984 | class_device_create(pt_class, NULL, MKDEV(major, unit + 128), |
984 | NULL, "pt%dn", unit); | 985 | NULL, "pt%dn", unit); |
985 | err = devfs_mk_cdev(MKDEV(major, unit + 128), | 986 | err = devfs_mk_cdev(MKDEV(major, unit + 128), |
986 | S_IFCHR | S_IRUSR | S_IWUSR, | 987 | S_IFCHR | S_IRUSR | S_IWUSR, |
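The pg.c and pt.c hunks above track an API change rather than a driver change: class_device_create() gained a parent class_device argument, and passing NULL keeps the previous flat sysfs layout. Sketch of the updated call shape, with example_class, major and the "example%d" name as placeholders:

    #include <linux/device.h>
    #include <linux/kdev_t.h>

    static struct class *example_class; /* created elsewhere with class_create() */

    static void example_add_node(int major, int unit)
    {
        class_device_create(example_class, NULL /* parent */, MKDEV(major, unit),
                            NULL /* struct device */, "example%d", unit);
    }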
diff --git a/drivers/block/rd.c b/drivers/block/rd.c index 145c1fbffe01..68c60a5bcdab 100644 --- a/drivers/block/rd.c +++ b/drivers/block/rd.c | |||
@@ -348,7 +348,7 @@ static int rd_open(struct inode *inode, struct file *filp) | |||
348 | struct block_device *bdev = inode->i_bdev; | 348 | struct block_device *bdev = inode->i_bdev; |
349 | struct address_space *mapping; | 349 | struct address_space *mapping; |
350 | unsigned bsize; | 350 | unsigned bsize; |
351 | int gfp_mask; | 351 | gfp_t gfp_mask; |
352 | 352 | ||
353 | inode = igrab(bdev->bd_inode); | 353 | inode = igrab(bdev->bd_inode); |
354 | rd_bdev[unit] = bdev; | 354 | rd_bdev[unit] = bdev; |
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index d57007b92f77..1ded3b433459 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware | 2 | * sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware |
3 | * | 3 | * |
4 | * Copyright 2004 Red Hat, Inc. | 4 | * Copyright 2004-2005 Red Hat, Inc. |
5 | * | 5 | * |
6 | * Author/maintainer: Jeff Garzik <jgarzik@pobox.com> | 6 | * Author/maintainer: Jeff Garzik <jgarzik@pobox.com> |
7 | * | 7 | * |
@@ -31,10 +31,6 @@ | |||
31 | #include <asm/semaphore.h> | 31 | #include <asm/semaphore.h> |
32 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
33 | 33 | ||
34 | MODULE_AUTHOR("Jeff Garzik"); | ||
35 | MODULE_LICENSE("GPL"); | ||
36 | MODULE_DESCRIPTION("Promise SATA SX8 block driver"); | ||
37 | |||
38 | #if 0 | 34 | #if 0 |
39 | #define CARM_DEBUG | 35 | #define CARM_DEBUG |
40 | #define CARM_VERBOSE_DEBUG | 36 | #define CARM_VERBOSE_DEBUG |
@@ -45,9 +41,35 @@ MODULE_DESCRIPTION("Promise SATA SX8 block driver"); | |||
45 | #undef CARM_NDEBUG | 41 | #undef CARM_NDEBUG |
46 | 42 | ||
47 | #define DRV_NAME "sx8" | 43 | #define DRV_NAME "sx8" |
48 | #define DRV_VERSION "0.8" | 44 | #define DRV_VERSION "1.0" |
49 | #define PFX DRV_NAME ": " | 45 | #define PFX DRV_NAME ": " |
50 | 46 | ||
47 | MODULE_AUTHOR("Jeff Garzik"); | ||
48 | MODULE_LICENSE("GPL"); | ||
49 | MODULE_DESCRIPTION("Promise SATA SX8 block driver"); | ||
50 | MODULE_VERSION(DRV_VERSION); | ||
51 | |||
52 | /* | ||
53 | * SX8 hardware has a single message queue for all ATA ports. | ||
54 | * When this driver was written, the hardware (firmware?) would | ||
55 | * corrupt data eventually, if more than one request was outstanding. | ||
56 | * As one can imagine, having 8 ports bottlenecking on a single | ||
57 | * command hurts performance. | ||
58 | * | ||
59 | * Based on user reports, later versions of the hardware (firmware?) | ||
60 | * seem to be able to survive with more than one command queued. | ||
61 | * | ||
62 | * Therefore, we default to the safe option -- 1 command -- but | ||
63 | * allow the user to increase this. | ||
64 | * | ||
65 | * SX8 should be able to support up to ~60 queued commands (CARM_MAX_REQ), | ||
66 | * but problems seem to occur when you exceed ~30, even on newer hardware. | ||
67 | */ | ||
68 | static int max_queue = 1; | ||
69 | module_param(max_queue, int, 0444); | ||
70 | MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30, safe==1)"); | ||
71 | |||
72 | |||
51 | #define NEXT_RESP(idx) ((idx + 1) % RMSG_Q_LEN) | 73 | #define NEXT_RESP(idx) ((idx + 1) % RMSG_Q_LEN) |
52 | 74 | ||
53 | /* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */ | 75 | /* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */ |
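The max_queue parameter above replaces the hard-coded CARM_MAX_Q, defaulting to the known-safe depth of 1; on hardware reported to cope, it can be raised at load time, e.g. with "modprobe sx8 max_queue=8". The driver's comment warns about going much past 30, but the hunk itself shows no clamping, so the sketch below of range-checking such a tunable at init time is a hypothetical illustration, not the sx8 code:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static int max_queue = 1;
    module_param(max_queue, int, 0444);     /* read-only via sysfs once loaded */
    MODULE_PARM_DESC(max_queue, "Maximum number of queued commands (1..30)");

    static int __init example_init(void)
    {
        if (max_queue < 1)
            max_queue = 1;
        else if (max_queue > 30)
            max_queue = 30;                 /* depths past ~30 reportedly misbehave */
        return 0;
    }
    module_init(example_init);

    MODULE_LICENSE("GPL");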
@@ -90,12 +112,10 @@ enum { | |||
90 | 112 | ||
91 | /* command message queue limits */ | 113 | /* command message queue limits */ |
92 | CARM_MAX_REQ = 64, /* max command msgs per host */ | 114 | CARM_MAX_REQ = 64, /* max command msgs per host */ |
93 | CARM_MAX_Q = 1, /* one command at a time */ | ||
94 | CARM_MSG_LOW_WATER = (CARM_MAX_REQ / 4), /* refill mark */ | 115 | CARM_MSG_LOW_WATER = (CARM_MAX_REQ / 4), /* refill mark */ |
95 | 116 | ||
96 | /* S/G limits, host-wide and per-request */ | 117 | /* S/G limits, host-wide and per-request */ |
97 | CARM_MAX_REQ_SG = 32, /* max s/g entries per request */ | 118 | CARM_MAX_REQ_SG = 32, /* max s/g entries per request */ |
98 | CARM_SG_BOUNDARY = 0xffffUL, /* s/g segment boundary */ | ||
99 | CARM_MAX_HOST_SG = 600, /* max s/g entries per host */ | 119 | CARM_MAX_HOST_SG = 600, /* max s/g entries per host */ |
100 | CARM_SG_LOW_WATER = (CARM_MAX_HOST_SG / 4), /* re-fill mark */ | 120 | CARM_SG_LOW_WATER = (CARM_MAX_HOST_SG / 4), /* re-fill mark */ |
101 | 121 | ||
@@ -181,6 +201,10 @@ enum { | |||
181 | FL_DYN_MAJOR = (1 << 17), | 201 | FL_DYN_MAJOR = (1 << 17), |
182 | }; | 202 | }; |
183 | 203 | ||
204 | enum { | ||
205 | CARM_SG_BOUNDARY = 0xffffUL, /* s/g segment boundary */ | ||
206 | }; | ||
207 | |||
184 | enum scatter_gather_types { | 208 | enum scatter_gather_types { |
185 | SGT_32BIT = 0, | 209 | SGT_32BIT = 0, |
186 | SGT_64BIT = 1, | 210 | SGT_64BIT = 1, |
@@ -218,7 +242,6 @@ static const char *state_name[] = { | |||
218 | 242 | ||
219 | struct carm_port { | 243 | struct carm_port { |
220 | unsigned int port_no; | 244 | unsigned int port_no; |
221 | unsigned int n_queued; | ||
222 | struct gendisk *disk; | 245 | struct gendisk *disk; |
223 | struct carm_host *host; | 246 | struct carm_host *host; |
224 | 247 | ||
@@ -448,7 +471,7 @@ static inline int carm_lookup_bucket(u32 msg_size) | |||
448 | for (i = 0; i < ARRAY_SIZE(msg_sizes); i++) | 471 | for (i = 0; i < ARRAY_SIZE(msg_sizes); i++) |
449 | if (msg_size <= msg_sizes[i]) | 472 | if (msg_size <= msg_sizes[i]) |
450 | return i; | 473 | return i; |
451 | 474 | ||
452 | return -ENOENT; | 475 | return -ENOENT; |
453 | } | 476 | } |
454 | 477 | ||
@@ -509,7 +532,7 @@ static struct carm_request *carm_get_request(struct carm_host *host) | |||
509 | if (host->hw_sg_used >= (CARM_MAX_HOST_SG - CARM_MAX_REQ_SG)) | 532 | if (host->hw_sg_used >= (CARM_MAX_HOST_SG - CARM_MAX_REQ_SG)) |
510 | return NULL; | 533 | return NULL; |
511 | 534 | ||
512 | for (i = 0; i < CARM_MAX_Q; i++) | 535 | for (i = 0; i < max_queue; i++) |
513 | if ((host->msg_alloc & (1ULL << i)) == 0) { | 536 | if ((host->msg_alloc & (1ULL << i)) == 0) { |
514 | struct carm_request *crq = &host->req[i]; | 537 | struct carm_request *crq = &host->req[i]; |
515 | crq->port = NULL; | 538 | crq->port = NULL; |
@@ -521,14 +544,14 @@ static struct carm_request *carm_get_request(struct carm_host *host) | |||
521 | assert(host->n_msgs <= CARM_MAX_REQ); | 544 | assert(host->n_msgs <= CARM_MAX_REQ); |
522 | return crq; | 545 | return crq; |
523 | } | 546 | } |
524 | 547 | ||
525 | DPRINTK("no request available, returning NULL\n"); | 548 | DPRINTK("no request available, returning NULL\n"); |
526 | return NULL; | 549 | return NULL; |
527 | } | 550 | } |
528 | 551 | ||
529 | static int carm_put_request(struct carm_host *host, struct carm_request *crq) | 552 | static int carm_put_request(struct carm_host *host, struct carm_request *crq) |
530 | { | 553 | { |
531 | assert(crq->tag < CARM_MAX_Q); | 554 | assert(crq->tag < max_queue); |
532 | 555 | ||
533 | if (unlikely((host->msg_alloc & (1ULL << crq->tag)) == 0)) | 556 | if (unlikely((host->msg_alloc & (1ULL << crq->tag)) == 0)) |
534 | return -EINVAL; /* tried to clear a tag that was not active */ | 557 | return -EINVAL; /* tried to clear a tag that was not active */ |
@@ -791,7 +814,7 @@ static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq, | |||
791 | int is_ok) | 814 | int is_ok) |
792 | { | 815 | { |
793 | carm_end_request_queued(host, crq, is_ok); | 816 | carm_end_request_queued(host, crq, is_ok); |
794 | if (CARM_MAX_Q == 1) | 817 | if (max_queue == 1) |
795 | carm_round_robin(host); | 818 | carm_round_robin(host); |
796 | else if ((host->n_msgs <= CARM_MSG_LOW_WATER) && | 819 | else if ((host->n_msgs <= CARM_MSG_LOW_WATER) && |
797 | (host->hw_sg_used <= CARM_SG_LOW_WATER)) { | 820 | (host->hw_sg_used <= CARM_SG_LOW_WATER)) { |
diff --git a/drivers/block/ub.c b/drivers/block/ub.c index ed4d5006fe62..bfb23d543ff7 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c | |||
@@ -1512,7 +1512,7 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1512 | scmd->nsg = 1; | 1512 | scmd->nsg = 1; |
1513 | sg = &scmd->sgv[0]; | 1513 | sg = &scmd->sgv[0]; |
1514 | sg->page = virt_to_page(sc->top_sense); | 1514 | sg->page = virt_to_page(sc->top_sense); |
1515 | sg->offset = (unsigned int)sc->top_sense & (PAGE_SIZE-1); | 1515 | sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1); |
1516 | sg->length = UB_SENSE_SIZE; | 1516 | sg->length = UB_SENSE_SIZE; |
1517 | scmd->len = UB_SENSE_SIZE; | 1517 | scmd->len = UB_SENSE_SIZE; |
1518 | scmd->lun = cmd->lun; | 1518 | scmd->lun = cmd->lun; |
@@ -1891,7 +1891,7 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, | |||
1891 | cmd->nsg = 1; | 1891 | cmd->nsg = 1; |
1892 | sg = &cmd->sgv[0]; | 1892 | sg = &cmd->sgv[0]; |
1893 | sg->page = virt_to_page(p); | 1893 | sg->page = virt_to_page(p); |
1894 | sg->offset = (unsigned int)p & (PAGE_SIZE-1); | 1894 | sg->offset = (unsigned long)p & (PAGE_SIZE-1); |
1895 | sg->length = 8; | 1895 | sg->length = 8; |
1896 | cmd->len = 8; | 1896 | cmd->len = 8; |
1897 | cmd->lun = lun; | 1897 | cmd->lun = lun; |
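The two one-line ub.c hunks fix the integer type used when deriving a buffer's offset within its page. On 64-bit builds, casting a pointer to unsigned int narrows it; the masked result happens to be the same because only the low bits matter, but the narrowing cast draws a pointer-truncation warning and is wrong in principle, so it becomes unsigned long, which matches the pointer width. A small userspace illustration:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL    /* typical value; the kernel's is per-arch */

    /* offset of p within its page, computed through a pointer-sized integer */
    static unsigned long page_offset(const void *p)
    {
        return (unsigned long)p & (PAGE_SIZE - 1);
    }

    int main(void)
    {
        char buf[64];

        printf("offset in page: %lu\n", page_offset(&buf[13]));
        return 0;
    }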