Diffstat (limited to 'block')
-rw-r--r--	block/Kconfig.iosched    |   2
-rw-r--r--	block/as-iosched.c       |  64
-rw-r--r--	block/cfq-iosched.c      | 199
-rw-r--r--	block/deadline-iosched.c |  52
-rw-r--r--	block/elevator.c         |   3
-rw-r--r--	block/genhd.c            |   7
-rw-r--r--	block/ll_rw_blk.c        |  15
7 files changed, 153 insertions, 189 deletions
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index f3b7753aac99..48d090e266fc 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -40,7 +40,7 @@ config IOSCHED_CFQ
 
 choice
 	prompt "Default I/O scheduler"
-	default DEFAULT_AS
+	default DEFAULT_CFQ
 	help
 	  Select the I/O scheduler which will be used by default for all
 	  block devices.
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 0c750393be4a..1ec5df466708 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -96,7 +96,7 @@ struct as_data {
 
 	struct as_rq *next_arq[2];	/* next in sort order */
 	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
-	struct list_head *hash;		/* request hash */
+	struct hlist_head *hash;	/* request hash */
 
 	unsigned long exit_prob;	/* probability a task will exit while
 					   being waited on */
@@ -165,8 +165,7 @@ struct as_rq {
 	/*
 	 * request hash, key is the ending offset (for back merge lookup)
 	 */
-	struct list_head hash;
-	unsigned int on_hash;
+	struct hlist_node hash;
 
 	/*
 	 * expire fifo
@@ -282,17 +281,15 @@ static const int as_hash_shift = 6;
 #define AS_HASH_FN(sec)		(hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
 #define AS_HASH_ENTRIES		(1 << as_hash_shift)
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr)	list_entry((ptr), struct as_rq, hash)
 
 static inline void __as_del_arq_hash(struct as_rq *arq)
 {
-	arq->on_hash = 0;
-	list_del_init(&arq->hash);
+	hlist_del_init(&arq->hash);
 }
 
 static inline void as_del_arq_hash(struct as_rq *arq)
 {
-	if (arq->on_hash)
+	if (!hlist_unhashed(&arq->hash))
 		__as_del_arq_hash(arq);
 }
 
@@ -300,10 +297,9 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
 {
 	struct request *rq = arq->request;
 
-	BUG_ON(arq->on_hash);
+	BUG_ON(!hlist_unhashed(&arq->hash));
 
-	arq->on_hash = 1;
-	list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
+	hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
 }
 
 /*
@@ -312,31 +308,29 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
 static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
 {
 	struct request *rq = arq->request;
-	struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
+	struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
 
-	if (!arq->on_hash) {
+	if (hlist_unhashed(&arq->hash)) {
 		WARN_ON(1);
 		return;
 	}
 
-	if (arq->hash.prev != head) {
-		list_del(&arq->hash);
-		list_add(&arq->hash, head);
+	if (&arq->hash != head->first) {
+		hlist_del(&arq->hash);
+		hlist_add_head(&arq->hash, head);
 	}
 }
 
 static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
 {
-	struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
-	struct list_head *entry, *next = hash_list->next;
+	struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
+	struct hlist_node *entry, *next;
+	struct as_rq *arq;
 
-	while ((entry = next) != hash_list) {
-		struct as_rq *arq = list_entry_hash(entry);
+	hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) {
 		struct request *__rq = arq->request;
 
-		next = entry->next;
-
-		BUG_ON(!arq->on_hash);
+		BUG_ON(hlist_unhashed(&arq->hash));
 
 		if (!rq_mergeable(__rq)) {
 			as_del_arq_hash(arq);
@@ -353,9 +347,6 @@ static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
 /*
  * rb tree support functions
  */
-#define RB_EMPTY(root)		((root)->rb_node == NULL)
-#define ON_RB(node)		(rb_parent(node) != node)
-#define RB_CLEAR(node)		(rb_set_parent(node, node))
 #define rb_entry_arq(node)	rb_entry((node), struct as_rq, rb_node)
 #define ARQ_RB_ROOT(ad, arq)	(&(ad)->sort_list[(arq)->is_sync])
 #define rq_rb_key(rq)		(rq)->sector
@@ -424,13 +415,13 @@ static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
 
 static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
 {
-	if (!ON_RB(&arq->rb_node)) {
+	if (!RB_EMPTY_NODE(&arq->rb_node)) {
 		WARN_ON(1);
 		return;
 	}
 
 	rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
-	RB_CLEAR(&arq->rb_node);
+	RB_CLEAR_NODE(&arq->rb_node);
 }
 
 static struct request *
@@ -551,7 +542,7 @@ static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
 	struct rb_node *rbprev = rb_prev(&last->rb_node);
 	struct as_rq *arq_next, *arq_prev;
 
-	BUG_ON(!ON_RB(&last->rb_node));
+	BUG_ON(!RB_EMPTY_NODE(&last->rb_node));
 
 	if (rbprev)
 		arq_prev = rb_entry_arq(rbprev);
@@ -1128,7 +1119,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	struct request *rq = arq->request;
 	const int data_dir = arq->is_sync;
 
-	BUG_ON(!ON_RB(&arq->rb_node));
+	BUG_ON(!RB_EMPTY_NODE(&arq->rb_node));
 
 	as_antic_stop(ad);
 	ad->antic_status = ANTIC_OFF;
@@ -1253,7 +1244,7 @@ static int as_dispatch_request(request_queue_t *q, int force)
 	 */
 
 	if (reads) {
-		BUG_ON(RB_EMPTY(&ad->sort_list[REQ_SYNC]));
+		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC]));
 
 		if (writes && ad->batch_data_dir == REQ_SYNC)
 			/*
@@ -1277,7 +1268,7 @@ static int as_dispatch_request(request_queue_t *q, int force)
 
 	if (writes) {
 dispatch_writes:
-		BUG_ON(RB_EMPTY(&ad->sort_list[REQ_ASYNC]));
+		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC]));
 
 		if (ad->batch_data_dir == REQ_SYNC) {
 			ad->changed_batch = 1;
@@ -1345,7 +1336,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	arq->state = AS_RQ_NEW;
 
 	if (rq_data_dir(arq->request) == READ
-			|| current->flags&PF_SYNCWRITE)
+			|| (arq->request->flags & REQ_RW_SYNC))
 		arq->is_sync = 1;
 	else
 		arq->is_sync = 0;
@@ -1597,12 +1588,11 @@ static int as_set_request(request_queue_t *q, struct request *rq,
 
 	if (arq) {
 		memset(arq, 0, sizeof(*arq));
-		RB_CLEAR(&arq->rb_node);
+		RB_CLEAR_NODE(&arq->rb_node);
 		arq->request = rq;
 		arq->state = AS_RQ_PRESCHED;
 		arq->io_context = NULL;
-		INIT_LIST_HEAD(&arq->hash);
-		arq->on_hash = 0;
+		INIT_HLIST_NODE(&arq->hash);
 		INIT_LIST_HEAD(&arq->fifo);
 		rq->elevator_private = arq;
 		return 0;
@@ -1662,7 +1652,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
 
 	ad->q = q;	/* Identify what queue the data belongs to */
 
-	ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES,
+	ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES,
 				GFP_KERNEL, q->node);
 	if (!ad->hash) {
 		kfree(ad);
@@ -1684,7 +1674,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_WORK(&ad->antic_work, as_work_handler, q);
 
 	for (i = 0; i < AS_HASH_ENTRIES; i++)
-		INIT_LIST_HEAD(&ad->hash[i]);
+		INIT_HLIST_HEAD(&ad->hash[i]);
 
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e2e6ad0a158e..e25223e147a2 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -26,7 +26,7 @@ static const int cfq_back_penalty = 2; /* penalty of a backwards seek */
 static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
-static int cfq_slice_idle = HZ / 70;
+static int cfq_slice_idle = HZ / 125;
 
 #define CFQ_IDLE_GRACE		(HZ / 10)
 #define CFQ_SLICE_SCALE		(5)
@@ -60,11 +60,6 @@ static DEFINE_SPINLOCK(cfq_exit_lock);
 /*
  * rb-tree defines
  */
-#define RB_EMPTY(node)		((node)->rb_node == NULL)
-#define RB_CLEAR(node)		do {	\
-	memset(node, 0, sizeof(*node)); \
-} while (0)
-#define RB_CLEAR_ROOT(root)	((root)->rb_node = NULL)
 #define rb_entry_crq(node)	rb_entry((node), struct cfq_rq, rb_node)
 #define rq_rb_key(rq)		(rq)->sector
 
@@ -123,8 +118,6 @@ struct cfq_data {
 	 */
 	struct hlist_head *crq_hash;
 
-	unsigned int max_queued;
-
 	mempool_t *crq_pool;
 
 	int rq_in_driver;
@@ -279,8 +272,6 @@ static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsi
 static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
 
-#define process_sync(tsk)	((tsk)->flags & PF_SYNCWRITE)
-
 /*
  * lots of deadline iosched dupes, can be abstracted later...
  */
@@ -336,7 +327,7 @@ static int cfq_queue_empty(request_queue_t *q)
 
 static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
 {
-	if (rw == READ || process_sync(task))
+	if (rw == READ || rw == WRITE_SYNC)
 		return task->pid;
 
 	return CFQ_KEY_ASYNC;
@@ -563,7 +554,7 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq)
 
 	rb_erase(&crq->rb_node, &cfqq->sort_list);
 
-	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
+	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
 		cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
@@ -910,13 +901,15 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 	return cfqq;
 }
 
+#define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))
+
 static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
 {
 	struct cfq_io_context *cic;
 	unsigned long sl;
 
-	WARN_ON(!RB_EMPTY(&cfqq->sort_list));
+	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
 	WARN_ON(cfqq != cfqd->active_queue);
 
 	/*
@@ -943,7 +936,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * fair distribution of slice time for a process doing back-to-back
 	 * seeks. so allow a little bit of time for him to submit a new rq
 	 */
-	if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072)
+	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
 		sl = 2;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
@@ -954,11 +947,15 @@ static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct request *rq;
 
 	cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
 	cfq_remove_request(crq->request);
 	cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
 	elv_dispatch_sort(q, crq->request);
+
+	rq = list_entry(q->queue_head.prev, struct request, queuelist);
+	cfqd->last_sector = rq->sector + rq->nr_sectors;
 }
 
 /*
@@ -1040,10 +1037,12 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	 * if queue has requests, dispatch one. if not, check if
 	 * enough slice is left to wait for one
 	 */
-	if (!RB_EMPTY(&cfqq->sort_list))
+	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
 		goto keep_queue;
-	else if (cfq_cfqq_class_sync(cfqq) &&
-		 time_before(now, cfqq->slice_end)) {
+	else if (cfq_cfqq_dispatched(cfqq)) {
+		cfqq = NULL;
+		goto keep_queue;
+	} else if (cfq_cfqq_class_sync(cfqq)) {
 		if (cfq_arm_slice_timer(cfqd, cfqq))
 			return NULL;
 	}
@@ -1062,7 +1061,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
 	int dispatched = 0;
 
-	BUG_ON(RB_EMPTY(&cfqq->sort_list));
+	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
 
 	do {
 		struct cfq_rq *crq;
@@ -1086,14 +1085,13 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			cfqd->active_cic = crq->io_context;
 		}
 
-		if (RB_EMPTY(&cfqq->sort_list))
+		if (RB_EMPTY_ROOT(&cfqq->sort_list))
 			break;
 
 	} while (dispatched < max_dispatch);
 
 	/*
-	 * if slice end isn't set yet, set it. if at least one request was
-	 * sync, use the sync time slice value
+	 * if slice end isn't set yet, set it.
 	 */
 	if (!cfqq->slice_end)
 		cfq_set_prio_slice(cfqd, cfqq);
@@ -1104,7 +1102,8 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	 */
 	if ((!cfq_cfqq_sync(cfqq) &&
 	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-	    cfq_class_idle(cfqq))
+	    cfq_class_idle(cfqq) ||
+	    !cfq_cfqq_idle_window(cfqq))
 		cfq_slice_expired(cfqd, 0);
 
 	return dispatched;
@@ -1113,10 +1112,11 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 static int
 cfq_forced_dispatch_cfqqs(struct list_head *list)
 {
-	int dispatched = 0;
 	struct cfq_queue *cfqq, *next;
 	struct cfq_rq *crq;
+	int dispatched;
 
+	dispatched = 0;
 	list_for_each_entry_safe(cfqq, next, list, cfq_list) {
 		while ((crq = cfqq->next_crq)) {
 			cfq_dispatch_insert(cfqq->cfqd->queue, crq);
@@ -1124,6 +1124,7 @@ cfq_forced_dispatch_cfqqs(struct list_head *list)
 		}
 		BUG_ON(!list_empty(&cfqq->fifo));
 	}
+
 	return dispatched;
 }
 
@@ -1150,7 +1151,8 @@ static int
 cfq_dispatch_requests(request_queue_t *q, int force)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_queue *cfqq;
+	struct cfq_queue *cfqq, *prev_cfqq;
+	int dispatched;
 
 	if (!cfqd->busy_queues)
 		return 0;
@@ -1158,10 +1160,17 @@ cfq_dispatch_requests(request_queue_t *q, int force)
 	if (unlikely(force))
 		return cfq_forced_dispatch(cfqd);
 
-	cfqq = cfq_select_queue(cfqd);
-	if (cfqq) {
+	dispatched = 0;
+	prev_cfqq = NULL;
+	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
 		int max_dispatch;
 
+		/*
+		 * Don't repeat dispatch from the previous queue.
+		 */
+		if (prev_cfqq == cfqq)
+			break;
+
 		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_wait_request(cfqq);
 		del_timer(&cfqd->idle_slice_timer);
@@ -1170,10 +1179,19 @@ cfq_dispatch_requests(request_queue_t *q, int force)
 		if (cfq_class_idle(cfqq))
 			max_dispatch = 1;
 
-		return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+
+		/*
+		 * If the dispatch cfqq has idling enabled and is still
+		 * the active queue, break out.
+		 */
+		if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue)
+			break;
+
+		prev_cfqq = cfqq;
 	}
 
-	return 0;
+	return dispatched;
 }
 
 /*
@@ -1379,25 +1397,28 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
 {
 	struct cfq_data *cfqd = cic->key;
 	struct cfq_queue *cfqq;
-	if (cfqd) {
-		spin_lock(cfqd->queue->queue_lock);
-		cfqq = cic->cfqq[ASYNC];
-		if (cfqq) {
-			struct cfq_queue *new_cfqq;
-			new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC,
-						cic->ioc->task, GFP_ATOMIC);
-			if (new_cfqq) {
-				cic->cfqq[ASYNC] = new_cfqq;
-				cfq_put_queue(cfqq);
-			}
-		}
-		cfqq = cic->cfqq[SYNC];
-		if (cfqq) {
-			cfq_mark_cfqq_prio_changed(cfqq);
-			cfq_init_prio_data(cfqq);
-		}
-		spin_unlock(cfqd->queue->queue_lock);
-	}
+
+	if (unlikely(!cfqd))
+		return;
+
+	spin_lock(cfqd->queue->queue_lock);
+
+	cfqq = cic->cfqq[ASYNC];
+	if (cfqq) {
+		struct cfq_queue *new_cfqq;
+		new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
+					 GFP_ATOMIC);
+		if (new_cfqq) {
+			cic->cfqq[ASYNC] = new_cfqq;
+			cfq_put_queue(cfqq);
+		}
+	}
+
+	cfqq = cic->cfqq[SYNC];
+	if (cfqq)
+		cfq_mark_cfqq_prio_changed(cfqq);
+
+	spin_unlock(cfqd->queue->queue_lock);
 }
 
 /*
@@ -1454,7 +1475,6 @@ retry:
 
 		INIT_HLIST_NODE(&cfqq->cfq_hash);
 		INIT_LIST_HEAD(&cfqq->cfq_list);
-		RB_CLEAR_ROOT(&cfqq->sort_list);
 		INIT_LIST_HEAD(&cfqq->fifo);
 
 		cfqq->key = key;
@@ -1466,8 +1486,7 @@ retry:
 		 * set ->slice_left to allow preemption for a new process
 		 */
 		cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
-		if (!cfqd->hw_tag)
-			cfq_mark_cfqq_idle_window(cfqq);
+		cfq_mark_cfqq_idle_window(cfqq);
 		cfq_mark_cfqq_prio_changed(cfqq);
 		cfq_init_prio_data(cfqq);
 	}
@@ -1658,7 +1677,8 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
 	int enable_idle = cfq_cfqq_idle_window(cfqq);
 
-	if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
+	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
+	    (cfqd->hw_tag && CIC_SEEKY(cic)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
 		if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -1688,7 +1708,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		return 0;
 
 	if (!cfqq)
-		return 1;
+		return 0;
 
 	if (cfq_class_idle(cfqq))
 		return 1;
@@ -1720,7 +1740,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
 
 	cfqq->slice_end = cfqq->slice_left + jiffies;
-	__cfq_slice_expired(cfqd, cfqq, 1);
+	cfq_slice_expired(cfqd, 1);
 	__cfq_set_active_queue(cfqd, cfqq);
 }
 
@@ -1745,11 +1765,7 @@ static void
 cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 struct cfq_rq *crq)
 {
-	struct cfq_io_context *cic;
-
-	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
-
-	cic = crq->io_context;
+	struct cfq_io_context *cic = crq->io_context;
 
 	/*
 	 * we never wait for an async request and we don't allow preemption
@@ -1839,11 +1855,23 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
 			cfqq->service_last = now;
 			cfq_resort_rr_list(cfqq, 0);
 		}
-		cfq_schedule_dispatch(cfqd);
 	}
 
-	if (cfq_crq_is_sync(crq))
+	if (sync)
 		crq->io_context->last_end_request = now;
+
+	/*
+	 * If this is the active queue, check if it needs to be expired,
+	 * or if we want to idle in case it has no pending requests.
+	 */
+	if (cfqd->active_queue == cfqq) {
+		if (time_after(now, cfqq->slice_end))
+			cfq_slice_expired(cfqd, 0);
+		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) {
+			if (!cfq_arm_slice_timer(cfqd, cfqq))
+				cfq_schedule_dispatch(cfqd);
+		}
+	}
 }
 
 static struct request *
@@ -1910,7 +1938,6 @@ static inline int
 __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		struct task_struct *task, int rw)
 {
-#if 1
 	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
 	    !cfq_cfqq_must_alloc_slice(cfqq)) {
 		cfq_mark_cfqq_must_alloc_slice(cfqq);
@@ -1918,39 +1945,6 @@ __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 
 	return ELV_MQUEUE_MAY;
-#else
-	if (!cfqq || task->flags & PF_MEMALLOC)
-		return ELV_MQUEUE_MAY;
-	if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
-		if (cfq_cfqq_wait_request(cfqq))
-			return ELV_MQUEUE_MUST;
-
-		/*
-		 * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
-		 * can quickly flood the queue with writes from a single task
-		 */
-		if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
-			cfq_mark_cfqq_must_alloc_slice(cfqq);
-			return ELV_MQUEUE_MUST;
-		}
-
-		return ELV_MQUEUE_MAY;
-	}
-	if (cfq_class_idle(cfqq))
-		return ELV_MQUEUE_NO;
-	if (cfqq->allocated[rw] >= cfqd->max_queued) {
-		struct io_context *ioc = get_io_context(GFP_ATOMIC);
-		int ret = ELV_MQUEUE_NO;
-
-		if (ioc && ioc->nr_batch_requests)
-			ret = ELV_MQUEUE_MAY;
-
-		put_io_context(ioc);
-		return ret;
-	}
-
-	return ELV_MQUEUE_MAY;
-#endif
 }
 
 static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
@@ -1979,16 +1973,13 @@ static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
 static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct request_list *rl = &q->rq;
 
-	if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
+	if (unlikely(cfqd->rq_starved)) {
+		struct request_list *rl = &q->rq;
+
 		smp_mb();
 		if (waitqueue_active(&rl->wait[READ]))
 			wake_up(&rl->wait[READ]);
-	}
-
-	if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
-		smp_mb();
 		if (waitqueue_active(&rl->wait[WRITE]))
 			wake_up(&rl->wait[WRITE]);
 	}
@@ -2062,7 +2053,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 
 	crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
 	if (crq) {
-		RB_CLEAR(&crq->rb_node);
+		RB_CLEAR_NODE(&crq->rb_node);
 		crq->rb_key = 0;
 		crq->request = rq;
 		INIT_HLIST_NODE(&crq->hash);
@@ -2148,16 +2139,13 @@ static void cfq_idle_slice_timer(unsigned long data)
 	 * only expire and reinvoke request handler, if there are
 	 * other queues with pending requests
 	 */
-	if (!cfqd->busy_queues) {
-		cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
-		add_timer(&cfqd->idle_slice_timer);
+	if (!cfqd->busy_queues)
 		goto out_cont;
-	}
 
 	/*
 	 * not expired and it has a request pending, let it dispatch
 	 */
-	if (!RB_EMPTY(&cfqq->sort_list)) {
+	if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
 		cfq_mark_cfqq_must_dispatch(cfqq);
 		goto out_kick;
 	}
@@ -2278,9 +2266,6 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 
 	cfqd->queue = q;
 
-	cfqd->max_queued = q->nr_requests / 4;
-	q->nr_batching = cfq_queued;
-
 	init_timer(&cfqd->idle_slice_timer);
 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c94de8e12fbf..4469dd84623c 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -30,8 +30,7 @@ static const int deadline_hash_shift = 5;
 #define DL_HASH_FN(sec)		(hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
 #define DL_HASH_ENTRIES		(1 << deadline_hash_shift)
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr)	list_entry((ptr), struct deadline_rq, hash)
-#define ON_HASH(drq)		(drq)->on_hash
+#define ON_HASH(drq)		(!hlist_unhashed(&(drq)->hash))
 
 struct deadline_data {
 	/*
@@ -48,7 +47,7 @@ struct deadline_data {
 	 * next in sort order. read, write or both are NULL
 	 */
 	struct deadline_rq *next_drq[2];
-	struct list_head *hash;		/* request hash */
+	struct hlist_head *hash;	/* request hash */
 	unsigned int batching;		/* number of sequential requests made */
 	sector_t last_sector;		/* head position */
 	unsigned int starved;		/* times reads have starved writes */
@@ -79,8 +78,7 @@ struct deadline_rq {
 	/*
 	 * request hash, key is the ending offset (for back merge lookup)
 	 */
-	struct list_head hash;
-	char on_hash;
+	struct hlist_node hash;
 
 	/*
 	 * expire fifo
@@ -100,8 +98,7 @@ static kmem_cache_t *drq_pool;
  */
 static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
 {
-	drq->on_hash = 0;
-	list_del_init(&drq->hash);
+	hlist_del_init(&drq->hash);
 }
 
 static inline void deadline_del_drq_hash(struct deadline_rq *drq)
@@ -117,8 +114,7 @@ deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
 
 	BUG_ON(ON_HASH(drq));
 
-	drq->on_hash = 1;
-	list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
+	hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
 }
 
 /*
@@ -128,26 +124,24 @@ static inline void
 deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
 {
 	struct request *rq = drq->request;
-	struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
+	struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
 
-	if (ON_HASH(drq) && drq->hash.prev != head) {
-		list_del(&drq->hash);
-		list_add(&drq->hash, head);
+	if (ON_HASH(drq) && &drq->hash != head->first) {
+		hlist_del(&drq->hash);
+		hlist_add_head(&drq->hash, head);
 	}
 }
 
 static struct request *
 deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
 {
-	struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
-	struct list_head *entry, *next = hash_list->next;
+	struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
+	struct hlist_node *entry, *next;
+	struct deadline_rq *drq;
 
-	while ((entry = next) != hash_list) {
-		struct deadline_rq *drq = list_entry_hash(entry);
+	hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
 		struct request *__rq = drq->request;
 
-		next = entry->next;
-
 		BUG_ON(!ON_HASH(drq));
 
 		if (!rq_mergeable(__rq)) {
@@ -165,9 +159,6 @@ deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
 /*
  * rb tree support functions
  */
-#define RB_EMPTY(root)	((root)->rb_node == NULL)
-#define ON_RB(node)	(rb_parent(node) != node)
-#define RB_CLEAR(node)	(rb_set_parent(node, node))
 #define rb_entry_drq(node)	rb_entry((node), struct deadline_rq, rb_node)
 #define DRQ_RB_ROOT(dd, drq)	(&(dd)->sort_list[rq_data_dir((drq)->request)])
 #define rq_rb_key(rq)		(rq)->sector
@@ -226,9 +217,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
 		dd->next_drq[data_dir] = rb_entry_drq(rbnext);
 	}
 
-	BUG_ON(!ON_RB(&drq->rb_node));
+	BUG_ON(!RB_EMPTY_NODE(&drq->rb_node));
 	rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-	RB_CLEAR(&drq->rb_node);
+	RB_CLEAR_NODE(&drq->rb_node);
 }
 
 static struct request *
@@ -502,7 +493,7 @@ static int deadline_dispatch_requests(request_queue_t *q, int force)
 	 */
 
 	if (reads) {
-		BUG_ON(RB_EMPTY(&dd->sort_list[READ]));
+		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
 
 		if (writes && (dd->starved++ >= dd->writes_starved))
 			goto dispatch_writes;
@@ -518,7 +509,7 @@ static int deadline_dispatch_requests(request_queue_t *q, int force)
 
 	if (writes) {
 dispatch_writes:
-		BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));
+		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));
 
 		dd->starved = 0;
 
@@ -625,7 +616,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 		return NULL;
 	memset(dd, 0, sizeof(*dd));
 
-	dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
+	dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
 				GFP_KERNEL, q->node);
 	if (!dd->hash) {
 		kfree(dd);
@@ -641,7 +632,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 	}
 
 	for (i = 0; i < DL_HASH_ENTRIES; i++)
-		INIT_LIST_HEAD(&dd->hash[i]);
+		INIT_HLIST_HEAD(&dd->hash[i]);
 
 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -674,11 +665,10 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	drq = mempool_alloc(dd->drq_pool, gfp_mask);
 	if (drq) {
 		memset(drq, 0, sizeof(*drq));
-		RB_CLEAR(&drq->rb_node);
+		RB_CLEAR_NODE(&drq->rb_node);
 		drq->request = rq;
 
-		INIT_LIST_HEAD(&drq->hash);
-		drq->on_hash = 0;
+		INIT_HLIST_NODE(&drq->hash);
 
 		INIT_LIST_HEAD(&drq->fifo);
 
diff --git a/block/elevator.c b/block/elevator.c
index a0afdd317cef..d00b283f31d2 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -850,12 +850,9 @@ fail_register:
 	 * one again (along with re-adding the sysfs dir)
 	 */
 	elevator_exit(e);
-	e = NULL;
 	q->elevator = old_elevator;
 	elv_register_queue(q);
 	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-	if (e)
-		kobject_put(&e->kobj);
 	return 0;
 }
 
diff --git a/block/genhd.c b/block/genhd.c
index 5a8d3bf02f17..8d7339511e5e 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -17,8 +17,7 @@
 #include <linux/buffer_head.h>
 #include <linux/mutex.h>
 
-static struct subsystem block_subsys;
-
+struct subsystem block_subsys;
 static DEFINE_MUTEX(block_subsys_lock);
 
 /*
@@ -511,9 +510,7 @@ static struct kset_uevent_ops block_uevent_ops = {
 	.uevent		= block_uevent,
 };
 
-/* declare block_subsys. */
-static decl_subsys(block, &ktype_block, &block_uevent_ops);
-
+decl_subsys(block, &ktype_block, &block_uevent_ops);
 
 /*
  * aggregate disk stat collector. Uses the same stats that the sysfs
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 7eb36c53f4b7..0603ab2f3692 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -638,7 +638,7 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
 	/* Assume anything <= 4GB can be handled by IOMMU.
 	   Actually some IOMMUs can handle everything, but I don't
 	   know of a way to test this here. */
-	if (bounce_pfn < (0xffffffff>>PAGE_SHIFT))
+	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
@@ -1663,6 +1663,8 @@ static void blk_unplug_timeout(unsigned long data)
  **/
 void blk_start_queue(request_queue_t *q)
 {
+	WARN_ON(!irqs_disabled());
+
 	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
 
 	/*
@@ -1878,7 +1880,8 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
  * get dealt with eventually.
  *
  * The queue spin lock must be held while manipulating the requests on the
- * request queue.
+ * request queue; this lock will be taken also from interrupt context, so irq
+ * disabling is needed for it.
  *
  * Function returns a pointer to the initialized request queue, or NULL if
  * it didn't succeed.
@@ -2824,6 +2827,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
 	if (unlikely(bio_barrier(bio)))
 		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
+	if (bio_sync(bio))
+		req->flags |= REQ_RW_SYNC;
+
 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
 	req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
@@ -3359,12 +3365,11 @@ EXPORT_SYMBOL(end_that_request_chunk);
  */
 static void blk_done_softirq(struct softirq_action *h)
 {
-	struct list_head *cpu_list;
-	LIST_HEAD(local_list);
+	struct list_head *cpu_list, local_list;
 
 	local_irq_disable();
 	cpu_list = &__get_cpu_var(blk_cpu_done);
-	list_splice_init(cpu_list, &local_list);
+	list_replace_init(cpu_list, &local_list);
 	local_irq_enable();
 
 	while (!list_empty(&local_list)) {