| author | Steven Whitehouse <swhiteho@redhat.com> | 2006-07-03 10:25:08 -0400 |
|---|---|---|
| committer | Steven Whitehouse <swhiteho@redhat.com> | 2006-07-03 10:25:08 -0400 |
| commit | 0a1340c185734a57fbf4775927966ad4a1347b02 | |
| tree | d9ed8f0dd809a7c542a3356601125ea5b5aaa804 /block | |
| parent | af18ddb8864b096e3ed4732e2d4b21c956dcfe3a | |
| parent | 29454dde27d8e340bb1987bad9aa504af7081eba | |
Merge rsync://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
include/linux/kernel.h
Diffstat (limited to 'block')
| -rw-r--r-- | block/Kconfig.iosched | 2 |
| -rw-r--r-- | block/as-iosched.c | 68 |
| -rw-r--r-- | block/blktrace.c | 1 |
| -rw-r--r-- | block/cfq-iosched.c | 207 |
| -rw-r--r-- | block/deadline-iosched.c | 54 |
| -rw-r--r-- | block/elevator.c | 4 |
| -rw-r--r-- | block/genhd.c | 8 |
| -rw-r--r-- | block/ll_rw_blk.c | 30 |
8 files changed, 160 insertions, 214 deletions
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index f3b7753aac99..48d090e266fc 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -40,7 +40,7 @@ config IOSCHED_CFQ | |||
40 | 40 | ||
41 | choice | 41 | choice |
42 | prompt "Default I/O scheduler" | 42 | prompt "Default I/O scheduler" |
43 | default DEFAULT_AS | 43 | default DEFAULT_CFQ |
44 | help | 44 | help |
45 | Select the I/O scheduler which will be used by default for all | 45 | Select the I/O scheduler which will be used by default for all |
46 | block devices. | 46 | block devices. |
diff --git a/block/as-iosched.c b/block/as-iosched.c
index a7caf35ca0c2..5da56d48fbd3 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/blkdev.h> | 10 | #include <linux/blkdev.h> |
11 | #include <linux/elevator.h> | 11 | #include <linux/elevator.h> |
12 | #include <linux/bio.h> | 12 | #include <linux/bio.h> |
13 | #include <linux/config.h> | ||
14 | #include <linux/module.h> | 13 | #include <linux/module.h> |
15 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
16 | #include <linux/init.h> | 15 | #include <linux/init.h> |
@@ -96,7 +95,7 @@ struct as_data { | |||
96 | 95 | ||
97 | struct as_rq *next_arq[2]; /* next in sort order */ | 96 | struct as_rq *next_arq[2]; /* next in sort order */ |
98 | sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */ | 97 | sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */ |
99 | struct list_head *hash; /* request hash */ | 98 | struct hlist_head *hash; /* request hash */ |
100 | 99 | ||
101 | unsigned long exit_prob; /* probability a task will exit while | 100 | unsigned long exit_prob; /* probability a task will exit while |
102 | being waited on */ | 101 | being waited on */ |
@@ -165,8 +164,7 @@ struct as_rq { | |||
165 | /* | 164 | /* |
166 | * request hash, key is the ending offset (for back merge lookup) | 165 | * request hash, key is the ending offset (for back merge lookup) |
167 | */ | 166 | */ |
168 | struct list_head hash; | 167 | struct hlist_node hash; |
169 | unsigned int on_hash; | ||
170 | 168 | ||
171 | /* | 169 | /* |
172 | * expire fifo | 170 | * expire fifo |
@@ -282,17 +280,15 @@ static const int as_hash_shift = 6; | |||
282 | #define AS_HASH_FN(sec) (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift)) | 280 | #define AS_HASH_FN(sec) (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift)) |
283 | #define AS_HASH_ENTRIES (1 << as_hash_shift) | 281 | #define AS_HASH_ENTRIES (1 << as_hash_shift) |
284 | #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) | 282 | #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) |
285 | #define list_entry_hash(ptr) list_entry((ptr), struct as_rq, hash) | ||
286 | 283 | ||
287 | static inline void __as_del_arq_hash(struct as_rq *arq) | 284 | static inline void __as_del_arq_hash(struct as_rq *arq) |
288 | { | 285 | { |
289 | arq->on_hash = 0; | 286 | hlist_del_init(&arq->hash); |
290 | list_del_init(&arq->hash); | ||
291 | } | 287 | } |
292 | 288 | ||
293 | static inline void as_del_arq_hash(struct as_rq *arq) | 289 | static inline void as_del_arq_hash(struct as_rq *arq) |
294 | { | 290 | { |
295 | if (arq->on_hash) | 291 | if (!hlist_unhashed(&arq->hash)) |
296 | __as_del_arq_hash(arq); | 292 | __as_del_arq_hash(arq); |
297 | } | 293 | } |
298 | 294 | ||
@@ -300,10 +296,9 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq) | |||
300 | { | 296 | { |
301 | struct request *rq = arq->request; | 297 | struct request *rq = arq->request; |
302 | 298 | ||
303 | BUG_ON(arq->on_hash); | 299 | BUG_ON(!hlist_unhashed(&arq->hash)); |
304 | 300 | ||
305 | arq->on_hash = 1; | 301 | hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]); |
306 | list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]); | ||
307 | } | 302 | } |
308 | 303 | ||
309 | /* | 304 | /* |
@@ -312,31 +307,29 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq) | |||
312 | static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq) | 307 | static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq) |
313 | { | 308 | { |
314 | struct request *rq = arq->request; | 309 | struct request *rq = arq->request; |
315 | struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))]; | 310 | struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))]; |
316 | 311 | ||
317 | if (!arq->on_hash) { | 312 | if (hlist_unhashed(&arq->hash)) { |
318 | WARN_ON(1); | 313 | WARN_ON(1); |
319 | return; | 314 | return; |
320 | } | 315 | } |
321 | 316 | ||
322 | if (arq->hash.prev != head) { | 317 | if (&arq->hash != head->first) { |
323 | list_del(&arq->hash); | 318 | hlist_del(&arq->hash); |
324 | list_add(&arq->hash, head); | 319 | hlist_add_head(&arq->hash, head); |
325 | } | 320 | } |
326 | } | 321 | } |
327 | 322 | ||
328 | static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset) | 323 | static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset) |
329 | { | 324 | { |
330 | struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)]; | 325 | struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)]; |
331 | struct list_head *entry, *next = hash_list->next; | 326 | struct hlist_node *entry, *next; |
327 | struct as_rq *arq; | ||
332 | 328 | ||
333 | while ((entry = next) != hash_list) { | 329 | hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) { |
334 | struct as_rq *arq = list_entry_hash(entry); | ||
335 | struct request *__rq = arq->request; | 330 | struct request *__rq = arq->request; |
336 | 331 | ||
337 | next = entry->next; | 332 | BUG_ON(hlist_unhashed(&arq->hash)); |
338 | |||
339 | BUG_ON(!arq->on_hash); | ||
340 | 333 | ||
341 | if (!rq_mergeable(__rq)) { | 334 | if (!rq_mergeable(__rq)) { |
342 | as_del_arq_hash(arq); | 335 | as_del_arq_hash(arq); |
@@ -353,10 +346,6 @@ static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset) | |||
353 | /* | 346 | /* |
354 | * rb tree support functions | 347 | * rb tree support functions |
355 | */ | 348 | */ |
356 | #define RB_NONE (2) | ||
357 | #define RB_EMPTY(root) ((root)->rb_node == NULL) | ||
358 | #define ON_RB(node) ((node)->rb_color != RB_NONE) | ||
359 | #define RB_CLEAR(node) ((node)->rb_color = RB_NONE) | ||
360 | #define rb_entry_arq(node) rb_entry((node), struct as_rq, rb_node) | 349 | #define rb_entry_arq(node) rb_entry((node), struct as_rq, rb_node) |
361 | #define ARQ_RB_ROOT(ad, arq) (&(ad)->sort_list[(arq)->is_sync]) | 350 | #define ARQ_RB_ROOT(ad, arq) (&(ad)->sort_list[(arq)->is_sync]) |
362 | #define rq_rb_key(rq) (rq)->sector | 351 | #define rq_rb_key(rq) (rq)->sector |
@@ -425,13 +414,13 @@ static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq) | |||
425 | 414 | ||
426 | static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq) | 415 | static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq) |
427 | { | 416 | { |
428 | if (!ON_RB(&arq->rb_node)) { | 417 | if (!RB_EMPTY_NODE(&arq->rb_node)) { |
429 | WARN_ON(1); | 418 | WARN_ON(1); |
430 | return; | 419 | return; |
431 | } | 420 | } |
432 | 421 | ||
433 | rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq)); | 422 | rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq)); |
434 | RB_CLEAR(&arq->rb_node); | 423 | RB_CLEAR_NODE(&arq->rb_node); |
435 | } | 424 | } |
436 | 425 | ||
437 | static struct request * | 426 | static struct request * |
@@ -552,7 +541,7 @@ static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last) | |||
552 | struct rb_node *rbprev = rb_prev(&last->rb_node); | 541 | struct rb_node *rbprev = rb_prev(&last->rb_node); |
553 | struct as_rq *arq_next, *arq_prev; | 542 | struct as_rq *arq_next, *arq_prev; |
554 | 543 | ||
555 | BUG_ON(!ON_RB(&last->rb_node)); | 544 | BUG_ON(!RB_EMPTY_NODE(&last->rb_node)); |
556 | 545 | ||
557 | if (rbprev) | 546 | if (rbprev) |
558 | arq_prev = rb_entry_arq(rbprev); | 547 | arq_prev = rb_entry_arq(rbprev); |
@@ -902,7 +891,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq) | |||
902 | } | 891 | } |
903 | 892 | ||
904 | /* | 893 | /* |
905 | * as_can_anticipate indicates weather we should either run arq | 894 | * as_can_anticipate indicates whether we should either run arq |
906 | * or keep anticipating a better request. | 895 | * or keep anticipating a better request. |
907 | */ | 896 | */ |
908 | static int as_can_anticipate(struct as_data *ad, struct as_rq *arq) | 897 | static int as_can_anticipate(struct as_data *ad, struct as_rq *arq) |
@@ -1129,7 +1118,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) | |||
1129 | struct request *rq = arq->request; | 1118 | struct request *rq = arq->request; |
1130 | const int data_dir = arq->is_sync; | 1119 | const int data_dir = arq->is_sync; |
1131 | 1120 | ||
1132 | BUG_ON(!ON_RB(&arq->rb_node)); | 1121 | BUG_ON(!RB_EMPTY_NODE(&arq->rb_node)); |
1133 | 1122 | ||
1134 | as_antic_stop(ad); | 1123 | as_antic_stop(ad); |
1135 | ad->antic_status = ANTIC_OFF; | 1124 | ad->antic_status = ANTIC_OFF; |
@@ -1254,7 +1243,7 @@ static int as_dispatch_request(request_queue_t *q, int force) | |||
1254 | */ | 1243 | */ |
1255 | 1244 | ||
1256 | if (reads) { | 1245 | if (reads) { |
1257 | BUG_ON(RB_EMPTY(&ad->sort_list[REQ_SYNC])); | 1246 | BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC])); |
1258 | 1247 | ||
1259 | if (writes && ad->batch_data_dir == REQ_SYNC) | 1248 | if (writes && ad->batch_data_dir == REQ_SYNC) |
1260 | /* | 1249 | /* |
@@ -1278,7 +1267,7 @@ static int as_dispatch_request(request_queue_t *q, int force) | |||
1278 | 1267 | ||
1279 | if (writes) { | 1268 | if (writes) { |
1280 | dispatch_writes: | 1269 | dispatch_writes: |
1281 | BUG_ON(RB_EMPTY(&ad->sort_list[REQ_ASYNC])); | 1270 | BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC])); |
1282 | 1271 | ||
1283 | if (ad->batch_data_dir == REQ_SYNC) { | 1272 | if (ad->batch_data_dir == REQ_SYNC) { |
1284 | ad->changed_batch = 1; | 1273 | ad->changed_batch = 1; |
@@ -1346,7 +1335,7 @@ static void as_add_request(request_queue_t *q, struct request *rq) | |||
1346 | arq->state = AS_RQ_NEW; | 1335 | arq->state = AS_RQ_NEW; |
1347 | 1336 | ||
1348 | if (rq_data_dir(arq->request) == READ | 1337 | if (rq_data_dir(arq->request) == READ |
1349 | || current->flags&PF_SYNCWRITE) | 1338 | || (arq->request->flags & REQ_RW_SYNC)) |
1350 | arq->is_sync = 1; | 1339 | arq->is_sync = 1; |
1351 | else | 1340 | else |
1352 | arq->is_sync = 0; | 1341 | arq->is_sync = 0; |
@@ -1598,12 +1587,11 @@ static int as_set_request(request_queue_t *q, struct request *rq, | |||
1598 | 1587 | ||
1599 | if (arq) { | 1588 | if (arq) { |
1600 | memset(arq, 0, sizeof(*arq)); | 1589 | memset(arq, 0, sizeof(*arq)); |
1601 | RB_CLEAR(&arq->rb_node); | 1590 | RB_CLEAR_NODE(&arq->rb_node); |
1602 | arq->request = rq; | 1591 | arq->request = rq; |
1603 | arq->state = AS_RQ_PRESCHED; | 1592 | arq->state = AS_RQ_PRESCHED; |
1604 | arq->io_context = NULL; | 1593 | arq->io_context = NULL; |
1605 | INIT_LIST_HEAD(&arq->hash); | 1594 | INIT_HLIST_NODE(&arq->hash); |
1606 | arq->on_hash = 0; | ||
1607 | INIT_LIST_HEAD(&arq->fifo); | 1595 | INIT_LIST_HEAD(&arq->fifo); |
1608 | rq->elevator_private = arq; | 1596 | rq->elevator_private = arq; |
1609 | return 0; | 1597 | return 0; |
@@ -1663,7 +1651,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e) | |||
1663 | 1651 | ||
1664 | ad->q = q; /* Identify what queue the data belongs to */ | 1652 | ad->q = q; /* Identify what queue the data belongs to */ |
1665 | 1653 | ||
1666 | ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES, | 1654 | ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES, |
1667 | GFP_KERNEL, q->node); | 1655 | GFP_KERNEL, q->node); |
1668 | if (!ad->hash) { | 1656 | if (!ad->hash) { |
1669 | kfree(ad); | 1657 | kfree(ad); |
@@ -1685,7 +1673,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e) | |||
1685 | INIT_WORK(&ad->antic_work, as_work_handler, q); | 1673 | INIT_WORK(&ad->antic_work, as_work_handler, q); |
1686 | 1674 | ||
1687 | for (i = 0; i < AS_HASH_ENTRIES; i++) | 1675 | for (i = 0; i < AS_HASH_ENTRIES; i++) |
1688 | INIT_LIST_HEAD(&ad->hash[i]); | 1676 | INIT_HLIST_HEAD(&ad->hash[i]); |
1689 | 1677 | ||
1690 | INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); | 1678 | INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); |
1691 | INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); | 1679 | INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); |
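The as-iosched.c hunks above convert the request hash from a `list_head` plus an explicit `on_hash` flag to an `hlist`, where "not hashed" can be read from the node itself. Below is a minimal userspace sketch of that pattern; the types and helpers are simplified stand-ins for illustration, not the kernel's actual `<linux/list.h>` implementation.

```c
#include <stdio.h>
#include <stddef.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

static void INIT_HLIST_NODE(struct hlist_node *n) { n->next = NULL; n->pprev = NULL; }
static int hlist_unhashed(const struct hlist_node *n) { return !n->pprev; }

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hlist_del_init(struct hlist_node *n)
{
	if (hlist_unhashed(n))
		return;
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	INIT_HLIST_NODE(n);		/* node is back in the "not hashed" state */
}

struct req {				/* toy stand-in for struct as_rq */
	int id;
	struct hlist_node hash;		/* no separate on_hash flag needed */
};

int main(void)
{
	struct hlist_head bucket = { NULL };
	struct req r = { .id = 1 };

	INIT_HLIST_NODE(&r.hash);
	printf("hashed? %d\n", !hlist_unhashed(&r.hash));	/* 0 */

	hlist_add_head(&r.hash, &bucket);
	printf("hashed? %d\n", !hlist_unhashed(&r.hash));	/* 1 */

	hlist_del_init(&r.hash);
	printf("hashed? %d\n", !hlist_unhashed(&r.hash));	/* 0 */
	return 0;
}
```

This is why the patch can drop `arq->on_hash` entirely: `hlist_unhashed()` answers the same question from the node's `pprev` pointer, and `hlist_del_init()` restores the unhashed state after removal.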
diff --git a/block/blktrace.c b/block/blktrace.c
index 36f3a172275f..92925e7d9e6c 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -15,7 +15,6 @@ | |||
15 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 15 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | #include <linux/config.h> | ||
19 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
20 | #include <linux/blkdev.h> | 19 | #include <linux/blkdev.h> |
21 | #include <linux/blktrace_api.h> | 20 | #include <linux/blktrace_api.h> |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 052b17487625..102ebc2c5c34 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -6,7 +6,6 @@ | |||
6 | * | 6 | * |
7 | * Copyright (C) 2003 Jens Axboe <axboe@suse.de> | 7 | * Copyright (C) 2003 Jens Axboe <axboe@suse.de> |
8 | */ | 8 | */ |
9 | #include <linux/config.h> | ||
10 | #include <linux/module.h> | 9 | #include <linux/module.h> |
11 | #include <linux/blkdev.h> | 10 | #include <linux/blkdev.h> |
12 | #include <linux/elevator.h> | 11 | #include <linux/elevator.h> |
@@ -26,7 +25,7 @@ static const int cfq_back_penalty = 2; /* penalty of a backwards seek */ | |||
26 | static const int cfq_slice_sync = HZ / 10; | 25 | static const int cfq_slice_sync = HZ / 10; |
27 | static int cfq_slice_async = HZ / 25; | 26 | static int cfq_slice_async = HZ / 25; |
28 | static const int cfq_slice_async_rq = 2; | 27 | static const int cfq_slice_async_rq = 2; |
29 | static int cfq_slice_idle = HZ / 70; | 28 | static int cfq_slice_idle = HZ / 125; |
30 | 29 | ||
31 | #define CFQ_IDLE_GRACE (HZ / 10) | 30 | #define CFQ_IDLE_GRACE (HZ / 10) |
32 | #define CFQ_SLICE_SCALE (5) | 31 | #define CFQ_SLICE_SCALE (5) |
@@ -60,16 +59,6 @@ static DEFINE_SPINLOCK(cfq_exit_lock); | |||
60 | /* | 59 | /* |
61 | * rb-tree defines | 60 | * rb-tree defines |
62 | */ | 61 | */ |
63 | #define RB_NONE (2) | ||
64 | #define RB_EMPTY(node) ((node)->rb_node == NULL) | ||
65 | #define RB_CLEAR_COLOR(node) (node)->rb_color = RB_NONE | ||
66 | #define RB_CLEAR(node) do { \ | ||
67 | (node)->rb_parent = NULL; \ | ||
68 | RB_CLEAR_COLOR((node)); \ | ||
69 | (node)->rb_right = NULL; \ | ||
70 | (node)->rb_left = NULL; \ | ||
71 | } while (0) | ||
72 | #define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL) | ||
73 | #define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node) | 62 | #define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node) |
74 | #define rq_rb_key(rq) (rq)->sector | 63 | #define rq_rb_key(rq) (rq)->sector |
75 | 64 | ||
@@ -128,8 +117,6 @@ struct cfq_data { | |||
128 | */ | 117 | */ |
129 | struct hlist_head *crq_hash; | 118 | struct hlist_head *crq_hash; |
130 | 119 | ||
131 | unsigned int max_queued; | ||
132 | |||
133 | mempool_t *crq_pool; | 120 | mempool_t *crq_pool; |
134 | 121 | ||
135 | int rq_in_driver; | 122 | int rq_in_driver; |
@@ -284,8 +271,6 @@ static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsi | |||
284 | static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *); | 271 | static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *); |
285 | static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask); | 272 | static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask); |
286 | 273 | ||
287 | #define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) | ||
288 | |||
289 | /* | 274 | /* |
290 | * lots of deadline iosched dupes, can be abstracted later... | 275 | * lots of deadline iosched dupes, can be abstracted later... |
291 | */ | 276 | */ |
@@ -341,7 +326,7 @@ static int cfq_queue_empty(request_queue_t *q) | |||
341 | 326 | ||
342 | static inline pid_t cfq_queue_pid(struct task_struct *task, int rw) | 327 | static inline pid_t cfq_queue_pid(struct task_struct *task, int rw) |
343 | { | 328 | { |
344 | if (rw == READ || process_sync(task)) | 329 | if (rw == READ || rw == WRITE_SYNC) |
345 | return task->pid; | 330 | return task->pid; |
346 | 331 | ||
347 | return CFQ_KEY_ASYNC; | 332 | return CFQ_KEY_ASYNC; |
@@ -567,9 +552,8 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq) | |||
567 | cfq_update_next_crq(crq); | 552 | cfq_update_next_crq(crq); |
568 | 553 | ||
569 | rb_erase(&crq->rb_node, &cfqq->sort_list); | 554 | rb_erase(&crq->rb_node, &cfqq->sort_list); |
570 | RB_CLEAR_COLOR(&crq->rb_node); | ||
571 | 555 | ||
572 | if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list)) | 556 | if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) |
573 | cfq_del_cfqq_rr(cfqd, cfqq); | 557 | cfq_del_cfqq_rr(cfqd, cfqq); |
574 | } | 558 | } |
575 | 559 | ||
@@ -916,13 +900,15 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd) | |||
916 | return cfqq; | 900 | return cfqq; |
917 | } | 901 | } |
918 | 902 | ||
903 | #define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024)) | ||
904 | |||
919 | static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) | 905 | static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
920 | 906 | ||
921 | { | 907 | { |
922 | struct cfq_io_context *cic; | 908 | struct cfq_io_context *cic; |
923 | unsigned long sl; | 909 | unsigned long sl; |
924 | 910 | ||
925 | WARN_ON(!RB_EMPTY(&cfqq->sort_list)); | 911 | WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); |
926 | WARN_ON(cfqq != cfqd->active_queue); | 912 | WARN_ON(cfqq != cfqd->active_queue); |
927 | 913 | ||
928 | /* | 914 | /* |
@@ -949,7 +935,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
949 | * fair distribution of slice time for a process doing back-to-back | 935 | * fair distribution of slice time for a process doing back-to-back |
950 | * seeks. so allow a little bit of time for him to submit a new rq | 936 | * seeks. so allow a little bit of time for him to submit a new rq |
951 | */ | 937 | */ |
952 | if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072) | 938 | if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic)) |
953 | sl = 2; | 939 | sl = 2; |
954 | 940 | ||
955 | mod_timer(&cfqd->idle_slice_timer, jiffies + sl); | 941 | mod_timer(&cfqd->idle_slice_timer, jiffies + sl); |
@@ -960,11 +946,15 @@ static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq) | |||
960 | { | 946 | { |
961 | struct cfq_data *cfqd = q->elevator->elevator_data; | 947 | struct cfq_data *cfqd = q->elevator->elevator_data; |
962 | struct cfq_queue *cfqq = crq->cfq_queue; | 948 | struct cfq_queue *cfqq = crq->cfq_queue; |
949 | struct request *rq; | ||
963 | 950 | ||
964 | cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq); | 951 | cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq); |
965 | cfq_remove_request(crq->request); | 952 | cfq_remove_request(crq->request); |
966 | cfqq->on_dispatch[cfq_crq_is_sync(crq)]++; | 953 | cfqq->on_dispatch[cfq_crq_is_sync(crq)]++; |
967 | elv_dispatch_sort(q, crq->request); | 954 | elv_dispatch_sort(q, crq->request); |
955 | |||
956 | rq = list_entry(q->queue_head.prev, struct request, queuelist); | ||
957 | cfqd->last_sector = rq->sector + rq->nr_sectors; | ||
968 | } | 958 | } |
969 | 959 | ||
970 | /* | 960 | /* |
@@ -1046,10 +1036,12 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) | |||
1046 | * if queue has requests, dispatch one. if not, check if | 1036 | * if queue has requests, dispatch one. if not, check if |
1047 | * enough slice is left to wait for one | 1037 | * enough slice is left to wait for one |
1048 | */ | 1038 | */ |
1049 | if (!RB_EMPTY(&cfqq->sort_list)) | 1039 | if (!RB_EMPTY_ROOT(&cfqq->sort_list)) |
1050 | goto keep_queue; | 1040 | goto keep_queue; |
1051 | else if (cfq_cfqq_class_sync(cfqq) && | 1041 | else if (cfq_cfqq_dispatched(cfqq)) { |
1052 | time_before(now, cfqq->slice_end)) { | 1042 | cfqq = NULL; |
1043 | goto keep_queue; | ||
1044 | } else if (cfq_cfqq_class_sync(cfqq)) { | ||
1053 | if (cfq_arm_slice_timer(cfqd, cfqq)) | 1045 | if (cfq_arm_slice_timer(cfqd, cfqq)) |
1054 | return NULL; | 1046 | return NULL; |
1055 | } | 1047 | } |
@@ -1068,7 +1060,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1068 | { | 1060 | { |
1069 | int dispatched = 0; | 1061 | int dispatched = 0; |
1070 | 1062 | ||
1071 | BUG_ON(RB_EMPTY(&cfqq->sort_list)); | 1063 | BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); |
1072 | 1064 | ||
1073 | do { | 1065 | do { |
1074 | struct cfq_rq *crq; | 1066 | struct cfq_rq *crq; |
@@ -1092,14 +1084,13 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1092 | cfqd->active_cic = crq->io_context; | 1084 | cfqd->active_cic = crq->io_context; |
1093 | } | 1085 | } |
1094 | 1086 | ||
1095 | if (RB_EMPTY(&cfqq->sort_list)) | 1087 | if (RB_EMPTY_ROOT(&cfqq->sort_list)) |
1096 | break; | 1088 | break; |
1097 | 1089 | ||
1098 | } while (dispatched < max_dispatch); | 1090 | } while (dispatched < max_dispatch); |
1099 | 1091 | ||
1100 | /* | 1092 | /* |
1101 | * if slice end isn't set yet, set it. if at least one request was | 1093 | * if slice end isn't set yet, set it. |
1102 | * sync, use the sync time slice value | ||
1103 | */ | 1094 | */ |
1104 | if (!cfqq->slice_end) | 1095 | if (!cfqq->slice_end) |
1105 | cfq_set_prio_slice(cfqd, cfqq); | 1096 | cfq_set_prio_slice(cfqd, cfqq); |
@@ -1110,7 +1101,8 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1110 | */ | 1101 | */ |
1111 | if ((!cfq_cfqq_sync(cfqq) && | 1102 | if ((!cfq_cfqq_sync(cfqq) && |
1112 | cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) || | 1103 | cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) || |
1113 | cfq_class_idle(cfqq)) | 1104 | cfq_class_idle(cfqq) || |
1105 | !cfq_cfqq_idle_window(cfqq)) | ||
1114 | cfq_slice_expired(cfqd, 0); | 1106 | cfq_slice_expired(cfqd, 0); |
1115 | 1107 | ||
1116 | return dispatched; | 1108 | return dispatched; |
@@ -1119,10 +1111,11 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1119 | static int | 1111 | static int |
1120 | cfq_forced_dispatch_cfqqs(struct list_head *list) | 1112 | cfq_forced_dispatch_cfqqs(struct list_head *list) |
1121 | { | 1113 | { |
1122 | int dispatched = 0; | ||
1123 | struct cfq_queue *cfqq, *next; | 1114 | struct cfq_queue *cfqq, *next; |
1124 | struct cfq_rq *crq; | 1115 | struct cfq_rq *crq; |
1116 | int dispatched; | ||
1125 | 1117 | ||
1118 | dispatched = 0; | ||
1126 | list_for_each_entry_safe(cfqq, next, list, cfq_list) { | 1119 | list_for_each_entry_safe(cfqq, next, list, cfq_list) { |
1127 | while ((crq = cfqq->next_crq)) { | 1120 | while ((crq = cfqq->next_crq)) { |
1128 | cfq_dispatch_insert(cfqq->cfqd->queue, crq); | 1121 | cfq_dispatch_insert(cfqq->cfqd->queue, crq); |
@@ -1130,6 +1123,7 @@ cfq_forced_dispatch_cfqqs(struct list_head *list) | |||
1130 | } | 1123 | } |
1131 | BUG_ON(!list_empty(&cfqq->fifo)); | 1124 | BUG_ON(!list_empty(&cfqq->fifo)); |
1132 | } | 1125 | } |
1126 | |||
1133 | return dispatched; | 1127 | return dispatched; |
1134 | } | 1128 | } |
1135 | 1129 | ||
@@ -1156,7 +1150,8 @@ static int | |||
1156 | cfq_dispatch_requests(request_queue_t *q, int force) | 1150 | cfq_dispatch_requests(request_queue_t *q, int force) |
1157 | { | 1151 | { |
1158 | struct cfq_data *cfqd = q->elevator->elevator_data; | 1152 | struct cfq_data *cfqd = q->elevator->elevator_data; |
1159 | struct cfq_queue *cfqq; | 1153 | struct cfq_queue *cfqq, *prev_cfqq; |
1154 | int dispatched; | ||
1160 | 1155 | ||
1161 | if (!cfqd->busy_queues) | 1156 | if (!cfqd->busy_queues) |
1162 | return 0; | 1157 | return 0; |
@@ -1164,10 +1159,17 @@ cfq_dispatch_requests(request_queue_t *q, int force) | |||
1164 | if (unlikely(force)) | 1159 | if (unlikely(force)) |
1165 | return cfq_forced_dispatch(cfqd); | 1160 | return cfq_forced_dispatch(cfqd); |
1166 | 1161 | ||
1167 | cfqq = cfq_select_queue(cfqd); | 1162 | dispatched = 0; |
1168 | if (cfqq) { | 1163 | prev_cfqq = NULL; |
1164 | while ((cfqq = cfq_select_queue(cfqd)) != NULL) { | ||
1169 | int max_dispatch; | 1165 | int max_dispatch; |
1170 | 1166 | ||
1167 | /* | ||
1168 | * Don't repeat dispatch from the previous queue. | ||
1169 | */ | ||
1170 | if (prev_cfqq == cfqq) | ||
1171 | break; | ||
1172 | |||
1171 | cfq_clear_cfqq_must_dispatch(cfqq); | 1173 | cfq_clear_cfqq_must_dispatch(cfqq); |
1172 | cfq_clear_cfqq_wait_request(cfqq); | 1174 | cfq_clear_cfqq_wait_request(cfqq); |
1173 | del_timer(&cfqd->idle_slice_timer); | 1175 | del_timer(&cfqd->idle_slice_timer); |
@@ -1176,10 +1178,19 @@ cfq_dispatch_requests(request_queue_t *q, int force) | |||
1176 | if (cfq_class_idle(cfqq)) | 1178 | if (cfq_class_idle(cfqq)) |
1177 | max_dispatch = 1; | 1179 | max_dispatch = 1; |
1178 | 1180 | ||
1179 | return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); | 1181 | dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); |
1182 | |||
1183 | /* | ||
1184 | * If the dispatch cfqq has idling enabled and is still | ||
1185 | * the active queue, break out. | ||
1186 | */ | ||
1187 | if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue) | ||
1188 | break; | ||
1189 | |||
1190 | prev_cfqq = cfqq; | ||
1180 | } | 1191 | } |
1181 | 1192 | ||
1182 | return 0; | 1193 | return dispatched; |
1183 | } | 1194 | } |
1184 | 1195 | ||
1185 | /* | 1196 | /* |
@@ -1324,7 +1335,6 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | |||
1324 | 1335 | ||
1325 | if (cic) { | 1336 | if (cic) { |
1326 | memset(cic, 0, sizeof(*cic)); | 1337 | memset(cic, 0, sizeof(*cic)); |
1327 | RB_CLEAR_COLOR(&cic->rb_node); | ||
1328 | cic->last_end_request = jiffies; | 1338 | cic->last_end_request = jiffies; |
1329 | INIT_LIST_HEAD(&cic->queue_list); | 1339 | INIT_LIST_HEAD(&cic->queue_list); |
1330 | cic->dtor = cfq_free_io_context; | 1340 | cic->dtor = cfq_free_io_context; |
@@ -1386,25 +1396,28 @@ static inline void changed_ioprio(struct cfq_io_context *cic) | |||
1386 | { | 1396 | { |
1387 | struct cfq_data *cfqd = cic->key; | 1397 | struct cfq_data *cfqd = cic->key; |
1388 | struct cfq_queue *cfqq; | 1398 | struct cfq_queue *cfqq; |
1389 | if (cfqd) { | 1399 | |
1390 | spin_lock(cfqd->queue->queue_lock); | 1400 | if (unlikely(!cfqd)) |
1391 | cfqq = cic->cfqq[ASYNC]; | 1401 | return; |
1392 | if (cfqq) { | 1402 | |
1393 | struct cfq_queue *new_cfqq; | 1403 | spin_lock(cfqd->queue->queue_lock); |
1394 | new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, | 1404 | |
1395 | cic->ioc->task, GFP_ATOMIC); | 1405 | cfqq = cic->cfqq[ASYNC]; |
1396 | if (new_cfqq) { | 1406 | if (cfqq) { |
1397 | cic->cfqq[ASYNC] = new_cfqq; | 1407 | struct cfq_queue *new_cfqq; |
1398 | cfq_put_queue(cfqq); | 1408 | new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task, |
1399 | } | 1409 | GFP_ATOMIC); |
1400 | } | 1410 | if (new_cfqq) { |
1401 | cfqq = cic->cfqq[SYNC]; | 1411 | cic->cfqq[ASYNC] = new_cfqq; |
1402 | if (cfqq) { | 1412 | cfq_put_queue(cfqq); |
1403 | cfq_mark_cfqq_prio_changed(cfqq); | ||
1404 | cfq_init_prio_data(cfqq); | ||
1405 | } | 1413 | } |
1406 | spin_unlock(cfqd->queue->queue_lock); | ||
1407 | } | 1414 | } |
1415 | |||
1416 | cfqq = cic->cfqq[SYNC]; | ||
1417 | if (cfqq) | ||
1418 | cfq_mark_cfqq_prio_changed(cfqq); | ||
1419 | |||
1420 | spin_unlock(cfqd->queue->queue_lock); | ||
1408 | } | 1421 | } |
1409 | 1422 | ||
1410 | /* | 1423 | /* |
@@ -1461,7 +1474,6 @@ retry: | |||
1461 | 1474 | ||
1462 | INIT_HLIST_NODE(&cfqq->cfq_hash); | 1475 | INIT_HLIST_NODE(&cfqq->cfq_hash); |
1463 | INIT_LIST_HEAD(&cfqq->cfq_list); | 1476 | INIT_LIST_HEAD(&cfqq->cfq_list); |
1464 | RB_CLEAR_ROOT(&cfqq->sort_list); | ||
1465 | INIT_LIST_HEAD(&cfqq->fifo); | 1477 | INIT_LIST_HEAD(&cfqq->fifo); |
1466 | 1478 | ||
1467 | cfqq->key = key; | 1479 | cfqq->key = key; |
@@ -1473,8 +1485,7 @@ retry: | |||
1473 | * set ->slice_left to allow preemption for a new process | 1485 | * set ->slice_left to allow preemption for a new process |
1474 | */ | 1486 | */ |
1475 | cfqq->slice_left = 2 * cfqd->cfq_slice_idle; | 1487 | cfqq->slice_left = 2 * cfqd->cfq_slice_idle; |
1476 | if (!cfqd->hw_tag) | 1488 | cfq_mark_cfqq_idle_window(cfqq); |
1477 | cfq_mark_cfqq_idle_window(cfqq); | ||
1478 | cfq_mark_cfqq_prio_changed(cfqq); | 1489 | cfq_mark_cfqq_prio_changed(cfqq); |
1479 | cfq_init_prio_data(cfqq); | 1490 | cfq_init_prio_data(cfqq); |
1480 | } | 1491 | } |
@@ -1665,7 +1676,8 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1665 | { | 1676 | { |
1666 | int enable_idle = cfq_cfqq_idle_window(cfqq); | 1677 | int enable_idle = cfq_cfqq_idle_window(cfqq); |
1667 | 1678 | ||
1668 | if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag) | 1679 | if (!cic->ioc->task || !cfqd->cfq_slice_idle || |
1680 | (cfqd->hw_tag && CIC_SEEKY(cic))) | ||
1669 | enable_idle = 0; | 1681 | enable_idle = 0; |
1670 | else if (sample_valid(cic->ttime_samples)) { | 1682 | else if (sample_valid(cic->ttime_samples)) { |
1671 | if (cic->ttime_mean > cfqd->cfq_slice_idle) | 1683 | if (cic->ttime_mean > cfqd->cfq_slice_idle) |
@@ -1695,7 +1707,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, | |||
1695 | return 0; | 1707 | return 0; |
1696 | 1708 | ||
1697 | if (!cfqq) | 1709 | if (!cfqq) |
1698 | return 1; | 1710 | return 0; |
1699 | 1711 | ||
1700 | if (cfq_class_idle(cfqq)) | 1712 | if (cfq_class_idle(cfqq)) |
1701 | return 1; | 1713 | return 1; |
@@ -1727,7 +1739,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
1727 | cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2; | 1739 | cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2; |
1728 | 1740 | ||
1729 | cfqq->slice_end = cfqq->slice_left + jiffies; | 1741 | cfqq->slice_end = cfqq->slice_left + jiffies; |
1730 | __cfq_slice_expired(cfqd, cfqq, 1); | 1742 | cfq_slice_expired(cfqd, 1); |
1731 | __cfq_set_active_queue(cfqd, cfqq); | 1743 | __cfq_set_active_queue(cfqd, cfqq); |
1732 | } | 1744 | } |
1733 | 1745 | ||
@@ -1752,11 +1764,7 @@ static void | |||
1752 | cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, | 1764 | cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, |
1753 | struct cfq_rq *crq) | 1765 | struct cfq_rq *crq) |
1754 | { | 1766 | { |
1755 | struct cfq_io_context *cic; | 1767 | struct cfq_io_context *cic = crq->io_context; |
1756 | |||
1757 | cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq); | ||
1758 | |||
1759 | cic = crq->io_context; | ||
1760 | 1768 | ||
1761 | /* | 1769 | /* |
1762 | * we never wait for an async request and we don't allow preemption | 1770 | * we never wait for an async request and we don't allow preemption |
@@ -1846,11 +1854,23 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq) | |||
1846 | cfqq->service_last = now; | 1854 | cfqq->service_last = now; |
1847 | cfq_resort_rr_list(cfqq, 0); | 1855 | cfq_resort_rr_list(cfqq, 0); |
1848 | } | 1856 | } |
1849 | cfq_schedule_dispatch(cfqd); | ||
1850 | } | 1857 | } |
1851 | 1858 | ||
1852 | if (cfq_crq_is_sync(crq)) | 1859 | if (sync) |
1853 | crq->io_context->last_end_request = now; | 1860 | crq->io_context->last_end_request = now; |
1861 | |||
1862 | /* | ||
1863 | * If this is the active queue, check if it needs to be expired, | ||
1864 | * or if we want to idle in case it has no pending requests. | ||
1865 | */ | ||
1866 | if (cfqd->active_queue == cfqq) { | ||
1867 | if (time_after(now, cfqq->slice_end)) | ||
1868 | cfq_slice_expired(cfqd, 0); | ||
1869 | else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) { | ||
1870 | if (!cfq_arm_slice_timer(cfqd, cfqq)) | ||
1871 | cfq_schedule_dispatch(cfqd); | ||
1872 | } | ||
1873 | } | ||
1854 | } | 1874 | } |
1855 | 1875 | ||
1856 | static struct request * | 1876 | static struct request * |
@@ -1917,7 +1937,6 @@ static inline int | |||
1917 | __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq, | 1937 | __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq, |
1918 | struct task_struct *task, int rw) | 1938 | struct task_struct *task, int rw) |
1919 | { | 1939 | { |
1920 | #if 1 | ||
1921 | if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) && | 1940 | if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) && |
1922 | !cfq_cfqq_must_alloc_slice(cfqq)) { | 1941 | !cfq_cfqq_must_alloc_slice(cfqq)) { |
1923 | cfq_mark_cfqq_must_alloc_slice(cfqq); | 1942 | cfq_mark_cfqq_must_alloc_slice(cfqq); |
@@ -1925,39 +1944,6 @@ __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1925 | } | 1944 | } |
1926 | 1945 | ||
1927 | return ELV_MQUEUE_MAY; | 1946 | return ELV_MQUEUE_MAY; |
1928 | #else | ||
1929 | if (!cfqq || task->flags & PF_MEMALLOC) | ||
1930 | return ELV_MQUEUE_MAY; | ||
1931 | if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) { | ||
1932 | if (cfq_cfqq_wait_request(cfqq)) | ||
1933 | return ELV_MQUEUE_MUST; | ||
1934 | |||
1935 | /* | ||
1936 | * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we | ||
1937 | * can quickly flood the queue with writes from a single task | ||
1938 | */ | ||
1939 | if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) { | ||
1940 | cfq_mark_cfqq_must_alloc_slice(cfqq); | ||
1941 | return ELV_MQUEUE_MUST; | ||
1942 | } | ||
1943 | |||
1944 | return ELV_MQUEUE_MAY; | ||
1945 | } | ||
1946 | if (cfq_class_idle(cfqq)) | ||
1947 | return ELV_MQUEUE_NO; | ||
1948 | if (cfqq->allocated[rw] >= cfqd->max_queued) { | ||
1949 | struct io_context *ioc = get_io_context(GFP_ATOMIC); | ||
1950 | int ret = ELV_MQUEUE_NO; | ||
1951 | |||
1952 | if (ioc && ioc->nr_batch_requests) | ||
1953 | ret = ELV_MQUEUE_MAY; | ||
1954 | |||
1955 | put_io_context(ioc); | ||
1956 | return ret; | ||
1957 | } | ||
1958 | |||
1959 | return ELV_MQUEUE_MAY; | ||
1960 | #endif | ||
1961 | } | 1947 | } |
1962 | 1948 | ||
1963 | static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio) | 1949 | static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio) |
@@ -1986,16 +1972,13 @@ static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio) | |||
1986 | static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq) | 1972 | static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq) |
1987 | { | 1973 | { |
1988 | struct cfq_data *cfqd = q->elevator->elevator_data; | 1974 | struct cfq_data *cfqd = q->elevator->elevator_data; |
1989 | struct request_list *rl = &q->rq; | ||
1990 | 1975 | ||
1991 | if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) { | 1976 | if (unlikely(cfqd->rq_starved)) { |
1977 | struct request_list *rl = &q->rq; | ||
1978 | |||
1992 | smp_mb(); | 1979 | smp_mb(); |
1993 | if (waitqueue_active(&rl->wait[READ])) | 1980 | if (waitqueue_active(&rl->wait[READ])) |
1994 | wake_up(&rl->wait[READ]); | 1981 | wake_up(&rl->wait[READ]); |
1995 | } | ||
1996 | |||
1997 | if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) { | ||
1998 | smp_mb(); | ||
1999 | if (waitqueue_active(&rl->wait[WRITE])) | 1982 | if (waitqueue_active(&rl->wait[WRITE])) |
2000 | wake_up(&rl->wait[WRITE]); | 1983 | wake_up(&rl->wait[WRITE]); |
2001 | } | 1984 | } |
@@ -2069,7 +2052,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | |||
2069 | 2052 | ||
2070 | crq = mempool_alloc(cfqd->crq_pool, gfp_mask); | 2053 | crq = mempool_alloc(cfqd->crq_pool, gfp_mask); |
2071 | if (crq) { | 2054 | if (crq) { |
2072 | RB_CLEAR(&crq->rb_node); | 2055 | RB_CLEAR_NODE(&crq->rb_node); |
2073 | crq->rb_key = 0; | 2056 | crq->rb_key = 0; |
2074 | crq->request = rq; | 2057 | crq->request = rq; |
2075 | INIT_HLIST_NODE(&crq->hash); | 2058 | INIT_HLIST_NODE(&crq->hash); |
@@ -2155,16 +2138,13 @@ static void cfq_idle_slice_timer(unsigned long data) | |||
2155 | * only expire and reinvoke request handler, if there are | 2138 | * only expire and reinvoke request handler, if there are |
2156 | * other queues with pending requests | 2139 | * other queues with pending requests |
2157 | */ | 2140 | */ |
2158 | if (!cfqd->busy_queues) { | 2141 | if (!cfqd->busy_queues) |
2159 | cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end); | ||
2160 | add_timer(&cfqd->idle_slice_timer); | ||
2161 | goto out_cont; | 2142 | goto out_cont; |
2162 | } | ||
2163 | 2143 | ||
2164 | /* | 2144 | /* |
2165 | * not expired and it has a request pending, let it dispatch | 2145 | * not expired and it has a request pending, let it dispatch |
2166 | */ | 2146 | */ |
2167 | if (!RB_EMPTY(&cfqq->sort_list)) { | 2147 | if (!RB_EMPTY_ROOT(&cfqq->sort_list)) { |
2168 | cfq_mark_cfqq_must_dispatch(cfqq); | 2148 | cfq_mark_cfqq_must_dispatch(cfqq); |
2169 | goto out_kick; | 2149 | goto out_kick; |
2170 | } | 2150 | } |
@@ -2285,9 +2265,6 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e) | |||
2285 | 2265 | ||
2286 | cfqd->queue = q; | 2266 | cfqd->queue = q; |
2287 | 2267 | ||
2288 | cfqd->max_queued = q->nr_requests / 4; | ||
2289 | q->nr_batching = cfq_queued; | ||
2290 | |||
2291 | init_timer(&cfqd->idle_slice_timer); | 2268 | init_timer(&cfqd->idle_slice_timer); |
2292 | cfqd->idle_slice_timer.function = cfq_idle_slice_timer; | 2269 | cfqd->idle_slice_timer.function = cfq_idle_slice_timer; |
2293 | cfqd->idle_slice_timer.data = (unsigned long) cfqd; | 2270 | cfqd->idle_slice_timer.data = (unsigned long) cfqd; |
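The new lines in the `cfq_dispatch_insert()` hunk above record where the most recently dispatched request ends by reading the tail of the dispatch list: in a circular `list_head` list, the last entry is `head.prev`. A self-contained sketch of that idiom, using toy list primitives rather than the kernel's `<linux/list.h>`:

```c
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

struct request {			/* toy request, not the kernel's */
	unsigned long sector, nr_sectors;
	struct list_head queuelist;
};

int main(void)
{
	struct list_head queue_head = { &queue_head, &queue_head };
	struct request a = { 100, 8 }, b = { 200, 16 };
	struct request *last;

	list_add_tail(&a.queuelist, &queue_head);
	list_add_tail(&b.queuelist, &queue_head);

	/* tail of a circular list == most recently queued request */
	last = list_entry(queue_head.prev, struct request, queuelist);
	printf("last_sector = %lu\n", last->sector + last->nr_sectors);	/* 216 */
	return 0;
}
```

The kernel macros differ in detail, but the idiom is the same: `list_entry()` is `container_of()`, mapping the embedded `queuelist` node back to its enclosing request.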
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 3bd0415a9828..c7ca9f0b6498 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -8,7 +8,6 @@ | |||
8 | #include <linux/blkdev.h> | 8 | #include <linux/blkdev.h> |
9 | #include <linux/elevator.h> | 9 | #include <linux/elevator.h> |
10 | #include <linux/bio.h> | 10 | #include <linux/bio.h> |
11 | #include <linux/config.h> | ||
12 | #include <linux/module.h> | 11 | #include <linux/module.h> |
13 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
14 | #include <linux/init.h> | 13 | #include <linux/init.h> |
@@ -30,8 +29,7 @@ static const int deadline_hash_shift = 5; | |||
30 | #define DL_HASH_FN(sec) (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift)) | 29 | #define DL_HASH_FN(sec) (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift)) |
31 | #define DL_HASH_ENTRIES (1 << deadline_hash_shift) | 30 | #define DL_HASH_ENTRIES (1 << deadline_hash_shift) |
32 | #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) | 31 | #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) |
33 | #define list_entry_hash(ptr) list_entry((ptr), struct deadline_rq, hash) | 32 | #define ON_HASH(drq) (!hlist_unhashed(&(drq)->hash)) |
34 | #define ON_HASH(drq) (drq)->on_hash | ||
35 | 33 | ||
36 | struct deadline_data { | 34 | struct deadline_data { |
37 | /* | 35 | /* |
@@ -48,7 +46,7 @@ struct deadline_data { | |||
48 | * next in sort order. read, write or both are NULL | 46 | * next in sort order. read, write or both are NULL |
49 | */ | 47 | */ |
50 | struct deadline_rq *next_drq[2]; | 48 | struct deadline_rq *next_drq[2]; |
51 | struct list_head *hash; /* request hash */ | 49 | struct hlist_head *hash; /* request hash */ |
52 | unsigned int batching; /* number of sequential requests made */ | 50 | unsigned int batching; /* number of sequential requests made */ |
53 | sector_t last_sector; /* head position */ | 51 | sector_t last_sector; /* head position */ |
54 | unsigned int starved; /* times reads have starved writes */ | 52 | unsigned int starved; /* times reads have starved writes */ |
@@ -79,8 +77,7 @@ struct deadline_rq { | |||
79 | /* | 77 | /* |
80 | * request hash, key is the ending offset (for back merge lookup) | 78 | * request hash, key is the ending offset (for back merge lookup) |
81 | */ | 79 | */ |
82 | struct list_head hash; | 80 | struct hlist_node hash; |
83 | char on_hash; | ||
84 | 81 | ||
85 | /* | 82 | /* |
86 | * expire fifo | 83 | * expire fifo |
@@ -100,8 +97,7 @@ static kmem_cache_t *drq_pool; | |||
100 | */ | 97 | */ |
101 | static inline void __deadline_del_drq_hash(struct deadline_rq *drq) | 98 | static inline void __deadline_del_drq_hash(struct deadline_rq *drq) |
102 | { | 99 | { |
103 | drq->on_hash = 0; | 100 | hlist_del_init(&drq->hash); |
104 | list_del_init(&drq->hash); | ||
105 | } | 101 | } |
106 | 102 | ||
107 | static inline void deadline_del_drq_hash(struct deadline_rq *drq) | 103 | static inline void deadline_del_drq_hash(struct deadline_rq *drq) |
@@ -117,8 +113,7 @@ deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq) | |||
117 | 113 | ||
118 | BUG_ON(ON_HASH(drq)); | 114 | BUG_ON(ON_HASH(drq)); |
119 | 115 | ||
120 | drq->on_hash = 1; | 116 | hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]); |
121 | list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]); | ||
122 | } | 117 | } |
123 | 118 | ||
124 | /* | 119 | /* |
@@ -128,26 +123,24 @@ static inline void | |||
128 | deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq) | 123 | deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq) |
129 | { | 124 | { |
130 | struct request *rq = drq->request; | 125 | struct request *rq = drq->request; |
131 | struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))]; | 126 | struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))]; |
132 | 127 | ||
133 | if (ON_HASH(drq) && drq->hash.prev != head) { | 128 | if (ON_HASH(drq) && &drq->hash != head->first) { |
134 | list_del(&drq->hash); | 129 | hlist_del(&drq->hash); |
135 | list_add(&drq->hash, head); | 130 | hlist_add_head(&drq->hash, head); |
136 | } | 131 | } |
137 | } | 132 | } |
138 | 133 | ||
139 | static struct request * | 134 | static struct request * |
140 | deadline_find_drq_hash(struct deadline_data *dd, sector_t offset) | 135 | deadline_find_drq_hash(struct deadline_data *dd, sector_t offset) |
141 | { | 136 | { |
142 | struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)]; | 137 | struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)]; |
143 | struct list_head *entry, *next = hash_list->next; | 138 | struct hlist_node *entry, *next; |
139 | struct deadline_rq *drq; | ||
144 | 140 | ||
145 | while ((entry = next) != hash_list) { | 141 | hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) { |
146 | struct deadline_rq *drq = list_entry_hash(entry); | ||
147 | struct request *__rq = drq->request; | 142 | struct request *__rq = drq->request; |
148 | 143 | ||
149 | next = entry->next; | ||
150 | |||
151 | BUG_ON(!ON_HASH(drq)); | 144 | BUG_ON(!ON_HASH(drq)); |
152 | 145 | ||
153 | if (!rq_mergeable(__rq)) { | 146 | if (!rq_mergeable(__rq)) { |
@@ -165,10 +158,6 @@ deadline_find_drq_hash(struct deadline_data *dd, sector_t offset) | |||
165 | /* | 158 | /* |
166 | * rb tree support functions | 159 | * rb tree support functions |
167 | */ | 160 | */ |
168 | #define RB_NONE (2) | ||
169 | #define RB_EMPTY(root) ((root)->rb_node == NULL) | ||
170 | #define ON_RB(node) ((node)->rb_color != RB_NONE) | ||
171 | #define RB_CLEAR(node) ((node)->rb_color = RB_NONE) | ||
172 | #define rb_entry_drq(node) rb_entry((node), struct deadline_rq, rb_node) | 161 | #define rb_entry_drq(node) rb_entry((node), struct deadline_rq, rb_node) |
173 | #define DRQ_RB_ROOT(dd, drq) (&(dd)->sort_list[rq_data_dir((drq)->request)]) | 162 | #define DRQ_RB_ROOT(dd, drq) (&(dd)->sort_list[rq_data_dir((drq)->request)]) |
174 | #define rq_rb_key(rq) (rq)->sector | 163 | #define rq_rb_key(rq) (rq)->sector |
@@ -227,9 +216,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq) | |||
227 | dd->next_drq[data_dir] = rb_entry_drq(rbnext); | 216 | dd->next_drq[data_dir] = rb_entry_drq(rbnext); |
228 | } | 217 | } |
229 | 218 | ||
230 | BUG_ON(!ON_RB(&drq->rb_node)); | 219 | BUG_ON(!RB_EMPTY_NODE(&drq->rb_node)); |
231 | rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq)); | 220 | rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq)); |
232 | RB_CLEAR(&drq->rb_node); | 221 | RB_CLEAR_NODE(&drq->rb_node); |
233 | } | 222 | } |
234 | 223 | ||
235 | static struct request * | 224 | static struct request * |
@@ -503,7 +492,7 @@ static int deadline_dispatch_requests(request_queue_t *q, int force) | |||
503 | */ | 492 | */ |
504 | 493 | ||
505 | if (reads) { | 494 | if (reads) { |
506 | BUG_ON(RB_EMPTY(&dd->sort_list[READ])); | 495 | BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ])); |
507 | 496 | ||
508 | if (writes && (dd->starved++ >= dd->writes_starved)) | 497 | if (writes && (dd->starved++ >= dd->writes_starved)) |
509 | goto dispatch_writes; | 498 | goto dispatch_writes; |
@@ -519,7 +508,7 @@ static int deadline_dispatch_requests(request_queue_t *q, int force) | |||
519 | 508 | ||
520 | if (writes) { | 509 | if (writes) { |
521 | dispatch_writes: | 510 | dispatch_writes: |
522 | BUG_ON(RB_EMPTY(&dd->sort_list[WRITE])); | 511 | BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE])); |
523 | 512 | ||
524 | dd->starved = 0; | 513 | dd->starved = 0; |
525 | 514 | ||
@@ -626,7 +615,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e) | |||
626 | return NULL; | 615 | return NULL; |
627 | memset(dd, 0, sizeof(*dd)); | 616 | memset(dd, 0, sizeof(*dd)); |
628 | 617 | ||
629 | dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES, | 618 | dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES, |
630 | GFP_KERNEL, q->node); | 619 | GFP_KERNEL, q->node); |
631 | if (!dd->hash) { | 620 | if (!dd->hash) { |
632 | kfree(dd); | 621 | kfree(dd); |
@@ -642,7 +631,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e) | |||
642 | } | 631 | } |
643 | 632 | ||
644 | for (i = 0; i < DL_HASH_ENTRIES; i++) | 633 | for (i = 0; i < DL_HASH_ENTRIES; i++) |
645 | INIT_LIST_HEAD(&dd->hash[i]); | 634 | INIT_HLIST_HEAD(&dd->hash[i]); |
646 | 635 | ||
647 | INIT_LIST_HEAD(&dd->fifo_list[READ]); | 636 | INIT_LIST_HEAD(&dd->fifo_list[READ]); |
648 | INIT_LIST_HEAD(&dd->fifo_list[WRITE]); | 637 | INIT_LIST_HEAD(&dd->fifo_list[WRITE]); |
@@ -675,11 +664,10 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio, | |||
675 | drq = mempool_alloc(dd->drq_pool, gfp_mask); | 664 | drq = mempool_alloc(dd->drq_pool, gfp_mask); |
676 | if (drq) { | 665 | if (drq) { |
677 | memset(drq, 0, sizeof(*drq)); | 666 | memset(drq, 0, sizeof(*drq)); |
678 | RB_CLEAR(&drq->rb_node); | 667 | RB_CLEAR_NODE(&drq->rb_node); |
679 | drq->request = rq; | 668 | drq->request = rq; |
680 | 669 | ||
681 | INIT_LIST_HEAD(&drq->hash); | 670 | INIT_HLIST_NODE(&drq->hash); |
682 | drq->on_hash = 0; | ||
683 | 671 | ||
684 | INIT_LIST_HEAD(&drq->fifo); | 672 | INIT_LIST_HEAD(&drq->fifo); |
685 | 673 | ||
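Both deadline-iosched.c above and as-iosched.c earlier switch their hash lookups to `hlist_for_each_entry_safe()`, which tolerates unlinking the current entry because the next node is saved before the loop body runs. A compact userspace model of that walk-and-prune pattern, written in the era's five-argument form (toy macros for illustration, not the kernel's real `<linux/list.h>`):

```c
#include <stdio.h>
#include <stddef.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

#define hlist_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* era-style five-argument safe iterator (uses gcc's typeof) */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member)		\
	for (pos = (head)->first;					\
	     pos && ((n = pos->next), 1) &&				\
		((tpos = hlist_entry(pos, typeof(*tpos), member)), 1);	\
	     pos = n)

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hlist_del_init(struct hlist_node *n)	/* caller knows n is hashed */
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	n->next = NULL;
	n->pprev = NULL;
}

struct drq {				/* toy stand-in for struct deadline_rq */
	unsigned long end_sector;	/* hash key: sector + nr_sectors */
	int mergeable;
	struct hlist_node hash;
};

/* look up a back-merge candidate, pruning unmergeable entries on the way */
static struct drq *find_drq(struct hlist_head *bucket, unsigned long offset)
{
	struct hlist_node *entry, *next;
	struct drq *d;

	hlist_for_each_entry_safe(d, entry, next, bucket, hash) {
		if (!d->mergeable) {
			hlist_del_init(&d->hash);	/* safe: 'next' already saved */
			continue;
		}
		if (d->end_sector == offset)
			return d;
	}
	return NULL;
}

int main(void)
{
	struct hlist_head bucket = { NULL };
	struct drq a = { 108, 0, { NULL, NULL } };	/* stale: no longer mergeable */
	struct drq b = { 216, 1, { NULL, NULL } };

	hlist_add_head(&a.hash, &bucket);
	hlist_add_head(&b.hash, &bucket);

	/* offset 108 would hit 'a', but 'a' is pruned during the walk */
	printf("hit at 108: %s\n", find_drq(&bucket, 108) ? "yes" : "no");		/* no */
	printf("'a' still hashed: %s\n", a.hash.pprev ? "yes" : "no");			/* no */
	printf("hit at 216: %s\n", find_drq(&bucket, 216) == &b ? "yes" : "no");	/* yes */
	return 0;
}
```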
diff --git a/block/elevator.c b/block/elevator.c
index a0afdd317cef..bc7baeec0d10 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/blkdev.h> | 27 | #include <linux/blkdev.h> |
28 | #include <linux/elevator.h> | 28 | #include <linux/elevator.h> |
29 | #include <linux/bio.h> | 29 | #include <linux/bio.h> |
30 | #include <linux/config.h> | ||
31 | #include <linux/module.h> | 30 | #include <linux/module.h> |
32 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
33 | #include <linux/init.h> | 32 | #include <linux/init.h> |
@@ -850,12 +849,9 @@ fail_register: | |||
850 | * one again (along with re-adding the sysfs dir) | 849 | * one again (along with re-adding the sysfs dir) |
851 | */ | 850 | */ |
852 | elevator_exit(e); | 851 | elevator_exit(e); |
853 | e = NULL; | ||
854 | q->elevator = old_elevator; | 852 | q->elevator = old_elevator; |
855 | elv_register_queue(q); | 853 | elv_register_queue(q); |
856 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | 854 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); |
857 | if (e) | ||
858 | kobject_put(&e->kobj); | ||
859 | return 0; | 855 | return 0; |
860 | } | 856 | } |
861 | 857 | ||
diff --git a/block/genhd.c b/block/genhd.c
index 5a8d3bf02f17..25d1f42568cc 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -2,7 +2,6 @@ | |||
2 | * gendisk handling | 2 | * gendisk handling |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <linux/config.h> | ||
6 | #include <linux/module.h> | 5 | #include <linux/module.h> |
7 | #include <linux/fs.h> | 6 | #include <linux/fs.h> |
8 | #include <linux/genhd.h> | 7 | #include <linux/genhd.h> |
@@ -17,8 +16,7 @@ | |||
17 | #include <linux/buffer_head.h> | 16 | #include <linux/buffer_head.h> |
18 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
19 | 18 | ||
20 | static struct subsystem block_subsys; | 19 | struct subsystem block_subsys; |
21 | |||
22 | static DEFINE_MUTEX(block_subsys_lock); | 20 | static DEFINE_MUTEX(block_subsys_lock); |
23 | 21 | ||
24 | /* | 22 | /* |
@@ -511,9 +509,7 @@ static struct kset_uevent_ops block_uevent_ops = { | |||
511 | .uevent = block_uevent, | 509 | .uevent = block_uevent, |
512 | }; | 510 | }; |
513 | 511 | ||
514 | /* declare block_subsys. */ | 512 | decl_subsys(block, &ktype_block, &block_uevent_ops); |
515 | static decl_subsys(block, &ktype_block, &block_uevent_ops); | ||
516 | |||
517 | 513 | ||
518 | /* | 514 | /* |
519 | * aggregate disk stat collector. Uses the same stats that the sysfs | 515 | * aggregate disk stat collector. Uses the same stats that the sysfs |
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 7eb36c53f4b7..5813d63c20af 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -10,7 +10,6 @@ | |||
10 | /* | 10 | /* |
11 | * This handles all read/write requests to block devices | 11 | * This handles all read/write requests to block devices |
12 | */ | 12 | */ |
13 | #include <linux/config.h> | ||
14 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
15 | #include <linux/module.h> | 14 | #include <linux/module.h> |
16 | #include <linux/backing-dev.h> | 15 | #include <linux/backing-dev.h> |
@@ -638,7 +637,7 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr) | |||
638 | /* Assume anything <= 4GB can be handled by IOMMU. | 637 | /* Assume anything <= 4GB can be handled by IOMMU. |
639 | Actually some IOMMUs can handle everything, but I don't | 638 | Actually some IOMMUs can handle everything, but I don't |
640 | know of a way to test this here. */ | 639 | know of a way to test this here. */ |
641 | if (bounce_pfn < (0xffffffff>>PAGE_SHIFT)) | 640 | if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) |
642 | dma = 1; | 641 | dma = 1; |
643 | q->bounce_pfn = max_low_pfn; | 642 | q->bounce_pfn = max_low_pfn; |
644 | #else | 643 | #else |
@@ -1663,6 +1662,8 @@ static void blk_unplug_timeout(unsigned long data) | |||
1663 | **/ | 1662 | **/ |
1664 | void blk_start_queue(request_queue_t *q) | 1663 | void blk_start_queue(request_queue_t *q) |
1665 | { | 1664 | { |
1665 | WARN_ON(!irqs_disabled()); | ||
1666 | |||
1666 | clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); | 1667 | clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); |
1667 | 1668 | ||
1668 | /* | 1669 | /* |
@@ -1878,7 +1879,8 @@ EXPORT_SYMBOL(blk_alloc_queue_node); | |||
1878 | * get dealt with eventually. | 1879 | * get dealt with eventually. |
1879 | * | 1880 | * |
1880 | * The queue spin lock must be held while manipulating the requests on the | 1881 | * The queue spin lock must be held while manipulating the requests on the |
1881 | * request queue. | 1882 | * request queue; this lock will be taken also from interrupt context, so irq |
1883 | * disabling is needed for it. | ||
1882 | * | 1884 | * |
1883 | * Function returns a pointer to the initialized request queue, or NULL if | 1885 | * Function returns a pointer to the initialized request queue, or NULL if |
1884 | * it didn't succeed. | 1886 | * it didn't succeed. |
@@ -2742,7 +2744,7 @@ static int attempt_merge(request_queue_t *q, struct request *req, | |||
2742 | return 0; | 2744 | return 0; |
2743 | 2745 | ||
2744 | /* | 2746 | /* |
2745 | * not contigious | 2747 | * not contiguous |
2746 | */ | 2748 | */ |
2747 | if (req->sector + req->nr_sectors != next->sector) | 2749 | if (req->sector + req->nr_sectors != next->sector) |
2748 | return 0; | 2750 | return 0; |
@@ -2824,6 +2826,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio) | |||
2824 | if (unlikely(bio_barrier(bio))) | 2826 | if (unlikely(bio_barrier(bio))) |
2825 | req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE); | 2827 | req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE); |
2826 | 2828 | ||
2829 | if (bio_sync(bio)) | ||
2830 | req->flags |= REQ_RW_SYNC; | ||
2831 | |||
2827 | req->errors = 0; | 2832 | req->errors = 0; |
2828 | req->hard_sector = req->sector = bio->bi_sector; | 2833 | req->hard_sector = req->sector = bio->bi_sector; |
2829 | req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio); | 2834 | req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio); |
@@ -3111,9 +3116,9 @@ void submit_bio(int rw, struct bio *bio) | |||
3111 | BIO_BUG_ON(!bio->bi_io_vec); | 3116 | BIO_BUG_ON(!bio->bi_io_vec); |
3112 | bio->bi_rw |= rw; | 3117 | bio->bi_rw |= rw; |
3113 | if (rw & WRITE) | 3118 | if (rw & WRITE) |
3114 | mod_page_state(pgpgout, count); | 3119 | count_vm_events(PGPGOUT, count); |
3115 | else | 3120 | else |
3116 | mod_page_state(pgpgin, count); | 3121 | count_vm_events(PGPGIN, count); |
3117 | 3122 | ||
3118 | if (unlikely(block_dump)) { | 3123 | if (unlikely(block_dump)) { |
3119 | char b[BDEVNAME_SIZE]; | 3124 | char b[BDEVNAME_SIZE]; |
@@ -3359,12 +3364,11 @@ EXPORT_SYMBOL(end_that_request_chunk); | |||
3359 | */ | 3364 | */ |
3360 | static void blk_done_softirq(struct softirq_action *h) | 3365 | static void blk_done_softirq(struct softirq_action *h) |
3361 | { | 3366 | { |
3362 | struct list_head *cpu_list; | 3367 | struct list_head *cpu_list, local_list; |
3363 | LIST_HEAD(local_list); | ||
3364 | 3368 | ||
3365 | local_irq_disable(); | 3369 | local_irq_disable(); |
3366 | cpu_list = &__get_cpu_var(blk_cpu_done); | 3370 | cpu_list = &__get_cpu_var(blk_cpu_done); |
3367 | list_splice_init(cpu_list, &local_list); | 3371 | list_replace_init(cpu_list, &local_list); |
3368 | local_irq_enable(); | 3372 | local_irq_enable(); |
3369 | 3373 | ||
3370 | while (!list_empty(&local_list)) { | 3374 | while (!list_empty(&local_list)) { |
@@ -3398,7 +3402,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action, | |||
3398 | } | 3402 | } |
3399 | 3403 | ||
3400 | 3404 | ||
3401 | static struct notifier_block blk_cpu_notifier = { | 3405 | static struct notifier_block __devinitdata blk_cpu_notifier = { |
3402 | .notifier_call = blk_cpu_notify, | 3406 | .notifier_call = blk_cpu_notify, |
3403 | }; | 3407 | }; |
3404 | 3408 | ||
@@ -3410,7 +3414,7 @@ static struct notifier_block blk_cpu_notifier = { | |||
3410 | * | 3414 | * |
3411 | * Description: | 3415 | * Description: |
3412 | * Ends all I/O on a request. It does not handle partial completions, | 3416 | * Ends all I/O on a request. It does not handle partial completions, |
3413 | * unless the driver actually implements this in its completionc callback | 3417 | * unless the driver actually implements this in its completion callback |
3414 | * through requeueing. Theh actual completion happens out-of-order, | 3418 | * through requeueing. Theh actual completion happens out-of-order, |
3415 | * through a softirq handler. The user must have registered a completion | 3419 | * through a softirq handler. The user must have registered a completion |
3416 | * callback through blk_queue_softirq_done(). | 3420 | * callback through blk_queue_softirq_done(). |
@@ -3536,9 +3540,7 @@ int __init blk_dev_init(void) | |||
3536 | INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); | 3540 | INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); |
3537 | 3541 | ||
3538 | open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL); | 3542 | open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL); |
3539 | #ifdef CONFIG_HOTPLUG_CPU | 3543 | register_hotcpu_notifier(&blk_cpu_notifier); |
3540 | register_cpu_notifier(&blk_cpu_notifier); | ||
3541 | #endif | ||
3542 | 3544 | ||
3543 | blk_max_low_pfn = max_low_pfn; | 3545 | blk_max_low_pfn = max_low_pfn; |
3544 | blk_max_pfn = max_pfn; | 3546 | blk_max_pfn = max_pfn; |
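The `blk_done_softirq()` hunk above switches from `list_splice_init()` to `list_replace_init()`: the per-cpu completion list is handed off to an on-stack head in one step and the source is re-initialised, which is why `local_list` no longer needs its `LIST_HEAD()` initializer. A simplified userspace model of that helper (toy primitives, not the kernel's `<linux/list.h>`):

```c
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h; h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* 'new' takes over 'old's elements in one step; 'old' becomes empty again */
static void list_replace_init(struct list_head *old, struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
	INIT_LIST_HEAD(old);
}

int main(void)
{
	struct list_head cpu_list, local_list, item;	/* local_list left uninitialised */

	INIT_LIST_HEAD(&cpu_list);
	list_add_tail(&item, &cpu_list);	/* pretend one completion is pending */

	list_replace_init(&cpu_list, &local_list);
	printf("cpu_list empty: %d, local_list empty: %d\n",
	       list_empty(&cpu_list), list_empty(&local_list));	/* 1, 0 */
	return 0;
}
```

With `list_splice_init()`, by contrast, the destination must already be a valid, initialised list head, hence the old `LIST_HEAD(local_list)` declaration.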