author     Trond Myklebust <Trond.Myklebust@netapp.com>   2007-02-13 01:43:25 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>   2007-02-13 01:43:25 -0500
commit     d9bc125caf592b7d081021f32ce5b717efdf70c8 (patch)
tree       263b7066ba22ddce21db610c0300f6eaac6f2064 /block
parent     43d78ef2ba5bec26d0315859e8324bfc0be23766 (diff)
parent     ec2f9d1331f658433411c58077871e1eef4ee1b4 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/

Conflicts:

	net/sunrpc/auth_gss/gss_krb5_crypto.c
	net/sunrpc/auth_gss/gss_spkm3_token.c
	net/sunrpc/clnt.c

Merge with mainline and fix conflicts.
Diffstat (limited to 'block')

 -rw-r--r--  block/blktrace.c    |   5
 -rw-r--r--  block/cfq-iosched.c | 297
 -rw-r--r--  block/elevator.c    |  31
 -rw-r--r--  block/genhd.c       |   8
 -rw-r--r--  block/ioctl.c       |   2
 -rw-r--r--  block/ll_rw_blk.c   |   2

 6 files changed, 190 insertions, 155 deletions
diff --git a/block/blktrace.c b/block/blktrace.c
index d3679dd1d2..3f0e7c37c0 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -264,7 +264,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
 	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
 }
 
-static struct file_operations blk_dropped_fops = {
+static const struct file_operations blk_dropped_fops = {
 	.owner =	THIS_MODULE,
 	.open =		blk_dropped_open,
 	.read =		blk_dropped_read,
@@ -363,10 +363,9 @@ static int blk_trace_setup(request_queue_t *q, struct block_device *bdev,
 	if (!bt->dropped_file)
 		goto err;
 
-	bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks);
+	bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks, bt);
 	if (!bt->rchan)
 		goto err;
-	bt->rchan->private_data = bt;
 
 	bt->act_mask = buts.act_mask;
 	if (!bt->act_mask)
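
The blktrace hunk above follows a relay interface change: the channel's private_data is now handed to relay_open() at creation instead of being patched into bt->rchan afterwards, so relay callbacks never observe a channel whose private_data is still unset. A minimal user-space sketch of that construct-with-context pattern (the types and names below are illustrative, not the kernel relay API):

    #include <stdio.h>

    struct chan {
    	void *private_data;
    };

    typedef void (*created_cb)(struct chan *c);

    /* context is installed before any callback can see the channel */
    static struct chan *chan_open(created_cb cb, void *private_data)
    {
    	static struct chan c;

    	c.private_data = private_data;
    	cb(&c);			/* callback already sees the context */
    	return &c;
    }

    static void on_created(struct chan *c)
    {
    	printf("callback sees %s\n", c->private_data ? "context" : "NULL");
    }

    int main(void)
    {
    	int bt = 42;		/* stand-in for the blk_trace context */

    	chan_open(on_created, &bt);
    	return 0;
    }
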
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 07b7062437..b6491c020f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -146,9 +146,9 @@ struct cfq_queue {
 	/* fifo list of requests in sort_list */
 	struct list_head fifo;
 
-	unsigned long slice_start;
 	unsigned long slice_end;
-	unsigned long slice_left;
+	unsigned long service_last;
+	long slice_resid;
 
 	/* number of requests that are on the dispatch list */
 	int on_dispatch[2];
@@ -162,15 +162,16 @@ struct cfq_queue {
 };
 
 enum cfqq_state_flags {
-	CFQ_CFQQ_FLAG_on_rr = 0,
-	CFQ_CFQQ_FLAG_wait_request,
-	CFQ_CFQQ_FLAG_must_alloc,
-	CFQ_CFQQ_FLAG_must_alloc_slice,
-	CFQ_CFQQ_FLAG_must_dispatch,
-	CFQ_CFQQ_FLAG_fifo_expire,
-	CFQ_CFQQ_FLAG_idle_window,
-	CFQ_CFQQ_FLAG_prio_changed,
-	CFQ_CFQQ_FLAG_queue_new,
+	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
+	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
+	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
+	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
+	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
+	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
+	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
+	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
+	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
+	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 };
 
 #define CFQ_CFQQ_FNS(name) \
@@ -196,6 +197,7 @@ CFQ_CFQQ_FNS(fifo_expire);
 CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(queue_new);
+CFQ_CFQQ_FNS(slice_new);
 #undef CFQ_CFQQ_FNS
 
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
@@ -231,6 +233,50 @@ static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
 }
 
 /*
+ * Scale schedule slice based on io priority. Use the sync time slice only
+ * if a queue is marked sync and has sync io queued. A sync queue with async
+ * io only, should not get full sync slice length.
+ */
+static inline int
+cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
+
+	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
+
+	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
+}
+
+static inline void
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
+	cfqq->slice_end += cfqq->slice_resid;
+
+	/*
+	 * Don't carry over residual for more than one slice, we only want
+	 * to slightly correct the fairness. Carrying over forever would
+	 * easily introduce oscillations.
+	 */
+	cfqq->slice_resid = 0;
+}
+
+/*
+ * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
+ * isn't valid until the first request from the dispatch is activated
+ * and the slice time set.
+ */
+static inline int cfq_slice_used(struct cfq_queue *cfqq)
+{
+	if (cfq_cfqq_slice_new(cfqq))
+		return 0;
+	if (time_before(jiffies, cfqq->slice_end))
+		return 0;
+
+	return 1;
+}
+
+/*
  * Lifted from AS - choose which of rq1 and rq2 that is best served now.
  * We choose the request that is closest to the head right now. Distance
  * behind the head is penalized and only allowed to a certain extent.
@@ -355,9 +401,14 @@ cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 {
 	struct cfq_data *cfqd = cfqq->cfqd;
-	struct list_head *list;
+	struct list_head *list, *n;
+	struct cfq_queue *__cfqq;
 
-	BUG_ON(!cfq_cfqq_on_rr(cfqq));
+	/*
+	 * Resorting requires the cfqq to be on the RR list already.
+	 */
+	if (!cfq_cfqq_on_rr(cfqq))
+		return;
 
 	list_del(&cfqq->cfq_list);
 
@@ -379,15 +430,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 		list = &cfqd->rr_list[cfqq->ioprio];
 	}
 
-	/*
-	 * If this queue was preempted or is new (never been serviced), let
-	 * it be added first for fairness but beind other new queues.
-	 * Otherwise, just add to the back of the list.
-	 */
 	if (preempted || cfq_cfqq_queue_new(cfqq)) {
-		struct list_head *n = list;
-		struct cfq_queue *__cfqq;
-
+		/*
+		 * If this queue was preempted or is new (never been serviced),
+		 * let it be added first for fairness but beind other new
+		 * queues.
+		 */
+		n = list;
 		while (n->next != list) {
 			__cfqq = list_entry_cfqq(n->next);
 			if (!cfq_cfqq_queue_new(__cfqq))
@@ -395,11 +444,32 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 
 			n = n->next;
 		}
+		list_add_tail(&cfqq->cfq_list, n);
+	} else if (!cfq_cfqq_class_sync(cfqq)) {
+		/*
+		 * async queue always goes to the end. this wont be overly
+		 * unfair to writes, as the sort of the sync queue wont be
+		 * allowed to pass the async queue again.
+		 */
+		list_add_tail(&cfqq->cfq_list, list);
+	} else {
+		/*
+		 * sort by last service, but don't cross a new or async
+		 * queue. we don't cross a new queue because it hasn't been
+		 * service before, and we don't cross an async queue because
+		 * it gets added to the end on expire.
+		 */
+		n = list;
+		while ((n = n->prev) != list) {
+			struct cfq_queue *__cfqq = list_entry_cfqq(n);
 
-		list = n;
+			if (!cfq_cfqq_class_sync(cfqq) || !__cfqq->service_last)
+				break;
+			if (time_before(__cfqq->service_last, cfqq->service_last))
+				break;
+		}
+		list_add(&cfqq->cfq_list, n);
 	}
-
-	list_add_tail(&cfqq->cfq_list, list);
 }
 
 /*
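
The rewritten tail of cfq_resort_rr_list() above is a small insertion sort keyed on service_last: walk the round-robin list backwards and stop before crossing a queue that was never serviced or that was serviced earlier than the one being inserted. A standalone sketch of that walk, with an array standing in for the kernel's doubly linked list and made-up timestamps (0 meaning "never serviced"):

    #include <stdio.h>

    int main(void)
    {
    	/* service_last stamps of queues already on the list */
    	long list[8] = { 0, 10, 40, 70 };
    	int n = 4, pos;
    	long mine = 50;

    	/* walk backwards; stop where we may not cross further left */
    	for (pos = n; pos > 0; pos--) {
    		if (!list[pos - 1])		/* unstamped queue: don't cross */
    			break;
    		if (list[pos - 1] < mine)	/* served earlier than us: stop */
    			break;
    	}

    	/* shift and insert, keeping the list ordered by last service */
    	for (int i = n; i > pos; i--)
    		list[i] = list[i - 1];
    	list[pos] = mine;
    	n++;

    	for (int i = 0; i < n; i++)
    		printf("%ld ", list[i]);
    	printf("\n");			/* prints: 0 10 40 50 70 */
    	return 0;
    }
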
@@ -604,11 +674,10 @@ __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		 */
 		del_timer(&cfqd->idle_class_timer);
 
-		cfqq->slice_start = jiffies;
 		cfqq->slice_end = 0;
-		cfqq->slice_left = 0;
 		cfq_clear_cfqq_must_alloc_slice(cfqq);
 		cfq_clear_cfqq_fifo_expire(cfqq);
+		cfq_mark_cfqq_slice_new(cfqq);
 	}
 
 	cfqd->active_queue = cfqq;
@@ -619,16 +688,11 @@ __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  */
 static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-		    int preempted)
+		    int preempted, int timed_out)
 {
-	unsigned long now = jiffies;
-
 	if (cfq_cfqq_wait_request(cfqq))
 		del_timer(&cfqd->idle_slice_timer);
 
-	if (!preempted && !cfq_cfqq_dispatched(cfqq))
-		cfq_schedule_dispatch(cfqd);
-
 	cfq_clear_cfqq_must_dispatch(cfqq);
 	cfq_clear_cfqq_wait_request(cfqq);
 	cfq_clear_cfqq_queue_new(cfqq);
@@ -637,13 +701,10 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	 * store what was left of this slice, if the queue idled out
 	 * or was preempted
 	 */
-	if (time_after(cfqq->slice_end, now))
-		cfqq->slice_left = cfqq->slice_end - now;
-	else
-		cfqq->slice_left = 0;
+	if (timed_out && !cfq_cfqq_slice_new(cfqq))
+		cfqq->slice_resid = cfqq->slice_end - jiffies;
 
-	if (cfq_cfqq_on_rr(cfqq))
-		cfq_resort_rr_list(cfqq, preempted);
+	cfq_resort_rr_list(cfqq, preempted);
 
 	if (cfqq == cfqd->active_queue)
 		cfqd->active_queue = NULL;
@@ -656,12 +717,13 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfqd->dispatch_slice = 0;
 }
 
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted,
+				     int timed_out)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 
 	if (cfqq)
-		__cfq_slice_expired(cfqd, cfqq, preempted);
+		__cfq_slice_expired(cfqd, cfqq, preempted, timed_out);
 }
 
 /*
@@ -758,14 +820,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 
 #define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))
 
-static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-
+static int cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
+	struct cfq_queue *cfqq = cfqd->active_queue;
 	struct cfq_io_context *cic;
 	unsigned long sl;
 
 	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
-	WARN_ON(cfqq != cfqd->active_queue);
 
 	/*
 	 * idle is disabled, either manually or by past process history
@@ -822,41 +883,21 @@ static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 
 	if (cfq_cfqq_fifo_expire(cfqq))
 		return NULL;
+
+	cfq_mark_cfqq_fifo_expire(cfqq);
+
 	if (list_empty(&cfqq->fifo))
 		return NULL;
 
 	fifo = cfq_cfqq_class_sync(cfqq);
 	rq = rq_entry_fifo(cfqq->fifo.next);
 
-	if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
-		cfq_mark_cfqq_fifo_expire(cfqq);
+	if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
 		return rq;
-	}
 
 	return NULL;
 }
 
-/*
- * Scale schedule slice based on io priority. Use the sync time slice only
- * if a queue is marked sync and has sync io queued. A sync queue with async
- * io only, should not get full sync slice length.
- */
-static inline int
-cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
-	const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
-
-	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
-
-	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
-}
-
-static inline void
-cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
-	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
-}
-
 static inline int
 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
@@ -872,7 +913,6 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  */
 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 {
-	unsigned long now = jiffies;
 	struct cfq_queue *cfqq;
 
 	cfqq = cfqd->active_queue;
@@ -882,7 +922,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	/*
 	 * slice has expired
 	 */
-	if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
+	if (!cfq_cfqq_must_dispatch(cfqq) && cfq_slice_used(cfqq))
 		goto expire;
 
 	/*
@@ -891,16 +931,16 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	 */
 	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
 		goto keep_queue;
-	else if (cfq_cfqq_dispatched(cfqq)) {
+	else if (cfq_cfqq_slice_new(cfqq) || cfq_cfqq_dispatched(cfqq)) {
 		cfqq = NULL;
 		goto keep_queue;
 	} else if (cfq_cfqq_class_sync(cfqq)) {
-		if (cfq_arm_slice_timer(cfqd, cfqq))
+		if (cfq_arm_slice_timer(cfqd))
 			return NULL;
 	}
 
 expire:
-	cfq_slice_expired(cfqd, 0);
+	cfq_slice_expired(cfqd, 0, 0);
 new_queue:
 	cfqq = cfq_set_active_queue(cfqd);
 keep_queue:
@@ -943,20 +983,15 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	} while (dispatched < max_dispatch);
 
 	/*
-	 * if slice end isn't set yet, set it.
-	 */
-	if (!cfqq->slice_end)
-		cfq_set_prio_slice(cfqd, cfqq);
-
-	/*
 	 * expire an async queue immediately if it has used up its slice. idle
 	 * queue always expire after 1 dispatch round.
 	 */
 	if ((!cfq_cfqq_sync(cfqq) &&
 	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-	    cfq_class_idle(cfqq) ||
-	    !cfq_cfqq_idle_window(cfqq))
-		cfq_slice_expired(cfqd, 0);
+	    cfq_class_idle(cfqq)) {
+		cfqq->slice_end = jiffies + 1;
+		cfq_slice_expired(cfqd, 0, 0);
+	}
 
 	return dispatched;
 }
@@ -991,7 +1026,7 @@ cfq_forced_dispatch(struct cfq_data *cfqd)
 	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
 	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr);
 
-	cfq_slice_expired(cfqd, 0);
+	cfq_slice_expired(cfqd, 0, 0);
 
 	BUG_ON(cfqd->busy_queues);
 
@@ -1022,6 +1057,14 @@ cfq_dispatch_requests(request_queue_t *q, int force)
 		if (prev_cfqq == cfqq)
 			break;
 
+		/*
+		 * So we have dispatched before in this round, if the
+		 * next queue has idling enabled (must be sync), don't
+		 * allow it service until the previous have continued.
+		 */
+		if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
+			break;
+
 		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_wait_request(cfqq);
 		del_timer(&cfqd->idle_slice_timer);
@@ -1031,14 +1074,6 @@ cfq_dispatch_requests(request_queue_t *q, int force)
 			max_dispatch = 1;
 
 		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
-
-		/*
-		 * If the dispatch cfqq has idling enabled and is still
-		 * the active queue, break out.
-		 */
-		if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue)
-			break;
-
 		prev_cfqq = cfqq;
 	}
 
@@ -1064,8 +1099,10 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 
-	if (unlikely(cfqd->active_queue == cfqq))
-		__cfq_slice_expired(cfqd, cfqq, 0);
+	if (unlikely(cfqd->active_queue == cfqq)) {
+		__cfq_slice_expired(cfqd, cfqq, 0, 0);
+		cfq_schedule_dispatch(cfqd);
+	}
 
 	/*
 	 * it's on the empty list and still hashed
@@ -1120,8 +1157,10 @@ static void cfq_free_io_context(struct io_context *ioc)
 
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	if (unlikely(cfqq == cfqd->active_queue))
-		__cfq_slice_expired(cfqd, cfqq, 0);
+	if (unlikely(cfqq == cfqd->active_queue)) {
+		__cfq_slice_expired(cfqd, cfqq, 0, 0);
+		cfq_schedule_dispatch(cfqd);
+	}
 
 	cfq_put_queue(cfqq);
 }
@@ -1238,9 +1277,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
 	cfqq->org_ioprio = cfqq->ioprio;
 	cfqq->org_ioprio_class = cfqq->ioprio_class;
 
-	if (cfq_cfqq_on_rr(cfqq))
-		cfq_resort_rr_list(cfqq, 0);
-
+	cfq_resort_rr_list(cfqq, 0);
 	cfq_clear_cfqq_prio_changed(cfqq);
 }
 
@@ -1332,10 +1369,7 @@ retry:
 		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
 		atomic_set(&cfqq->ref, 0);
 		cfqq->cfqd = cfqd;
-		/*
-		 * set ->slice_left to allow preemption for a new process
-		 */
-		cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
+
 		cfq_mark_cfqq_idle_window(cfqq);
 		cfq_mark_cfqq_prio_changed(cfqq);
 		cfq_mark_cfqq_queue_new(cfqq);
@@ -1471,22 +1505,8 @@ err:
 static void
 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
 {
-	unsigned long elapsed, ttime;
-
-	/*
-	 * if this context already has stuff queued, thinktime is from
-	 * last queue not last end
-	 */
-#if 0
-	if (time_after(cic->last_end_request, cic->last_queue))
-		elapsed = jiffies - cic->last_end_request;
-	else
-		elapsed = jiffies - cic->last_queue;
-#else
-	elapsed = jiffies - cic->last_end_request;
-#endif
-
-	ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
+	unsigned long elapsed = jiffies - cic->last_end_request;
+	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
 
 	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
 	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
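
cfq_update_io_thinktime() above keeps a fixed-point exponentially weighted moving average: each new sample is folded in with weight 1/8, and the factor 256 scales ttime_samples so the running mean (ttime_total / ttime_samples) keeps precision under integer division. A standalone sketch with made-up sample values showing how an outlier only nudges the mean:

    #include <stdio.h>

    int main(void)
    {
    	unsigned long samples = 0, total = 0;
    	unsigned long ttimes[] = { 4, 4, 8, 2, 100, 4 };	/* jiffies */

    	for (int i = 0; i < 6; i++) {
    		/* same update rule as the two lines in the hunk above */
    		samples = (7 * samples + 256) / 8;
    		total = (7 * total + 256 * ttimes[i]) / 8;
    		printf("after sample %3lu: mean thinktime ~ %lu\n",
    		       ttimes[i], total / samples);
    	}
    	return 0;
    }
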
@@ -1546,7 +1566,6 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfq_clear_cfqq_idle_window(cfqq);
 }
 
-
 /*
  * Check if new_cfqq should preempt the currently active queue. Return 0 for
  * no or if we aren't sure, a 1 will cause a preempt.
@@ -1568,11 +1587,6 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	if (!cfq_cfqq_wait_request(new_cfqq))
 		return 0;
 	/*
-	 * if it doesn't have slice left, forget it
-	 */
-	if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
-		return 0;
-	/*
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
 	 */
@@ -1594,10 +1608,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	cfq_slice_expired(cfqd, 1);
-
-	if (!cfqq->slice_left)
-		cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
+	cfq_slice_expired(cfqd, 1, 1);
 
 	/*
 	 * Put the new queue at the front of the of the current list,
@@ -1606,7 +1617,8 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
 	list_move(&cfqq->cfq_list, &cfqd->cur_rr);
 
-	cfqq->slice_end = cfqq->slice_left + jiffies;
+	cfqq->slice_end = 0;
+	cfq_mark_cfqq_slice_new(cfqq);
 }
 
 /*
@@ -1639,7 +1651,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 */
 		if (cic == cfqd->active_cic &&
 		    del_timer(&cfqd->idle_slice_timer)) {
-			cfq_slice_expired(cfqd, 0);
+			cfq_slice_expired(cfqd, 0, 0);
 			blk_start_queueing(cfqd->queue);
 		}
 		return;
@@ -1649,7 +1661,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfq_update_io_seektime(cic, rq);
 	cfq_update_idle_window(cfqd, cfqq, cic);
 
-	cic->last_queue = jiffies;
 	cic->last_request_pos = rq->sector + rq->nr_sectors;
 
 	if (cfqq == cfqd->active_queue) {
@@ -1702,12 +1713,12 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
 	WARN_ON(!cfqq->on_dispatch[sync]);
 	cfqd->rq_in_driver--;
 	cfqq->on_dispatch[sync]--;
+	cfqq->service_last = now;
 
 	if (!cfq_class_idle(cfqq))
 		cfqd->last_end_request = now;
 
-	if (!cfq_cfqq_dispatched(cfqq) && cfq_cfqq_on_rr(cfqq))
-		cfq_resort_rr_list(cfqq, 0);
+	cfq_resort_rr_list(cfqq, 0);
 
 	if (sync)
 		RQ_CIC(rq)->last_end_request = now;
@@ -1717,10 +1728,14 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
 	 * or if we want to idle in case it has no pending requests.
 	 */
 	if (cfqd->active_queue == cfqq) {
-		if (time_after(now, cfqq->slice_end))
-			cfq_slice_expired(cfqd, 0);
+		if (cfq_cfqq_slice_new(cfqq)) {
+			cfq_set_prio_slice(cfqd, cfqq);
+			cfq_clear_cfqq_slice_new(cfqq);
+		}
+		if (cfq_slice_used(cfqq))
+			cfq_slice_expired(cfqd, 0, 1);
 		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) {
-			if (!cfq_arm_slice_timer(cfqd, cfqq))
+			if (!cfq_arm_slice_timer(cfqd))
 				cfq_schedule_dispatch(cfqd);
 		}
 	}
@@ -1757,8 +1772,7 @@ static void cfq_prio_boost(struct cfq_queue *cfqq)
 	/*
 	 * refile between round-robin lists if we moved the priority class
 	 */
-	if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
-	    cfq_cfqq_on_rr(cfqq))
+	if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio))
 		cfq_resort_rr_list(cfqq, 0);
 }
 
@@ -1893,16 +1907,17 @@ static void cfq_idle_slice_timer(unsigned long data)
 	struct cfq_data *cfqd = (struct cfq_data *) data;
 	struct cfq_queue *cfqq;
 	unsigned long flags;
+	int timed_out = 1;
 
 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
 	if ((cfqq = cfqd->active_queue) != NULL) {
-		unsigned long now = jiffies;
+		timed_out = 0;
 
 		/*
 		 * expired
 		 */
-		if (time_after(now, cfqq->slice_end))
+		if (cfq_slice_used(cfqq))
 			goto expire;
 
 		/*
@@ -1921,7 +1936,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 		}
 	}
 expire:
-	cfq_slice_expired(cfqd, 0);
+	cfq_slice_expired(cfqd, 0, timed_out);
out_kick:
 	cfq_schedule_dispatch(cfqd);
out_cont:
@@ -1967,7 +1982,7 @@ static void cfq_exit_queue(elevator_t *e)
 	spin_lock_irq(q->queue_lock);
 
 	if (cfqd->active_queue)
-		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
+		__cfq_slice_expired(cfqd, cfqd->active_queue, 0, 0);
 
 	while (!list_empty(&cfqd->cic_list)) {
 		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
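
The new cfq_prio_to_slice()/cfq_set_prio_slice() pair sizes each queue's time slice from its io priority, and slice_resid carries at most one slice's worth of leftover time into the next slice. A standalone sketch of the scaling arithmetic; the constants below (CFQ_SLICE_SCALE = 5 and a 100 ms sync base slice) are assumed defaults for illustration, not values stated in this diff:

    #include <stdio.h>

    #define CFQ_SLICE_SCALE	5	/* assumed 2.6-era default */
    #define IOPRIO_BE_NR	8

    static int prio_to_slice(int base_slice, int ioprio)
    {
    	/* same arithmetic as cfq_prio_to_slice() in the diff above */
    	return base_slice + (base_slice / CFQ_SLICE_SCALE * (4 - ioprio));
    }

    int main(void)
    {
    	const int base = 100;	/* assumed sync base slice, in ms */

    	for (int prio = 0; prio < IOPRIO_BE_NR; prio++)
    		printf("ioprio %d -> slice %d ms\n",
    		       prio, prio_to_slice(base, prio));
    	return 0;
    }

With those assumptions the best-effort priorities spread from 180 ms at ioprio 0 down to 40 ms at ioprio 7, centered on the 100 ms base at ioprio 4.
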
diff --git a/block/elevator.c b/block/elevator.c
index f6dafa8c7c..25f6ef28e3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -269,6 +269,22 @@ void elevator_exit(elevator_t *e)
 
 EXPORT_SYMBOL(elevator_exit);
 
+static void elv_activate_rq(request_queue_t *q, struct request *rq)
+{
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_activate_req_fn)
+		e->ops->elevator_activate_req_fn(q, rq);
+}
+
+static void elv_deactivate_rq(request_queue_t *q, struct request *rq)
+{
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_deactivate_req_fn)
+		e->ops->elevator_deactivate_req_fn(q, rq);
+}
+
 static inline void __elv_rqhash_del(struct request *rq)
 {
 	hlist_del_init(&rq->hash);
@@ -397,6 +413,8 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
+		if (rq_data_dir(rq) != rq_data_dir(pos))
+			break;
 		if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
 			break;
 		if (rq->sector >= boundary) {
@@ -498,16 +516,14 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 
 void elv_requeue_request(request_queue_t *q, struct request *rq)
 {
-	elevator_t *e = q->elevator;
-
 	/*
 	 * it already went through dequeue, we need to decrement the
 	 * in_flight count again
 	 */
 	if (blk_account_rq(rq)) {
 		q->in_flight--;
-		if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
-			e->ops->elevator_deactivate_req_fn(q, rq);
+		if (blk_sorted_rq(rq))
+			elv_deactivate_rq(q, rq);
 	}
 
 	rq->cmd_flags &= ~REQ_STARTED;
@@ -700,16 +716,13 @@ struct request *elv_next_request(request_queue_t *q)
 
 	while ((rq = __elv_next_request(q)) != NULL) {
 		if (!(rq->cmd_flags & REQ_STARTED)) {
-			elevator_t *e = q->elevator;
-
 			/*
 			 * This is the first time the device driver
 			 * sees this request (possibly after
 			 * requeueing). Notify IO scheduler.
 			 */
-			if (blk_sorted_rq(rq) &&
-			    e->ops->elevator_activate_req_fn)
-				e->ops->elevator_activate_req_fn(q, rq);
+			if (blk_sorted_rq(rq))
+				elv_activate_rq(q, rq);
 
 			/*
 			 * just mark as started even if we don't start
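
Both elevator.c call sites shrink because the "is the hook implemented?" test moves into the new elv_activate_rq()/elv_deactivate_rq() wrappers. A standalone sketch of that optional-callback pattern (the names below are illustrative, not the elevator API):

    #include <stdio.h>

    struct ops {
    	void (*activate)(int rq);	/* optional hook, may be NULL */
    };

    /* the NULL check lives in one place instead of at every call site */
    static void activate_rq(const struct ops *ops, int rq)
    {
    	if (ops->activate)
    		ops->activate(rq);
    }

    static void print_activate(int rq)
    {
    	printf("activated rq %d\n", rq);
    }

    int main(void)
    {
    	struct ops with_hook = { .activate = print_activate };
    	struct ops without_hook = { 0 };

    	activate_rq(&with_hook, 1);	/* prints */
    	activate_rq(&without_hook, 2);	/* safely does nothing */
    	return 0;
    }
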
diff --git a/block/genhd.c b/block/genhd.c
index 457fdac4c1..36bd3e12a6 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -61,6 +61,14 @@ int register_blkdev(unsigned int major, const char *name)
 	/* temporary */
 	if (major == 0) {
 		for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
+			/*
+			 * Disallow the LANANA-assigned LOCAL/EXPERIMENTAL
+			 * majors
+			 */
+			if ((60 <= index && index <= 63) ||
+			    (120 <= index && index <= 127) ||
+			    (240 <= index && index <= 254))
+				continue;
 			if (major_names[index] == NULL)
 				break;
 		}
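
Dynamic major allocation in register_blkdev() now skips the block majors that the LANANA device registry marks for local and experimental use (60-63, 120-127, 240-254, as the added comment and ranges state). A standalone sketch of the same predicate:

    #include <stdio.h>

    /* mirrors the range test added in register_blkdev() above */
    static int lanana_local_major(unsigned int major)
    {
    	return (60 <= major && major <= 63) ||
    	       (120 <= major && major <= 127) ||
    	       (240 <= major && major <= 254);
    }

    int main(void)
    {
    	unsigned int probes[] = { 59, 60, 63, 64, 127, 239, 254, 255 };

    	for (int i = 0; i < 8; i++)
    		printf("major %3u: %s\n", probes[i],
    		       lanana_local_major(probes[i])
    				? "reserved, skipped"
    				: "eligible for dynamic allocation");
    	return 0;
    }
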
diff --git a/block/ioctl.c b/block/ioctl.c
index f6962b6466..e3f5eb9882 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -61,7 +61,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
 			}
 		}
 		/* all seems OK */
-		add_partition(disk, part, start, length);
+		add_partition(disk, part, start, length, ADDPART_FLAG_NONE);
 		mutex_unlock(&bdev->bd_mutex);
 		return 0;
 	case BLKPG_DEL_PARTITION:
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index fb6789725e..38c293b987 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1264,7 +1264,7 @@ new_hw_segment:
 	bio->bi_hw_segments = nr_hw_segs;
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
-
+EXPORT_SYMBOL(blk_recount_segments);
 
 static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
 				   struct bio *nxt)