author		Dave Kleikamp <shaggy@austin.ibm.com>	2006-03-14 18:05:45 -0500
committer	Dave Kleikamp <shaggy@austin.ibm.com>	2006-03-14 18:05:45 -0500
commit		c5111f504d2a9b0d258d7c4752b4093523315989
tree		6a52864aff79691689aea21cb0cb928327d5de5b
parent		69eb66d7da7dba2696281981347698e1693c2340
parent		a488edc914aa1d766a4e2c982b5ae03d5657ec1b
Merge with /home/shaggy/git/linus-clean/
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	151
-rw-r--r--	block/elevator.c	114
-rw-r--r--	block/ll_rw_blk.c	86
-rw-r--r--	block/scsi_ioctl.c	3
4 files changed, 161 insertions, 193 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 74fae2daf87e..c8dbe38c81c8 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -239,7 +239,6 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_fifo_expire,
 	CFQ_CFQQ_FLAG_idle_window,
 	CFQ_CFQQ_FLAG_prio_changed,
-	CFQ_CFQQ_FLAG_expired,
 };
 
 #define CFQ_CFQQ_FNS(name)						\
@@ -264,7 +263,6 @@ CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(fifo_expire);
 CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
-CFQ_CFQQ_FNS(expired);
 #undef CFQ_CFQQ_FNS
 
 enum cfq_rq_state_flags {
@@ -336,7 +334,7 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
  */
 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
-	if (!cfqd->rq_in_driver && cfqd->busy_queues)
+	if (cfqd->busy_queues)
 		kblockd_schedule_work(&cfqd->unplug_work);
 }
 
@@ -736,13 +734,63 @@ __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		cfqq->slice_left = 0;
 		cfq_clear_cfqq_must_alloc_slice(cfqq);
 		cfq_clear_cfqq_fifo_expire(cfqq);
-		cfq_clear_cfqq_expired(cfqq);
 	}
 
 	cfqd->active_queue = cfqq;
 }
 
 /*
+ * current cfqq expired its slice (or was too idle), select new one
+ */
+static void
+__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+		    int preempted)
+{
+	unsigned long now = jiffies;
+
+	if (cfq_cfqq_wait_request(cfqq))
+		del_timer(&cfqd->idle_slice_timer);
+
+	if (!preempted && !cfq_cfqq_dispatched(cfqq)) {
+		cfqq->service_last = now;
+		cfq_schedule_dispatch(cfqd);
+	}
+
+	cfq_clear_cfqq_must_dispatch(cfqq);
+	cfq_clear_cfqq_wait_request(cfqq);
+
+	/*
+	 * store what was left of this slice, if the queue idled out
+	 * or was preempted
+	 */
+	if (time_after(cfqq->slice_end, now))
+		cfqq->slice_left = cfqq->slice_end - now;
+	else
+		cfqq->slice_left = 0;
+
+	if (cfq_cfqq_on_rr(cfqq))
+		cfq_resort_rr_list(cfqq, preempted);
+
+	if (cfqq == cfqd->active_queue)
+		cfqd->active_queue = NULL;
+
+	if (cfqd->active_cic) {
+		put_io_context(cfqd->active_cic->ioc);
+		cfqd->active_cic = NULL;
+	}
+
+	cfqd->dispatch_slice = 0;
+}
+
+static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
+{
+	struct cfq_queue *cfqq = cfqd->active_queue;
+
+	if (cfqq)
+		__cfq_slice_expired(cfqd, cfqq, preempted);
+}
+
+/*
  * 0
  * 0,1
  * 0,1,2
@@ -801,16 +849,7 @@ static int cfq_get_next_prio_level(struct cfq_data *cfqd)
 
 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 {
-	struct cfq_queue *cfqq;
-
-	/*
-	 * if current queue is expired but not done with its requests yet,
-	 * wait for that to happen
-	 */
-	if ((cfqq = cfqd->active_queue) != NULL) {
-		if (cfq_cfqq_expired(cfqq) && cfq_cfqq_dispatched(cfqq))
-			return NULL;
-	}
+	struct cfq_queue *cfqq = NULL;
 
 	/*
 	 * if current list is non-empty, grab first entry. if it is empty,
@@ -837,66 +876,11 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 	return cfqq;
 }
 
-/*
- * current cfqq expired its slice (or was too idle), select new one
- */
-static void
-__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-		    int preempted)
-{
-	unsigned long now = jiffies;
-
-	if (cfq_cfqq_wait_request(cfqq))
-		del_timer(&cfqd->idle_slice_timer);
-
-	if (!preempted && !cfq_cfqq_dispatched(cfqq))
-		cfqq->service_last = now;
-
-	cfq_clear_cfqq_must_dispatch(cfqq);
-	cfq_clear_cfqq_wait_request(cfqq);
-
-	/*
-	 * store what was left of this slice, if the queue idled out
-	 * or was preempted
-	 */
-	if (time_after(cfqq->slice_end, now))
-		cfqq->slice_left = cfqq->slice_end - now;
-	else
-		cfqq->slice_left = 0;
-
-	if (cfq_cfqq_on_rr(cfqq))
-		cfq_resort_rr_list(cfqq, preempted);
-
-	if (cfqq == cfqd->active_queue)
-		cfqd->active_queue = NULL;
-
-	if (cfqd->active_cic) {
-		put_io_context(cfqd->active_cic->ioc);
-		cfqd->active_cic = NULL;
-	}
-
-	cfqd->dispatch_slice = 0;
-}
-
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
-{
-	struct cfq_queue *cfqq = cfqd->active_queue;
-
-	if (cfqq) {
-		/*
-		 * use deferred expiry, if there are requests in progress as
-		 * not to disturb the slice of the next queue
-		 */
-		if (cfq_cfqq_dispatched(cfqq))
-			cfq_mark_cfqq_expired(cfqq);
-		else
-			__cfq_slice_expired(cfqd, cfqq, preempted);
-	}
-}
-
 static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
 {
+	unsigned long sl;
+
 	WARN_ON(!RB_EMPTY(&cfqq->sort_list));
 	WARN_ON(cfqq != cfqd->active_queue);
 
@@ -916,13 +900,8 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	cfq_mark_cfqq_must_dispatch(cfqq);
 	cfq_mark_cfqq_wait_request(cfqq);
 
-	if (!timer_pending(&cfqd->idle_slice_timer)) {
-		unsigned long slice_left = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
-
-		cfqd->idle_slice_timer.expires = jiffies + slice_left;
-		add_timer(&cfqd->idle_slice_timer);
-	}
-
+	sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
+	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
 	return 1;
 }
 
@@ -1006,9 +985,6 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	if (!cfqq)
 		goto new_queue;
 
-	if (cfq_cfqq_expired(cfqq))
-		goto new_queue;
-
 	/*
 	 * slice has expired
 	 */
@@ -1181,10 +1157,8 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 
-	if (unlikely(cfqd->active_queue == cfqq)) {
+	if (unlikely(cfqd->active_queue == cfqq))
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd);
-	}
 
 	cfq_put_cfqd(cfqq->cfqd);
 
@@ -1245,10 +1219,8 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 
 	spin_lock(q->queue_lock);
 
-	if (unlikely(cic->cfqq == cfqd->active_queue)) {
+	if (unlikely(cic->cfqq == cfqd->active_queue))
 		__cfq_slice_expired(cfqd, cic->cfqq, 0);
-		cfq_schedule_dispatch(cfqd);
-	}
 
 	cfq_put_queue(cic->cfqq);
 	cic->cfqq = NULL;
@@ -1715,10 +1687,7 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
 			cfqq->service_last = now;
 			cfq_resort_rr_list(cfqq, 0);
 		}
-		if (cfq_cfqq_expired(cfqq)) {
-			__cfq_slice_expired(cfqd, cfqq, 0);
-			cfq_schedule_dispatch(cfqd);
-		}
+		cfq_schedule_dispatch(cfqd);
 	}
 
 	if (cfq_crq_is_sync(crq))
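
Note on the cfq-iosched.c hunks above: the CFQ_CFQQ_FLAG_expired flag and the deferred-expiry path are removed, so cfq_slice_expired() now tears down the active queue immediately and the completion path simply kicks the dispatcher with cfq_schedule_dispatch(). The standalone toy program below only illustrates the slice_left bookkeeping that __cfq_slice_expired() performs when a queue is preempted or idles out; the toy_ names and jiffy values are invented for the example and are not kernel code.

/* Toy model of the remaining-slice accounting done by __cfq_slice_expired(). */
#include <stdio.h>

struct toy_cfqq {
	unsigned long slice_end;   /* jiffy at which the slice would have ended */
	unsigned long slice_left;  /* remainder credited to the next slice */
};

static void toy_slice_expired(struct toy_cfqq *q, unsigned long now)
{
	/* store what was left of this slice, if the queue idled out or was preempted */
	q->slice_left = (q->slice_end > now) ? q->slice_end - now : 0;
}

int main(void)
{
	struct toy_cfqq q = { .slice_end = 1000 };

	toy_slice_expired(&q, 940);                    /* preempted 60 jiffies early */
	printf("slice_left = %lu\n", q.slice_left);    /* 60 */

	toy_slice_expired(&q, 1020);                   /* slice already over */
	printf("slice_left = %lu\n", q.slice_left);    /* 0 */
	return 0;
}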
diff --git a/block/elevator.c b/block/elevator.c
index c9f424d5399c..24b702d649a9 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -139,35 +139,16 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e,
 
 static char chosen_elevator[16];
 
-static void elevator_setup_default(void)
+static int __init elevator_setup(char *str)
 {
-	struct elevator_type *e;
-
-	/*
-	 * If default has not been set, use the compiled-in selection.
-	 */
-	if (!chosen_elevator[0])
-		strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
-
 	/*
 	 * Be backwards-compatible with previous kernels, so users
 	 * won't get the wrong elevator.
 	 */
-	if (!strcmp(chosen_elevator, "as"))
+	if (!strcmp(str, "as"))
 		strcpy(chosen_elevator, "anticipatory");
-
-	/*
-	 * If the given scheduler is not available, fall back to the default
-	 */
-	if ((e = elevator_find(chosen_elevator)))
-		elevator_put(e);
 	else
-		strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
-}
-
-static int __init elevator_setup(char *str)
-{
-	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
+		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
 	return 0;
 }
 
@@ -184,14 +165,16 @@ int elevator_init(request_queue_t *q, char *name)
 	q->end_sector = 0;
 	q->boundary_rq = NULL;
 
-	elevator_setup_default();
+	if (name && !(e = elevator_get(name)))
+		return -EINVAL;
 
-	if (!name)
-		name = chosen_elevator;
+	if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
+		printk("I/O scheduler %s not found\n", chosen_elevator);
 
-	e = elevator_get(name);
-	if (!e)
-		return -EINVAL;
+	if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
+		printk("Default I/O scheduler not found, using no-op\n");
+		e = elevator_get("noop");
+	}
 
 	eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
 	if (!eq) {
@@ -310,7 +293,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 
 	rq->flags &= ~REQ_STARTED;
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
 static void elv_drain_elevator(request_queue_t *q)
@@ -327,40 +310,11 @@ static void elv_drain_elevator(request_queue_t *q)
 	}
 }
 
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
-		       int plug)
+void elv_insert(request_queue_t *q, struct request *rq, int where)
 {
 	struct list_head *pos;
 	unsigned ordseq;
 
-	if (q->ordcolor)
-		rq->flags |= REQ_ORDERED_COLOR;
-
-	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
-		/*
-		 * toggle ordered color
-		 */
-		q->ordcolor ^= 1;
-
-		/*
-		 * barriers implicitly indicate back insertion
-		 */
-		if (where == ELEVATOR_INSERT_SORT)
-			where = ELEVATOR_INSERT_BACK;
-
-		/*
-		 * this request is scheduling boundary, update end_sector
-		 */
-		if (blk_fs_request(rq)) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = rq;
-		}
-	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
-		where = ELEVATOR_INSERT_BACK;
-
-	if (plug)
-		blk_plug_device(q);
-
 	rq->q = q;
 
 	switch (where) {
@@ -441,6 +395,42 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 	}
 }
 
+void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+		       int plug)
+{
+	if (q->ordcolor)
+		rq->flags |= REQ_ORDERED_COLOR;
+
+	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+		/*
+		 * toggle ordered color
+		 */
+		if (blk_barrier_rq(rq))
+			q->ordcolor ^= 1;
+
+		/*
+		 * barriers implicitly indicate back insertion
+		 */
+		if (where == ELEVATOR_INSERT_SORT)
+			where = ELEVATOR_INSERT_BACK;
+
+		/*
+		 * this request is scheduling boundary, update
+		 * end_sector
+		 */
+		if (blk_fs_request(rq)) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = rq;
+		}
+	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+		where = ELEVATOR_INSERT_BACK;
+
+	if (plug)
+		blk_plug_device(q);
+
+	elv_insert(q, rq, where);
+}
+
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
 		     int plug)
 {
@@ -669,8 +659,10 @@ int elv_register(struct elevator_type *e)
 	spin_unlock_irq(&elv_list_lock);
 
 	printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
-	if (!strcmp(e->elevator_name, chosen_elevator))
-		printk(" (default)");
+	if (!strcmp(e->elevator_name, chosen_elevator) ||
+			(!*chosen_elevator &&
+			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
+				printk(" (default)");
 	printk("\n");
 	return 0;
 }
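
Note on the elevator.c hunks above: elevator_setup_default() is gone and elevator_init() now walks a fallback chain itself: an explicitly requested name must exist, otherwise the boot-time "elevator=" choice is tried, then CONFIG_DEFAULT_IOSCHED, and finally noop. The sketch below is a userspace-only model of that ordering; the pretend registry and the pick()/get() helpers are invented for illustration and are not kernel APIs.

/* Standalone sketch of the scheduler selection order used by the new elevator_init(). */
#include <stdio.h>
#include <string.h>

static const char *available[] = { "noop", "cfq", NULL };   /* pretend registry */

static const char *get(const char *name)
{
	for (int i = 0; available[i]; i++)
		if (!strcmp(available[i], name))
			return available[i];
	return NULL;
}

static const char *pick(const char *name, const char *chosen, const char *compiled)
{
	const char *e = NULL;

	if (name && !(e = get(name)))
		return NULL;                    /* explicit request must exist */
	if (!e && *chosen && !(e = get(chosen)))
		printf("I/O scheduler %s not found\n", chosen);
	if (!e && !(e = get(compiled))) {
		printf("Default I/O scheduler not found, using no-op\n");
		e = get("noop");
	}
	return e;
}

int main(void)
{
	printf("-> %s\n", pick(NULL, "deadline", "anticipatory")); /* falls through to noop */
	printf("-> %s\n", pick(NULL, "", "cfq"));                  /* compiled-in default */
	return 0;
}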
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8e27d0ab0d7c..0ef2971a9e82 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -304,6 +304,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
  * blk_queue_ordered - does this queue support ordered writes
  * @q:     the request queue
  * @ordered: one of QUEUE_ORDERED_*
+ * @prepare_flush_fn: rq setup helper for cache flush ordered writes
  *
  * Description:
  *   For journalled file systems, doing ordered writes on a commit
@@ -332,6 +333,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
 		return -EINVAL;
 	}
 
+	q->ordered = ordered;
 	q->next_ordered = ordered;
 	q->prepare_flush_fn = prepare_flush_fn;
 
@@ -452,7 +454,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
 	rq->end_io = end_io;
 	q->prepare_flush_fn(q, rq);
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
 static inline struct request *start_ordered(request_queue_t *q,
@@ -488,7 +490,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 	else
 		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 
 	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
 		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
@@ -506,7 +508,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 
 int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-	struct request *rq = *rqp, *allowed_rq;
+	struct request *rq = *rqp;
 	int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
 	if (!q->ordseq) {
@@ -530,32 +532,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
 		}
 	}
 
+	/*
+	 * Ordered sequence in progress
+	 */
+
+	/* Special requests are not subject to ordering rules. */
+	if (!blk_fs_request(rq) &&
+	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+		return 1;
+
 	if (q->ordered & QUEUE_ORDERED_TAG) {
+		/* Ordered by tag. Blocking the next barrier is enough. */
 		if (is_barrier && rq != &q->bar_rq)
 			*rqp = NULL;
-		return 1;
-	}
-
-	switch (blk_ordered_cur_seq(q)) {
-	case QUEUE_ORDSEQ_PREFLUSH:
-		allowed_rq = &q->pre_flush_rq;
-		break;
-	case QUEUE_ORDSEQ_BAR:
-		allowed_rq = &q->bar_rq;
-		break;
-	case QUEUE_ORDSEQ_POSTFLUSH:
-		allowed_rq = &q->post_flush_rq;
-		break;
-	default:
-		allowed_rq = NULL;
-		break;
+	} else {
+		/* Ordered by draining. Wait for turn. */
+		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+			*rqp = NULL;
 	}
 
-	if (rq != allowed_rq &&
-	    (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
-	     rq == &q->post_flush_rq))
-		*rqp = NULL;
-
 	return 1;
 }
 
@@ -629,26 +625,31 @@ static inline int ordered_bio_endio(struct request *rq, struct bio *bio,
  * Different hardware can have different requirements as to what pages
  * it can do I/O directly to. A low level driver can call
  * blk_queue_bounce_limit to have lower memory pages allocated as bounce
- * buffers for doing I/O to pages residing above @page. By default
- * the block layer sets this to the highest numbered "low" memory page.
+ * buffers for doing I/O to pages residing above @page.
  **/
 void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
 {
 	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
-
-	/*
-	 * set appropriate bounce gfp mask -- unfortunately we don't have a
-	 * full 4GB zone, so we have to resort to low memory for any bounces.
-	 * ISA has its own < 16MB zone.
-	 */
-	if (bounce_pfn < blk_max_low_pfn) {
-		BUG_ON(dma_addr < BLK_BOUNCE_ISA);
+	int dma = 0;
+
+	q->bounce_gfp = GFP_NOIO;
+#if BITS_PER_LONG == 64
+	/* Assume anything <= 4GB can be handled by IOMMU.
+	   Actually some IOMMUs can handle everything, but I don't
+	   know of a way to test this here. */
+	if (bounce_pfn < (0xffffffff>>PAGE_SHIFT))
+		dma = 1;
+	q->bounce_pfn = max_low_pfn;
+#else
+	if (bounce_pfn < blk_max_low_pfn)
+		dma = 1;
+	q->bounce_pfn = bounce_pfn;
+#endif
+	if (dma) {
 		init_emergency_isa_pool();
 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
-	} else
-		q->bounce_gfp = GFP_NOIO;
-
-	q->bounce_pfn = bounce_pfn;
+		q->bounce_pfn = bounce_pfn;
+	}
 }
 
 EXPORT_SYMBOL(blk_queue_bounce_limit);
@@ -662,7 +663,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  * Enables a low level driver to set an upper limit on the size of
  * received requests.
  **/
-void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
+void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -2577,6 +2578,8 @@ void disk_round_stats(struct gendisk *disk)
 	disk->stamp = now;
 }
 
+EXPORT_SYMBOL_GPL(disk_round_stats);
+
 /*
  * queue lock must be held
  */
@@ -2632,6 +2635,7 @@ EXPORT_SYMBOL(blk_put_request);
 /**
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
+ * @error: end io status of the request
  */
 void blk_end_sync_rq(struct request *rq, int error)
 {
@@ -3153,7 +3157,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
 	if (blk_fs_request(req) && req->rq_disk) {
 		const int rw = rq_data_dir(req);
 
-		__disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
+		disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
 	}
 
 	total_bytes = bio_nbytes = 0;
@@ -3448,7 +3452,7 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
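
Note on the blk_queue_bounce_limit() hunk above: on 64-bit builds the bounce decision now assumes an IOMMU can reach anything at or above 4GB, so only device DMA limits below 4GB switch the queue to the ISA/GFP_DMA bounce pool. The toy program below shows only that threshold arithmetic; TOY_PAGE_SHIFT and needs_dma_pool() are invented names and a 4KB page size is assumed.

/* Toy illustration of the 64-bit bounce-pfn threshold check. */
#include <stdio.h>
#include <stdint.h>

#define TOY_PAGE_SHIFT 12

static int needs_dma_pool(uint64_t dma_addr)
{
	uint64_t bounce_pfn = dma_addr >> TOY_PAGE_SHIFT;

	/* limits below 4GB force use of the low-memory bounce pool */
	return bounce_pfn < (0xffffffffULL >> TOY_PAGE_SHIFT);
}

int main(void)
{
	printf("24-bit ISA limit : %d\n", needs_dma_pool(0x00ffffffULL));   /* 1 */
	printf("32-bit limit     : %d\n", needs_dma_pool(0xffffffffULL));   /* 0 */
	printf("64-bit limit     : %d\n", needs_dma_pool(~0ULL));           /* 0 */
	return 0;
}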
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index cc72210687eb..24f7af9d0abc 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -310,6 +310,8 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (!rq->timeout)
 		rq->timeout = BLK_DEFAULT_TIMEOUT;
 
+	rq->retries = 0;
+
 	start_time = jiffies;
 
 	/* ignore return value. All information is passed back to caller
@@ -427,6 +429,7 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
 	rq->data = buffer;
 	rq->data_len = bytes;
 	rq->flags |= REQ_BLOCK_PC;
+	rq->retries = 0;
 
 	blk_execute_rq(q, bd_disk, rq, 0);
 	err = rq->errors & 0xff;	/* only 8 bit SCSI status */