author     Peter Osterlund <petero2@telia.com>        2005-06-23 03:10:02 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-23 12:45:30 -0400
commit     46c271bedd2c8444b1d05bc44928beec0c07debc (patch)
tree       2e28cad2355b62b01815d1acc10d35a806f10ea0 /drivers
parent     dfb388bf8a328f206bba33933dd97230f412238b (diff)
[PATCH] Improve CD/DVD packet driver write performance
This patch improves write performance for the CD/DVD packet writing driver.
The logic for switching between reading and writing has been changed so
that streaming writes are no longer interrupted by read requests.
Signed-off-by: Peter Osterlund <petero2@telia.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
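
The core of the change: the scheduler no longer lets a flagged high-priority read preempt a write stream; it leaves write mode only when the next write would force a seek anyway. A minimal standalone sketch of that decision follows. The struct and function names here are illustrative stand-ins, not the driver's actual types:

#include <stdbool.h>

/* Simplified stand-in for the relevant fields of pd->iosched. */
struct sketch_iosched {
	bool          writing;    /* scheduler is currently in write mode */
	unsigned long last_write; /* sector right after the last issued write */
};

/*
 * Keep streaming writes going: while the head of the write queue starts
 * exactly at last_write, no seek is needed and reads keep waiting. Only
 * when a seek is unavoidable (empty or non-contiguous write queue) and
 * reads are pending is switching direction worth its cost.
 */
static bool switch_to_reading(const struct sketch_iosched *io,
                              bool have_write,
                              unsigned long next_write_sector,
                              bool reads_queued)
{
	bool need_write_seek = !have_write ||
	                       next_write_sector != io->last_write;

	return io->writing && need_write_seek && reads_queued;
}

In the driver itself the switch additionally waits for in-flight bios (pd->cdrw.pending_bios) to drain before changing direction, as the pkt_iosched_process_queue() hunk below shows.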
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/pktcdvd.c  36
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index bc56770bcc90..7f3d78de265c 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -467,14 +467,12 @@ static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsign
  * Queue a bio for processing by the low-level CD device. Must be called
  * from process context.
  */
-static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_prio_read)
+static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
 {
 	spin_lock(&pd->iosched.lock);
 	if (bio_data_dir(bio) == READ) {
 		pkt_add_list_last(bio, &pd->iosched.read_queue,
 				  &pd->iosched.read_queue_tail);
-		if (high_prio_read)
-			pd->iosched.high_prio_read = 1;
 	} else {
 		pkt_add_list_last(bio, &pd->iosched.write_queue,
 				  &pd->iosched.write_queue_tail);
@@ -490,15 +488,16 @@ static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_p
  * requirements for CDRW drives:
  * - A cache flush command must be inserted before a read request if the
  *   previous request was a write.
- * - Switching between reading and writing is slow, so don't it more often
+ * - Switching between reading and writing is slow, so don't do it more often
  *   than necessary.
+ * - Optimize for throughput at the expense of latency. This means that streaming
+ *   writes will never be interrupted by a read, but if the drive has to seek
+ *   before the next write, switch to reading instead if there are any pending
+ *   read requests.
  * - Set the read speed according to current usage pattern. When only reading
  *   from the device, it's best to use the highest possible read speed, but
  *   when switching often between reading and writing, it's better to have the
  *   same read and write speeds.
- * - Reads originating from user space should have higher priority than reads
- *   originating from pkt_gather_data, because some process is usually waiting
- *   on reads of the first kind.
  */
 static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 {
@@ -512,21 +511,24 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 
 	for (;;) {
 		struct bio *bio;
-		int reads_queued, writes_queued, high_prio_read;
+		int reads_queued, writes_queued;
 
 		spin_lock(&pd->iosched.lock);
 		reads_queued = (pd->iosched.read_queue != NULL);
 		writes_queued = (pd->iosched.write_queue != NULL);
-		if (!reads_queued)
-			pd->iosched.high_prio_read = 0;
-		high_prio_read = pd->iosched.high_prio_read;
 		spin_unlock(&pd->iosched.lock);
 
 		if (!reads_queued && !writes_queued)
 			break;
 
 		if (pd->iosched.writing) {
-			if (high_prio_read || (!writes_queued && reads_queued)) {
+			int need_write_seek = 1;
+			spin_lock(&pd->iosched.lock);
+			bio = pd->iosched.write_queue;
+			spin_unlock(&pd->iosched.lock);
+			if (bio && (bio->bi_sector == pd->iosched.last_write))
+				need_write_seek = 0;
+			if (need_write_seek && reads_queued) {
 				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
 					VPRINTK("pktcdvd: write, waiting\n");
 					break;
@@ -559,8 +561,10 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 
 		if (bio_data_dir(bio) == READ)
 			pd->iosched.successive_reads += bio->bi_size >> 10;
-		else
+		else {
 			pd->iosched.successive_reads = 0;
+			pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
+		}
 		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
 			if (pd->read_speed == pd->write_speed) {
 				pd->read_speed = MAX_SPEED;
@@ -765,7 +769,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 
 		atomic_inc(&pkt->io_wait);
 		bio->bi_rw = READ;
-		pkt_queue_bio(pd, bio, 0);
+		pkt_queue_bio(pd, bio);
 		frames_read++;
 	}
 
@@ -1062,7 +1066,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 
 	atomic_set(&pkt->io_wait, 1);
 	pkt->w_bio->bi_rw = WRITE;
-	pkt_queue_bio(pd, pkt->w_bio, 0);
+	pkt_queue_bio(pd, pkt->w_bio);
 }
 
 static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
@@ -2120,7 +2124,7 @@ static int pkt_make_request(request_queue_t *q, struct bio *bio)
 		cloned_bio->bi_private = psd;
 		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
 		pd->stats.secs_r += bio->bi_size >> 9;
-		pkt_queue_bio(pd, cloned_bio, 1);
+		pkt_queue_bio(pd, cloned_bio);
 		return 0;
 	}
 
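
For reference, the bookkeeping that feeds the seek heuristic can be read out of the last_write/successive_reads hunks above: every write records the sector where it ends, and any write resets the counter of back-to-back reads that drives the read-speed switch. A self-contained sketch, again with illustrative names; the HI_SPEED_SWITCH value here is an assumption, not taken from the driver's header:

#include <stdbool.h>

#define HI_SPEED_SWITCH 512  /* KB of back-to-back reads; assumed value */

struct sketch_state {
	unsigned long last_write;       /* sector following the last write */
	unsigned int  successive_reads; /* KB read since the last write */
};

/* Account one completed bio, mirroring the arithmetic in the hunks above:
 * bi_size >> 10 converts bytes to KB, bi_size >> 9 is bio_sectors(). */
static bool account_bio(struct sketch_state *st, unsigned long bi_sector,
                        unsigned int bi_size, bool is_read)
{
	if (is_read) {
		st->successive_reads += bi_size >> 10;
	} else {
		st->successive_reads = 0;
		st->last_write = bi_sector + (bi_size >> 9);
	}
	/* true when the driver should consider raising the read speed */
	return st->successive_reads >= HI_SPEED_SWITCH;
}

When the counter crosses the threshold while read and write speeds are equal, the driver raises the read speed to MAX_SPEED, matching the "Set the read speed according to current usage pattern" rule in the comment block.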