Diffstat (limited to 'drivers/block/pktcdvd.c')
-rw-r--r--	drivers/block/pktcdvd.c	| 54
1 files changed, 29 insertions, 25 deletions
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ff8668c5efb1..ce986bacf7b7 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -651,7 +651,7 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
 
 	for (;;) {
 		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
-		if (s <= tmp->bio->bi_sector)
+		if (s <= tmp->bio->bi_iter.bi_sector)
 			next = n->rb_left;
 		else
 			next = n->rb_right;
@@ -660,12 +660,12 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
 		n = next;
 	}
 
-	if (s > tmp->bio->bi_sector) {
+	if (s > tmp->bio->bi_iter.bi_sector) {
 		tmp = pkt_rbtree_next(tmp);
 		if (!tmp)
 			return NULL;
 	}
-	BUG_ON(s > tmp->bio->bi_sector);
+	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
 	return tmp;
 }
 
@@ -676,13 +676,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
 {
 	struct rb_node **p = &pd->bio_queue.rb_node;
 	struct rb_node *parent = NULL;
-	sector_t s = node->bio->bi_sector;
+	sector_t s = node->bio->bi_iter.bi_sector;
 	struct pkt_rb_node *tmp;
 
 	while (*p) {
 		parent = *p;
 		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
-		if (s < tmp->bio->bi_sector)
+		if (s < tmp->bio->bi_iter.bi_sector)
 			p = &(*p)->rb_left;
 		else
 			p = &(*p)->rb_right;
@@ -857,7 +857,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 			spin_lock(&pd->iosched.lock);
 			bio = bio_list_peek(&pd->iosched.write_queue);
 			spin_unlock(&pd->iosched.lock);
-			if (bio && (bio->bi_sector == pd->iosched.last_write))
+			if (bio && (bio->bi_iter.bi_sector ==
+				    pd->iosched.last_write))
 				need_write_seek = 0;
 			if (need_write_seek && reads_queued) {
 				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
@@ -888,7 +889,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 			continue;
 
 		if (bio_data_dir(bio) == READ)
-			pd->iosched.successive_reads += bio->bi_size >> 10;
+			pd->iosched.successive_reads +=
+				bio->bi_iter.bi_size >> 10;
 		else {
 			pd->iosched.successive_reads = 0;
 			pd->iosched.last_write = bio_end_sector(bio);
@@ -978,7 +980,7 @@ static void pkt_end_io_read(struct bio *bio, int err)
 
 	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
 		bio, (unsigned long long)pkt->sector,
-		(unsigned long long)bio->bi_sector, err);
+		(unsigned long long)bio->bi_iter.bi_sector, err);
 
 	if (err)
 		atomic_inc(&pkt->io_errors);
@@ -1026,8 +1028,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 	memset(written, 0, sizeof(written));
 	spin_lock(&pkt->lock);
 	bio_list_for_each(bio, &pkt->orig_bios) {
-		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
-		int num_frames = bio->bi_size / CD_FRAMESIZE;
+		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+			(CD_FRAMESIZE >> 9);
+		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
 		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
 		BUG_ON(first_frame < 0);
 		BUG_ON(first_frame + num_frames > pkt->frames);
@@ -1053,7 +1056,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 
 		bio = pkt->r_bios[f];
 		bio_reset(bio);
-		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
 		bio->bi_bdev = pd->bdev;
 		bio->bi_end_io = pkt_end_io_read;
 		bio->bi_private = pkt;
@@ -1150,8 +1153,8 @@ static int pkt_start_recovery(struct packet_data *pkt)
 	bio_reset(pkt->bio);
 	pkt->bio->bi_bdev = pd->bdev;
 	pkt->bio->bi_rw = REQ_WRITE;
-	pkt->bio->bi_sector = new_sector;
-	pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+	pkt->bio->bi_iter.bi_sector = new_sector;
+	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
 	pkt->bio->bi_vcnt = pkt->frames;
 
 	pkt->bio->bi_end_io = pkt_end_io_packet_write;
@@ -1213,7 +1216,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
 	node = first_node;
 	while (node) {
 		bio = node->bio;
-		zone = get_zone(bio->bi_sector, pd);
+		zone = get_zone(bio->bi_iter.bi_sector, pd);
 		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
 			if (p->sector == zone) {
 				bio = NULL;
@@ -1252,14 +1255,14 @@ try_next_bio:
 	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
 	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
 		bio = node->bio;
-		pkt_dbg(2, pd, "found zone=%llx\n",
-			(unsigned long long)get_zone(bio->bi_sector, pd));
-		if (get_zone(bio->bi_sector, pd) != zone)
+		pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+			get_zone(bio->bi_iter.bi_sector, pd));
+		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
 			break;
 		pkt_rbtree_erase(pd, node);
 		spin_lock(&pkt->lock);
 		bio_list_add(&pkt->orig_bios, bio);
-		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
 		spin_unlock(&pkt->lock);
 	}
 	/* check write congestion marks, and if bio_queue_size is
@@ -1293,7 +1296,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
 
 	bio_reset(pkt->w_bio);
-	pkt->w_bio->bi_sector = pkt->sector;
+	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
 	pkt->w_bio->bi_bdev = pd->bdev;
 	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
 	pkt->w_bio->bi_private = pkt;
@@ -2370,20 +2373,20 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 
 	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
 		pkt_notice(pd, "WRITE for ro device (%llu)\n",
-			   (unsigned long long)bio->bi_sector);
+			   (unsigned long long)bio->bi_iter.bi_sector);
 		goto end_io;
 	}
 
-	if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
+	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
 		pkt_err(pd, "wrong bio size\n");
 		goto end_io;
 	}
 
 	blk_queue_bounce(q, &bio);
 
-	zone = get_zone(bio->bi_sector, pd);
+	zone = get_zone(bio->bi_iter.bi_sector, pd);
 	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
-		(unsigned long long)bio->bi_sector,
+		(unsigned long long)bio->bi_iter.bi_sector,
 		(unsigned long long)bio_end_sector(bio));
 
 	/* Check if we have to split the bio */
@@ -2395,7 +2398,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 		last_zone = get_zone(bio_end_sector(bio) - 1, pd);
 		if (last_zone != zone) {
 			BUG_ON(last_zone != zone + pd->settings.size);
-			first_sectors = last_zone - bio->bi_sector;
+			first_sectors = last_zone - bio->bi_iter.bi_sector;
 			bp = bio_split(bio, first_sectors);
 			BUG_ON(!bp);
 			pkt_make_request(q, &bp->bio1);
@@ -2417,7 +2420,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 			if ((pkt->state == PACKET_WAITING_STATE) ||
 			    (pkt->state == PACKET_READ_WAIT_STATE)) {
 				bio_list_add(&pkt->orig_bios, bio);
-				pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+				pkt->write_size +=
+					bio->bi_iter.bi_size / CD_FRAMESIZE;
 				if ((pkt->write_size >= pkt->frames) &&
 				    (pkt->state == PACKET_WAITING_STATE)) {
 					atomic_inc(&pkt->run_sm);
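
Every hunk above makes the same mechanical substitution: the sector and size fields that used to live directly on struct bio (bi_sector, bi_size) are now read through the embedded iterator bio->bi_iter. As a rough orientation aid only, not the kernel's actual declarations, a minimal userspace sketch of that layout and access pattern, with simplified stand-in types, could look like this:

#include <stdio.h>

typedef unsigned long long sector_t;

/* Simplified stand-in for the bvec iterator; the real struct bvec_iter
 * of this kernel era carries bi_sector, bi_size, bi_idx and bi_bvec_done. */
struct bvec_iter {
	sector_t	bi_sector;	/* device address in 512-byte sectors */
	unsigned int	bi_size;	/* remaining I/O size in bytes */
	unsigned int	bi_idx;		/* current index into the bio_vec array */
	unsigned int	bi_bvec_done;	/* bytes completed in the current bio_vec */
};

/* Simplified stand-in for struct bio: the iterator is embedded as bi_iter. */
struct bio {
	struct bvec_iter bi_iter;
	/* ... remaining members elided ... */
};

int main(void)
{
	struct bio bio = { .bi_iter = { .bi_sector = 128, .bi_size = 32768 } };

	/* Old code read bio.bi_sector and bio.bi_size directly; after the
	 * conversion the same values are reached through bio.bi_iter. */
	printf("sector=%llu size=%u\n", bio.bi_iter.bi_sector, bio.bi_iter.bi_size);
	return 0;
}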