Diffstat (limited to 'drivers/s390')
-rw-r--r--	drivers/s390/block/dasd_diag.c	37
-rw-r--r--	drivers/s390/block/dasd_eckd.c	28
-rw-r--r--	drivers/s390/block/dasd_fba.c	28
-rw-r--r--	drivers/s390/block/dcssblk.c	4
-rw-r--r--	drivers/s390/block/xpram.c	6
-rw-r--r--	drivers/s390/char/tape_34xx.c	32
-rw-r--r--	drivers/s390/char/tape_3590.c	37
7 files changed, 73 insertions, 99 deletions
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index d32c60dbdd82..571320ab9e1a 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -472,14 +472,13 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
 	struct dasd_ccw_req *cqr;
 	struct dasd_diag_req *dreq;
 	struct dasd_diag_bio *dbio;
-	struct bio *bio;
+	struct req_iterator iter;
 	struct bio_vec *bv;
 	char *dst;
 	unsigned int count, datasize;
 	sector_t recid, first_rec, last_rec;
 	unsigned int blksize, off;
 	unsigned char rw_cmd;
-	int i;
 
 	if (rq_data_dir(req) == READ)
 		rw_cmd = MDSK_READ_REQ;
@@ -493,13 +492,11 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
 	last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
 	/* Check struct bio and count the number of blocks for the request. */
 	count = 0;
-	rq_for_each_bio(bio, req) {
-		bio_for_each_segment(bv, bio, i) {
-			if (bv->bv_len & (blksize - 1))
-				/* Fba can only do full blocks. */
-				return ERR_PTR(-EINVAL);
-			count += bv->bv_len >> (device->s2b_shift + 9);
-		}
+	rq_for_each_segment(bv, req, iter) {
+		if (bv->bv_len & (blksize - 1))
+			/* Fba can only do full blocks. */
+			return ERR_PTR(-EINVAL);
+		count += bv->bv_len >> (device->s2b_shift + 9);
 	}
 	/* Paranoia. */
 	if (count != last_rec - first_rec + 1)
@@ -516,18 +513,16 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
 	dreq->block_count = count;
 	dbio = dreq->bio;
 	recid = first_rec;
-	rq_for_each_bio(bio, req) {
-		bio_for_each_segment(bv, bio, i) {
-			dst = page_address(bv->bv_page) + bv->bv_offset;
-			for (off = 0; off < bv->bv_len; off += blksize) {
-				memset(dbio, 0, sizeof (struct dasd_diag_bio));
-				dbio->type = rw_cmd;
-				dbio->block_number = recid + 1;
-				dbio->buffer = dst;
-				dbio++;
-				dst += blksize;
-				recid++;
-			}
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		for (off = 0; off < bv->bv_len; off += blksize) {
+			memset(dbio, 0, sizeof (struct dasd_diag_bio));
+			dbio->type = rw_cmd;
+			dbio->block_number = recid + 1;
+			dbio->buffer = dst;
+			dbio++;
+			dst += blksize;
+			recid++;
 		}
 	}
 	cqr->retries = DIAG_MAX_RETRIES;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index ea63ba7828f9..44adf8496bda 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1176,7 +1176,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
 	struct LO_eckd_data *LO_data;
 	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
-	struct bio *bio;
+	struct req_iterator iter;
 	struct bio_vec *bv;
 	char *dst;
 	unsigned int blksize, blk_per_trk, off;
@@ -1185,7 +1185,6 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
 	sector_t first_trk, last_trk;
 	unsigned int first_offs, last_offs;
 	unsigned char cmd, rcmd;
-	int i;
 
 	private = (struct dasd_eckd_private *) device->private;
 	if (rq_data_dir(req) == READ)
@@ -1206,18 +1205,15 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
 	/* Check struct bio and count the number of blocks for the request. */
 	count = 0;
 	cidaw = 0;
-	rq_for_each_bio(bio, req) {
-		bio_for_each_segment(bv, bio, i) {
-			if (bv->bv_len & (blksize - 1))
-				/* Eckd can only do full blocks. */
-				return ERR_PTR(-EINVAL);
-			count += bv->bv_len >> (device->s2b_shift + 9);
+	rq_for_each_segment(bv, req, iter) {
+		if (bv->bv_len & (blksize - 1))
+			/* Eckd can only do full blocks. */
+			return ERR_PTR(-EINVAL);
+		count += bv->bv_len >> (device->s2b_shift + 9);
 #if defined(CONFIG_64BIT)
-			if (idal_is_needed (page_address(bv->bv_page),
-					    bv->bv_len))
-				cidaw += bv->bv_len >> (device->s2b_shift + 9);
+		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
+			cidaw += bv->bv_len >> (device->s2b_shift + 9);
 #endif
-		}
 	}
 	/* Paranoia. */
 	if (count != last_rec - first_rec + 1)
@@ -1257,7 +1253,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
 		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
 			      last_rec - recid + 1, cmd, device, blksize);
 	}
-	rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+	rq_for_each_segment(bv, req, iter) {
 		dst = page_address(bv->bv_page) + bv->bv_offset;
 		if (dasd_page_cache) {
 			char *copy = kmem_cache_alloc(dasd_page_cache,
@@ -1328,12 +1324,12 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
 {
 	struct dasd_eckd_private *private;
 	struct ccw1 *ccw;
-	struct bio *bio;
+	struct req_iterator iter;
 	struct bio_vec *bv;
 	char *dst, *cda;
 	unsigned int blksize, blk_per_trk, off;
 	sector_t recid;
-	int i, status;
+	int status;
 
 	if (!dasd_page_cache)
 		goto out;
@@ -1346,7 +1342,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
 	ccw++;
 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
 		ccw++;
-	rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+	rq_for_each_segment(bv, req, iter) {
 		dst = page_address(bv->bv_page) + bv->bv_offset;
 		for (off = 0; off < bv->bv_len; off += blksize) {
 			/* Skip locate record. */
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index da16ead8aff2..1d95822e0b8e 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -234,14 +234,13 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
 	struct LO_fba_data *LO_data;
 	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
-	struct bio *bio;
+	struct req_iterator iter;
 	struct bio_vec *bv;
 	char *dst;
 	int count, cidaw, cplength, datasize;
 	sector_t recid, first_rec, last_rec;
 	unsigned int blksize, off;
 	unsigned char cmd;
-	int i;
 
 	private = (struct dasd_fba_private *) device->private;
 	if (rq_data_dir(req) == READ) {
@@ -257,18 +256,15 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
 	/* Check struct bio and count the number of blocks for the request. */
 	count = 0;
 	cidaw = 0;
-	rq_for_each_bio(bio, req) {
-		bio_for_each_segment(bv, bio, i) {
-			if (bv->bv_len & (blksize - 1))
-				/* Fba can only do full blocks. */
-				return ERR_PTR(-EINVAL);
-			count += bv->bv_len >> (device->s2b_shift + 9);
+	rq_for_each_segment(bv, req, iter) {
+		if (bv->bv_len & (blksize - 1))
+			/* Fba can only do full blocks. */
+			return ERR_PTR(-EINVAL);
+		count += bv->bv_len >> (device->s2b_shift + 9);
 #if defined(CONFIG_64BIT)
-			if (idal_is_needed (page_address(bv->bv_page),
-					    bv->bv_len))
-				cidaw += bv->bv_len / blksize;
+		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
+			cidaw += bv->bv_len / blksize;
 #endif
-		}
 	}
 	/* Paranoia. */
 	if (count != last_rec - first_rec + 1)
@@ -304,7 +300,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
 		locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
 	}
 	recid = first_rec;
-	rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+	rq_for_each_segment(bv, req, iter) {
 		dst = page_address(bv->bv_page) + bv->bv_offset;
 		if (dasd_page_cache) {
 			char *copy = kmem_cache_alloc(dasd_page_cache,
@@ -359,11 +355,11 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
 {
 	struct dasd_fba_private *private;
 	struct ccw1 *ccw;
-	struct bio *bio;
+	struct req_iterator iter;
 	struct bio_vec *bv;
 	char *dst, *cda;
 	unsigned int blksize, off;
-	int i, status;
+	int status;
 
 	if (!dasd_page_cache)
 		goto out;
@@ -374,7 +370,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
 	ccw++;
 	if (private->rdc_data.mode.bits.data_chain != 0)
 		ccw++;
-	rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+	rq_for_each_segment(bv, req, iter) {
 		dst = page_address(bv->bv_page) + bv->bv_offset;
 		for (off = 0; off < bv->bv_len; off += blksize) {
 			/* Skip locate record. */
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 4d8798bacf97..859f870552e3 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -674,10 +674,10 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
 		}
 		bytes_done += bvec->bv_len;
 	}
-	bio_endio(bio, bytes_done, 0);
+	bio_endio(bio, 0);
 	return 0;
 fail:
-	bio_io_error(bio, bio->bi_size);
+	bio_io_error(bio);
 	return 0;
 }
 
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 354a060e5bec..0fbacc8b1063 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -230,12 +230,10 @@ static int xpram_make_request(struct request_queue *q, struct bio *bio)
 		}
 	}
 	set_bit(BIO_UPTODATE, &bio->bi_flags);
-	bytes = bio->bi_size;
-	bio->bi_size = 0;
-	bio->bi_end_io(bio, bytes, 0);
+	bio_end_io(bio, 0);
 	return 0;
 fail:
-	bio_io_error(bio, bio->bi_size);
+	bio_io_error(bio);
 	return 0;
 }
 
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 80e7a537e7d2..5b47e9cce75f 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1134,21 +1134,18 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
 {
 	struct tape_request *request;
 	struct ccw1 *ccw;
-	int count = 0, i;
+	int count = 0;
 	unsigned off;
 	char *dst;
 	struct bio_vec *bv;
-	struct bio *bio;
+	struct req_iterator iter;
 	struct tape_34xx_block_id * start_block;
 
 	DBF_EVENT(6, "xBREDid:");
 
 	/* Count the number of blocks for the request. */
-	rq_for_each_bio(bio, req) {
-		bio_for_each_segment(bv, bio, i) {
-			count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
-		}
-	}
+	rq_for_each_segment(bv, req, iter)
+		count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
 
 	/* Allocate the ccw request. */
 	request = tape_alloc_request(3+count+1, 8);
@@ -1175,18 +1172,15 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
 	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
 	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
 
-	rq_for_each_bio(bio, req) {
-		bio_for_each_segment(bv, bio, i) {
-			dst = kmap(bv->bv_page) + bv->bv_offset;
-			for (off = 0; off < bv->bv_len;
-			     off += TAPEBLOCK_HSEC_SIZE) {
-				ccw->flags = CCW_FLAG_CC;
-				ccw->cmd_code = READ_FORWARD;
-				ccw->count = TAPEBLOCK_HSEC_SIZE;
-				set_normalized_cda(ccw, (void*) __pa(dst));
-				ccw++;
-				dst += TAPEBLOCK_HSEC_SIZE;
-			}
+	rq_for_each_segment(bv, req, iter) {
+		dst = kmap(bv->bv_page) + bv->bv_offset;
+		for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
+			ccw->flags = CCW_FLAG_CC;
+			ccw->cmd_code = READ_FORWARD;
+			ccw->count = TAPEBLOCK_HSEC_SIZE;
+			set_normalized_cda(ccw, (void*) __pa(dst));
+			ccw++;
+			dst += TAPEBLOCK_HSEC_SIZE;
 		}
 	}
 
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 7e2b2ab49264..9f244c591eeb 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -623,21 +623,19 @@ tape_3590_bread(struct tape_device *device, struct request *req)
 {
 	struct tape_request *request;
 	struct ccw1 *ccw;
-	int count = 0, start_block, i;
+	int count = 0, start_block;
 	unsigned off;
 	char *dst;
 	struct bio_vec *bv;
-	struct bio *bio;
+	struct req_iterator iter;
 
 	DBF_EVENT(6, "xBREDid:");
 	start_block = req->sector >> TAPEBLOCK_HSEC_S2B;
 	DBF_EVENT(6, "start_block = %i\n", start_block);
 
-	rq_for_each_bio(bio, req) {
-		bio_for_each_segment(bv, bio, i) {
-			count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
-		}
-	}
+	rq_for_each_segment(bv, req, iter)
+		count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
+
 	request = tape_alloc_request(2 + count + 1, 4);
 	if (IS_ERR(request))
 		return request;
@@ -653,21 +651,18 @@ tape_3590_bread(struct tape_device *device, struct request *req)
 	 */
 	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
 
-	rq_for_each_bio(bio, req) {
-		bio_for_each_segment(bv, bio, i) {
-			dst = page_address(bv->bv_page) + bv->bv_offset;
-			for (off = 0; off < bv->bv_len;
-			     off += TAPEBLOCK_HSEC_SIZE) {
-				ccw->flags = CCW_FLAG_CC;
-				ccw->cmd_code = READ_FORWARD;
-				ccw->count = TAPEBLOCK_HSEC_SIZE;
-				set_normalized_cda(ccw, (void *) __pa(dst));
-				ccw++;
-				dst += TAPEBLOCK_HSEC_SIZE;
-			}
-			if (off > bv->bv_len)
-				BUG();
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
+			ccw->flags = CCW_FLAG_CC;
+			ccw->cmd_code = READ_FORWARD;
+			ccw->count = TAPEBLOCK_HSEC_SIZE;
+			set_normalized_cda(ccw, (void *) __pa(dst));
+			ccw++;
+			dst += TAPEBLOCK_HSEC_SIZE;
 		}
+		if (off > bv->bv_len)
+			BUG();
 	}
 	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
 	DBF_EVENT(6, "xBREDccwg\n");