about summary refs log tree commit diff stats
path: root/drivers/s390
diff options
context:
space:
mode:
authorStefan Weinhuber <wein@de.ibm.com>2013-08-16 09:57:32 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2013-08-22 06:20:09 -0400
commit558b9ef00e128b2c97df6e4243152af3d0da3653 (patch)
treef017c9b3613ea9779bc23371731e217ceac2676a /drivers/s390
parent5c474a1e2265c5156e6c63f87a7e99053039b8b9 (diff)
s390/dasd: enable raw_track_access reads without direct I/O
The ECKD protocol supports reading of tracks with arbitrary format as raw track images. The DASD device driver supports this in its raw_track_access mode. In this mode it maps each track to sixteen 4096 byte sectors and rejects all requests that are not properly aligned to this mapping. An application that wants to use a DASD in raw_track_access mode will usually use direct I/O to make sure that properly aligned requests are directly submitted to the driver. However, applications that are not aware of this mode, e.g. udev, will encounter I/O errors. To make the use without direct I/O possible and avoid this kind of alignment errors, we now pad unaligned read requests with a dummy page, so that we can always read full tracks. Please note that writing is still only possible for full track images that are properly aligned. Signed-off-by: Stefan Weinhuber <wein@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390')
-rw-r--r--drivers/s390/block/dasd_eckd.c54
1 file changed, 43 insertions, 11 deletions
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index e61a6deea3c0..5adb2042e824 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -85,6 +85,8 @@ MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
85 85
86static struct ccw_driver dasd_eckd_driver; /* see below */ 86static struct ccw_driver dasd_eckd_driver; /* see below */
87 87
88static void *rawpadpage;
89
88#define INIT_CQR_OK 0 90#define INIT_CQR_OK 0
89#define INIT_CQR_UNFORMATTED 1 91#define INIT_CQR_UNFORMATTED 1
90#define INIT_CQR_ERROR 2 92#define INIT_CQR_ERROR 2
@@ -3237,18 +3239,26 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
3237 unsigned int seg_len, len_to_track_end; 3239 unsigned int seg_len, len_to_track_end;
3238 unsigned int first_offs; 3240 unsigned int first_offs;
3239 unsigned int cidaw, cplength, datasize; 3241 unsigned int cidaw, cplength, datasize;
3240 sector_t first_trk, last_trk; 3242 sector_t first_trk, last_trk, sectors;
3243 sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
3241 unsigned int pfx_datasize; 3244 unsigned int pfx_datasize;
3242 3245
3243 /* 3246 /*
3244 * raw track access needs to be mutiple of 64k and on 64k boundary 3247 * raw track access needs to be mutiple of 64k and on 64k boundary
3248 * For read requests we can fix an incorrect alignment by padding
3249 * the request with dummy pages.
3245 */ 3250 */
3246 if ((blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK) != 0) { 3251 start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
3247 cqr = ERR_PTR(-EINVAL); 3252 end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
3248 goto out; 3253 DASD_RAW_SECTORS_PER_TRACK;
3249 } 3254 end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
3250 if (((blk_rq_pos(req) + blk_rq_sectors(req)) % 3255 DASD_RAW_SECTORS_PER_TRACK;
3251 DASD_RAW_SECTORS_PER_TRACK) != 0) { 3256 basedev = block->base;
3257 if ((start_padding_sectors || end_padding_sectors) &&
3258 (rq_data_dir(req) == WRITE)) {
3259 DBF_DEV_EVENT(DBF_ERR, basedev,
3260 "raw write not track aligned (%lu,%lu) req %p",
3261 start_padding_sectors, end_padding_sectors, req);
3252 cqr = ERR_PTR(-EINVAL); 3262 cqr = ERR_PTR(-EINVAL);
3253 goto out; 3263 goto out;
3254 } 3264 }
@@ -3258,7 +3268,6 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
3258 DASD_RAW_SECTORS_PER_TRACK; 3268 DASD_RAW_SECTORS_PER_TRACK;
3259 trkcount = last_trk - first_trk + 1; 3269 trkcount = last_trk - first_trk + 1;
3260 first_offs = 0; 3270 first_offs = 0;
3261 basedev = block->base;
3262 3271
3263 if (rq_data_dir(req) == READ) 3272 if (rq_data_dir(req) == READ)
3264 cmd = DASD_ECKD_CCW_READ_TRACK; 3273 cmd = DASD_ECKD_CCW_READ_TRACK;
@@ -3307,12 +3316,26 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
3307 } 3316 }
3308 3317
3309 idaws = (unsigned long *)(cqr->data + pfx_datasize); 3318 idaws = (unsigned long *)(cqr->data + pfx_datasize);
3310
3311 len_to_track_end = 0; 3319 len_to_track_end = 0;
3312 3320 if (start_padding_sectors) {
3321 ccw[-1].flags |= CCW_FLAG_CC;
3322 ccw->cmd_code = cmd;
3323 /* maximum 3390 track size */
3324 ccw->count = 57326;
3325 /* 64k map to one track */
3326 len_to_track_end = 65536 - start_padding_sectors * 512;
3327 ccw->cda = (__u32)(addr_t)idaws;
3328 ccw->flags |= CCW_FLAG_IDA;
3329 ccw->flags |= CCW_FLAG_SLI;
3330 ccw++;
3331 for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
3332 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
3333 }
3313 rq_for_each_segment(bv, req, iter) { 3334 rq_for_each_segment(bv, req, iter) {
3314 dst = page_address(bv->bv_page) + bv->bv_offset; 3335 dst = page_address(bv->bv_page) + bv->bv_offset;
3315 seg_len = bv->bv_len; 3336 seg_len = bv->bv_len;
3337 if (cmd == DASD_ECKD_CCW_READ_TRACK)
3338 memset(dst, 0, seg_len);
3316 if (!len_to_track_end) { 3339 if (!len_to_track_end) {
3317 ccw[-1].flags |= CCW_FLAG_CC; 3340 ccw[-1].flags |= CCW_FLAG_CC;
3318 ccw->cmd_code = cmd; 3341 ccw->cmd_code = cmd;
@@ -3328,7 +3351,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
3328 len_to_track_end -= seg_len; 3351 len_to_track_end -= seg_len;
3329 idaws = idal_create_words(idaws, dst, seg_len); 3352 idaws = idal_create_words(idaws, dst, seg_len);
3330 } 3353 }
3331 3354 for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
3355 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
3332 if (blk_noretry_request(req) || 3356 if (blk_noretry_request(req) ||
3333 block->base->features & DASD_FEATURE_FAILFAST) 3357 block->base->features & DASD_FEATURE_FAILFAST)
3334 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3358 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
@@ -4479,12 +4503,19 @@ dasd_eckd_init(void)
4479 kfree(dasd_reserve_req); 4503 kfree(dasd_reserve_req);
4480 return -ENOMEM; 4504 return -ENOMEM;
4481 } 4505 }
4506 rawpadpage = (void *)__get_free_page(GFP_KERNEL);
4507 if (!rawpadpage) {
4508 kfree(path_verification_worker);
4509 kfree(dasd_reserve_req);
4510 return -ENOMEM;
4511 }
4482 ret = ccw_driver_register(&dasd_eckd_driver); 4512 ret = ccw_driver_register(&dasd_eckd_driver);
4483 if (!ret) 4513 if (!ret)
4484 wait_for_device_probe(); 4514 wait_for_device_probe();
4485 else { 4515 else {
4486 kfree(path_verification_worker); 4516 kfree(path_verification_worker);
4487 kfree(dasd_reserve_req); 4517 kfree(dasd_reserve_req);
4518 free_page((unsigned long)rawpadpage);
4488 } 4519 }
4489 return ret; 4520 return ret;
4490} 4521}
@@ -4495,6 +4526,7 @@ dasd_eckd_cleanup(void)
4495 ccw_driver_unregister(&dasd_eckd_driver); 4526 ccw_driver_unregister(&dasd_eckd_driver);
4496 kfree(path_verification_worker); 4527 kfree(path_verification_worker);
4497 kfree(dasd_reserve_req); 4528 kfree(dasd_reserve_req);
4529 free_page((unsigned long)rawpadpage);
4498} 4530}
4499 4531
4500module_init(dasd_eckd_init); 4532module_init(dasd_eckd_init);