Diffstat (limited to 'drivers/s390/cio/device_pgid.c')
-rw-r--r--  drivers/s390/cio/device_pgid.c  123
1 file changed, 105 insertions(+), 18 deletions(-)
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 908d287f66c1..37ada05e82a5 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -23,6 +23,8 @@
 #define PGID_RETRIES	256
 #define PGID_TIMEOUT	(10 * HZ)
 
+static void verify_start(struct ccw_device *cdev);
+
 /*
  * Process path verification data and report result.
  */
@@ -70,8 +72,8 @@ static void nop_do(struct ccw_device *cdev)
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 	struct ccw_request *req = &cdev->private->req;
 
-	/* Adjust lpm. */
-	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
+			      ~cdev->private->path_noirq_mask);
 	if (!req->lpm)
 		goto out_nopath;
 	nop_build_cp(cdev);
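Illustrative note (not part of the patch): the hunk above narrows the mask passed to lpm_adjust() so that paths recorded in the new path_noirq_mask are skipped on further NO-OP attempts. The sketch below uses a simplified copy of lpm_adjust() as I read it from this file (it shifts the candidate path bit right until it lands on a bit set in the mask); the mask values are made up for the example.

#include <stdio.h>

/* Simplified stand-in for lpm_adjust(): walk the candidate path bit to the
 * right until it coincides with a path allowed by 'mask'. */
static int lpm_adjust(int lpm, int mask)
{
        while (lpm && ((lpm & mask) == 0))
                lpm >>= 1;
        return lpm;
}

int main(void)
{
        int pam = 0xff;         /* installed paths */
        int opm = 0xf0;         /* operational paths */
        int noirq = 0x80;       /* paths that previously timed out */

        /* Without the exclusion the first candidate stays 0x80; with
         * ~noirq applied the NO-OP moves on to path 0x40. */
        printf("old=%02x new=%02x\n",
               lpm_adjust(0x80, pam & opm),
               lpm_adjust(0x80, pam & opm & ~noirq));
        return 0;
}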
@@ -102,10 +104,20 @@ static void nop_callback(struct ccw_device *cdev, void *data, int rc)
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 	struct ccw_request *req = &cdev->private->req;
 
-	if (rc == 0)
+	switch (rc) {
+	case 0:
 		sch->vpm |= req->lpm;
-	else if (rc != -EACCES)
+		break;
+	case -ETIME:
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
+	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
+		break;
+	default:
 		goto err;
+	}
+	/* Continue on the next path. */
 	req->lpm >>= 1;
 	nop_do(cdev);
 	return;
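The switch introduced above sorts the outcome for each probed path into one of three per-device masks: usable paths go into vpm, timeouts into path_noirq_mask, and not-operational paths into path_notoper_mask; any other error aborts verification. A minimal, self-contained sketch of that classification, with a stand-in struct instead of the kernel's ccw_device private data:

#include <errno.h>
#include <stdio.h>

/* Stand-in for the per-device path bookkeeping touched by nop_callback(). */
struct path_state {
        unsigned char vpm;               /* paths that answered the NO-OP */
        unsigned char path_noirq_mask;   /* paths that timed out */
        unsigned char path_notoper_mask; /* paths reported not operational */
};

/* Record the result for one path (a single bit in lpm); a non-zero return
 * means "unexpected error, abort verification", mirroring the goto err. */
static int classify_path(struct path_state *st, unsigned char lpm, int rc)
{
        switch (rc) {
        case 0:
                st->vpm |= lpm;
                break;
        case -ETIME:
                st->path_noirq_mask |= lpm;
                break;
        case -EACCES:
                st->path_notoper_mask |= lpm;
                break;
        default:
                return rc;
        }
        return 0;
}

int main(void)
{
        struct path_state st = { 0, 0, 0 };

        classify_path(&st, 0x80, 0);       /* path 0x80 answered */
        classify_path(&st, 0x40, -ETIME);  /* path 0x40 timed out */
        classify_path(&st, 0x20, -EACCES); /* path 0x20 not operational */
        printf("vpm=%02x noirq=%02x notoper=%02x\n",
               st.vpm, st.path_noirq_mask, st.path_notoper_mask);
        return 0;
}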
@@ -132,6 +144,48 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn)
 	req->cp = cp;
 }
 
+static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	if (rc) {
+		/* We don't know the path groups' state. Abort. */
+		verify_done(cdev, rc);
+		return;
+	}
+	/*
+	 * Path groups have been reset. Restart path verification but
+	 * leave paths in path_noirq_mask out.
+	 */
+	cdev->private->flags.pgid_unknown = 0;
+	verify_start(cdev);
+}
+
+/*
+ * Reset pathgroups and restart path verification, leave unusable paths out.
+ */
+static void pgid_wipeout_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+	struct ccw_request *req = &cdev->private->req;
+	u8 fn;
+
+	CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
+		      id->ssid, id->devno, cdev->private->pgid_valid_mask,
+		      cdev->private->path_noirq_mask);
+
+	/* Initialize request data. */
+	memset(req, 0, sizeof(*req));
+	req->timeout = PGID_TIMEOUT;
+	req->maxretries = PGID_RETRIES;
+	req->lpm = sch->schib.pmcw.pam;
+	req->callback = pgid_wipeout_callback;
+	fn = SPID_FUNC_DISBAND;
+	if (cdev->private->flags.mpath)
+		fn |= SPID_FUNC_MULTI_PATH;
+	spid_build_cp(cdev, fn);
+	ccw_request_start(cdev);
+}
+
 /*
  * Perform establish/resign SET PGID on a single path.
  */
@@ -157,11 +211,14 @@ static void spid_do(struct ccw_device *cdev)
 	return;
 
 out_nopath:
+	if (cdev->private->flags.pgid_unknown) {
+		/* At least one SPID could be partially done. */
+		pgid_wipeout_start(cdev);
+		return;
+	}
 	verify_done(cdev, sch->vpm ? 0 : -EACCES);
 }
 
-static void verify_start(struct ccw_device *cdev);
-
 /*
  * Process SET PGID request result for a single path.
  */
@@ -174,7 +231,12 @@ static void spid_callback(struct ccw_device *cdev, void *data, int rc)
 	case 0:
 		sch->vpm |= req->lpm & sch->opm;
 		break;
+	case -ETIME:
+		cdev->private->flags.pgid_unknown = 1;
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
 	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
 		break;
 	case -EOPNOTSUPP:
 		if (cdev->private->flags.mpath) {
@@ -330,8 +392,9 @@ static void snid_done(struct ccw_device *cdev, int rc)
 	else {
 		donepm = pgid_to_donepm(cdev);
 		sch->vpm = donepm & sch->opm;
-		cdev->private->pgid_todo_mask &= ~donepm;
 		cdev->private->pgid_reset_mask |= reset;
+		cdev->private->pgid_todo_mask &=
+			~(donepm | cdev->private->path_noirq_mask);
 		pgid_fill(cdev, pgid);
 	}
 out:
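Worked example (mask values invented for illustration): with the change above, paths that timed out during SENSE PGID are treated like already-done paths when computing what remains for SET PGID, so they are neither retried nor counted as failures at this stage.

#include <stdio.h>

int main(void)
{
        unsigned char pam = 0xf0;    /* installed paths; pgid_todo_mask starts as pam */
        unsigned char donepm = 0xc0; /* paths already in the target path-group state */
        unsigned char noirq = 0x10;  /* paths that timed out during SENSE PGID */

        unsigned char todo_old = pam & ~donepm;           /* 0x30: would retry 0x10 */
        unsigned char todo_new = pam & ~(donepm | noirq); /* 0x20: leaves 0x10 out */

        printf("old todo=%02x new todo=%02x\n", todo_old, todo_new);
        return 0;
}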
@@ -341,6 +404,10 @@ out:
 		cdev->private->pgid_todo_mask, mismatch, reserved, reset);
 	switch (rc) {
 	case 0:
+		if (cdev->private->flags.pgid_unknown) {
+			pgid_wipeout_start(cdev);
+			return;
+		}
 		/* Anything left to do? */
 		if (cdev->private->pgid_todo_mask == 0) {
 			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
@@ -384,9 +451,10 @@ static void snid_do(struct ccw_device *cdev)
 {
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 	struct ccw_request *req = &cdev->private->req;
+	int ret;
 
-	/* Adjust lpm if paths are not set in pam. */
-	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
+			      ~cdev->private->path_noirq_mask);
 	if (!req->lpm)
 		goto out_nopath;
 	snid_build_cp(cdev);
@@ -394,7 +462,13 @@ static void snid_do(struct ccw_device *cdev)
 	return;
 
 out_nopath:
-	snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
+	if (cdev->private->pgid_valid_mask)
+		ret = 0;
+	else if (cdev->private->path_noirq_mask)
+		ret = -ETIME;
+	else
+		ret = -EACCES;
+	snid_done(cdev, ret);
 }
 
 /*
@@ -404,10 +478,21 @@ static void snid_callback(struct ccw_device *cdev, void *data, int rc)
 {
 	struct ccw_request *req = &cdev->private->req;
 
-	if (rc == 0)
+	switch (rc) {
+	case 0:
 		cdev->private->pgid_valid_mask |= req->lpm;
-	else if (rc != -EACCES)
+		break;
+	case -ETIME:
+		cdev->private->flags.pgid_unknown = 1;
+		cdev->private->path_noirq_mask |= req->lpm;
+		break;
+	case -EACCES:
+		cdev->private->path_notoper_mask |= req->lpm;
+		break;
+	default:
 		goto err;
+	}
+	/* Continue on the next path. */
 	req->lpm >>= 1;
 	snid_do(cdev);
 	return;
@@ -427,6 +512,13 @@ static void verify_start(struct ccw_device *cdev)
 
 	sch->vpm = 0;
 	sch->lpm = sch->schib.pmcw.pam;
+
+	/* Initialize PGID data. */
+	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+	cdev->private->pgid_valid_mask = 0;
+	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+	cdev->private->path_notoper_mask = 0;
+
 	/* Initialize request data. */
 	memset(req, 0, sizeof(*req));
 	req->timeout = PGID_TIMEOUT;
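With this hunk and the two below, the PGID bookkeeping is reset in verify_start() rather than in ccw_device_verify_start(), so a restart triggered by pgid_wipeout_callback() gets fresh PGID state as well, while path_noirq_mask is cleared only at the outer entry point and therefore survives the wipeout restart. A hypothetical, much-simplified model of that split (field names mirror the kernel code, the types do not):

#include <stdio.h>

struct dev_state {
        unsigned char pam;               /* installed paths */
        unsigned char pgid_valid_mask;
        unsigned char pgid_todo_mask;
        unsigned char path_notoper_mask;
        unsigned char path_noirq_mask;
};

/* Per-attempt init: runs for the first pass and again after a PGID wipeout. */
static void verify_start(struct dev_state *st)
{
        st->pgid_valid_mask = 0;
        st->pgid_todo_mask = st->pam;
        st->path_notoper_mask = 0;
        /* path_noirq_mask is deliberately left untouched here */
}

/* Top-level entry point: additionally forgets previously timed-out paths. */
static void ccw_device_verify_start(struct dev_state *st)
{
        st->path_noirq_mask = 0;
        verify_start(st);
}

int main(void)
{
        struct dev_state st = { .pam = 0xf0, .path_noirq_mask = 0x80 };

        verify_start(&st);             /* wipeout restart: 0x80 stays excluded */
        printf("after restart:     noirq=%02x\n", st.path_noirq_mask);
        ccw_device_verify_start(&st);  /* fresh verification: mask cleared */
        printf("after fresh start: noirq=%02x\n", st.path_noirq_mask);
        return 0;
}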
@@ -459,14 +551,8 @@
  */
 void ccw_device_verify_start(struct ccw_device *cdev)
 {
-	struct subchannel *sch = to_subchannel(cdev->dev.parent);
-
 	CIO_TRACE_EVENT(4, "vrfy");
 	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
-	/* Initialize PGID data. */
-	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
-	cdev->private->pgid_valid_mask = 0;
-	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
 	/*
 	 * Initialize pathgroup and multipath state with target values.
 	 * They may change in the course of path verification.
@@ -474,6 +560,7 @@ void ccw_device_verify_start(struct ccw_device *cdev)
 	cdev->private->flags.pgroup = cdev->private->options.pgroup;
 	cdev->private->flags.mpath = cdev->private->options.mpath;
 	cdev->private->flags.doverify = 0;
+	cdev->private->path_noirq_mask = 0;
 	verify_start(cdev);
 }
 