Diffstat (limited to 'drivers/s390/cio/device_fsm.c')
-rw-r--r--	drivers/s390/cio/device_fsm.c	456
1 files changed, 128 insertions, 328 deletions
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index b9613d7df9ef..c9b852647f01 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -45,7 +45,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
 	sch = to_subchannel(cdev->dev.parent);
 	private = to_io_private(sch);
 	orb = &private->orb;
-	cc = stsch(sch->schid, &schib);
+	cc = stsch_err(sch->schid, &schib);
 
 	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
 	       "device information:\n", get_clock());
@@ -229,8 +229,8 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 
 	sch = to_subchannel(cdev->dev.parent);
 
-	ccw_device_set_timeout(cdev, 0);
-	cio_disable_subchannel(sch);
+	if (cio_disable_subchannel(sch))
+		state = DEV_STATE_NOT_OPER;
 	/*
 	 * Now that we tried recognition, we have performed device selection
 	 * through ssch() and the path information is up to date.
@@ -263,22 +263,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 	}
 	switch (state) {
 	case DEV_STATE_NOT_OPER:
-		CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
-			      "subchannel 0.%x.%04x\n",
-			      cdev->private->dev_id.devno,
-			      sch->schid.ssid, sch->schid.sch_no);
 		break;
 	case DEV_STATE_OFFLINE:
 		if (!cdev->online) {
 			ccw_device_update_sense_data(cdev);
-			/* Issue device info message. */
-			CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
-				      "CU Type/Mod = %04X/%02X, Dev Type/Mod "
-				      "= %04X/%02X\n",
-				      cdev->private->dev_id.ssid,
-				      cdev->private->dev_id.devno,
-				      cdev->id.cu_type, cdev->id.cu_model,
-				      cdev->id.dev_type, cdev->id.dev_model);
 			break;
 		}
 		cdev->private->state = DEV_STATE_OFFLINE;
@@ -289,16 +277,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 			wake_up(&cdev->private->wait_q);
 		} else {
 			ccw_device_update_sense_data(cdev);
-			PREPARE_WORK(&cdev->private->kick_work,
-				     ccw_device_do_unbind_bind);
-			queue_work(ccw_device_work, &cdev->private->kick_work);
+			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
 		}
 		return;
 	case DEV_STATE_BOXED:
-		CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
-			      " subchannel 0.%x.%04x\n",
-			      cdev->private->dev_id.devno,
-			      sch->schid.ssid, sch->schid.sch_no);
 		if (cdev->id.cu_type != 0) { /* device was recognized before */
 			cdev->private->flags.recog_done = 1;
 			cdev->private->state = DEV_STATE_BOXED;
@@ -331,40 +313,50 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
 	}
 }
 
+/**
+ * ccw_device_notify() - inform the device's driver about an event
+ * @cdev: device for which an event occured
+ * @event: event that occurred
+ *
+ * Returns:
+ *   -%EINVAL if the device is offline or has no driver.
+ *   -%EOPNOTSUPP if the device's driver has no notifier registered.
+ *   %NOTIFY_OK if the driver wants to keep the device.
+ *   %NOTIFY_BAD if the driver doesn't want to keep the device.
+ */
 int ccw_device_notify(struct ccw_device *cdev, int event)
 {
+	int ret = -EINVAL;
+
 	if (!cdev->drv)
-		return 0;
+		goto out;
 	if (!cdev->online)
-		return 0;
+		goto out;
 	CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
 		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
 		      event);
-	return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
-}
-
-static void cmf_reenable_delayed(struct work_struct *work)
-{
-	struct ccw_device_private *priv;
-	struct ccw_device *cdev;
-
-	priv = container_of(work, struct ccw_device_private, kick_work);
-	cdev = priv->cdev;
-	cmf_reenable(cdev);
+	if (!cdev->drv->notify) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+	if (cdev->drv->notify(cdev, event))
+		ret = NOTIFY_OK;
+	else
+		ret = NOTIFY_BAD;
+out:
+	return ret;
 }
 
 static void ccw_device_oper_notify(struct ccw_device *cdev)
 {
-	if (ccw_device_notify(cdev, CIO_OPER)) {
+	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
 		/* Reenable channel measurements, if needed. */
-		PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed);
-		queue_work(ccw_device_work, &cdev->private->kick_work);
+		ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
 		return;
 	}
 	/* Driver doesn't want device back. */
 	ccw_device_set_notoper(cdev);
-	PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unbind_bind);
-	queue_work(ccw_device_work, &cdev->private->kick_work);
+	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
 }
 
 /*
@@ -391,15 +383,16 @@ ccw_device_done(struct ccw_device *cdev, int state)
 	case DEV_STATE_BOXED:
 		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
 			      cdev->private->dev_id.devno, sch->schid.sch_no);
-		if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED))
-			ccw_device_schedule_sch_unregister(cdev);
+		if (cdev->online &&
+		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 		cdev->private->flags.donotify = 0;
 		break;
 	case DEV_STATE_NOT_OPER:
 		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
 			      cdev->private->dev_id.devno, sch->schid.sch_no);
-		if (!ccw_device_notify(cdev, CIO_GONE))
-			ccw_device_schedule_sch_unregister(cdev);
+		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 		else
 			ccw_device_set_disconnected(cdev);
 		cdev->private->flags.donotify = 0;
@@ -408,8 +401,8 @@ ccw_device_done(struct ccw_device *cdev, int state)
 		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
 			      "%04x\n", cdev->private->dev_id.devno,
 			      sch->schid.sch_no);
-		if (!ccw_device_notify(cdev, CIO_NO_PATH))
-			ccw_device_schedule_sch_unregister(cdev);
+		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 		else
 			ccw_device_set_disconnected(cdev);
 		cdev->private->flags.donotify = 0;
@@ -425,107 +418,12 @@ ccw_device_done(struct ccw_device *cdev, int state)
 	wake_up(&cdev->private->wait_q);
 }
 
-static int cmp_pgid(struct pgid *p1, struct pgid *p2)
-{
-	char *c1;
-	char *c2;
-
-	c1 = (char *)p1;
-	c2 = (char *)p2;
-
-	return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
-}
-
-static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
-{
-	int i;
-	int last;
-
-	last = 0;
-	for (i = 0; i < 8; i++) {
-		if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
-			/* No PGID yet */
-			continue;
-		if (cdev->private->pgid[last].inf.ps.state1 ==
-		    SNID_STATE1_RESET) {
-			/* First non-zero PGID */
-			last = i;
-			continue;
-		}
-		if (cmp_pgid(&cdev->private->pgid[i],
-			     &cdev->private->pgid[last]) == 0)
-			/* Non-conflicting PGIDs */
-			continue;
-
-		/* PGID mismatch, can't pathgroup. */
-		CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
-			      "0.%x.%04x, can't pathgroup\n",
-			      cdev->private->dev_id.ssid,
-			      cdev->private->dev_id.devno);
-		cdev->private->options.pgroup = 0;
-		return;
-	}
-	if (cdev->private->pgid[last].inf.ps.state1 ==
-	    SNID_STATE1_RESET)
-		/* No previous pgid found */
-		memcpy(&cdev->private->pgid[0],
-		       &channel_subsystems[0]->global_pgid,
-		       sizeof(struct pgid));
-	else
-		/* Use existing pgid */
-		memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
-		       sizeof(struct pgid));
-}
-
-/*
- * Function called from device_pgid.c after sense path ground has completed.
- */
-void
-ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
-{
-	struct subchannel *sch;
-
-	sch = to_subchannel(cdev->dev.parent);
-	switch (err) {
-	case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
-		cdev->private->options.pgroup = 0;
-		break;
-	case 0: /* success */
-	case -EACCES: /* partial success, some paths not operational */
-		/* Check if all pgids are equal or 0. */
-		__ccw_device_get_common_pgid(cdev);
-		break;
-	case -ETIME: /* Sense path group id stopped by timeout. */
-	case -EUSERS: /* device is reserved for someone else. */
-		ccw_device_done(cdev, DEV_STATE_BOXED);
-		return;
-	default:
-		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
-		return;
-	}
-	/* Start Path Group verification. */
-	cdev->private->state = DEV_STATE_VERIFY;
-	cdev->private->flags.doverify = 0;
-	ccw_device_verify_start(cdev);
-}
-
 /*
  * Start device recognition.
  */
-int
-ccw_device_recognition(struct ccw_device *cdev)
+void ccw_device_recognition(struct ccw_device *cdev)
 {
-	struct subchannel *sch;
-	int ret;
-
-	sch = to_subchannel(cdev->dev.parent);
-	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
-	if (ret != 0)
-		/* Couldn't enable the subchannel for i/o. Sick device. */
-		return ret;
-
-	/* After 60s the device recognition is considered to have failed. */
-	ccw_device_set_timeout(cdev, 60*HZ);
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 
 	/*
 	 * We used to start here with a sense pgid to find out whether a device
@@ -537,32 +435,33 @@ ccw_device_recognition(struct ccw_device *cdev)
 	 */
 	cdev->private->flags.recog_done = 0;
 	cdev->private->state = DEV_STATE_SENSE_ID;
+	if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
+		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+		return;
+	}
 	ccw_device_sense_id_start(cdev);
-	return 0;
 }
 
 /*
- * Handle timeout in device recognition.
+ * Handle events for states that use the ccw request infrastructure.
  */
-static void
-ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
 {
-	int ret;
-
-	ret = ccw_device_cancel_halt_clear(cdev);
-	switch (ret) {
-	case 0:
-		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
+	switch (e) {
+	case DEV_EVENT_NOTOPER:
+		ccw_request_notoper(cdev);
 		break;
-	case -ENODEV:
-		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+	case DEV_EVENT_INTERRUPT:
+		ccw_request_handler(cdev);
+		break;
+	case DEV_EVENT_TIMEOUT:
+		ccw_request_timeout(cdev);
 		break;
 	default:
-		ccw_device_set_timeout(cdev, 3*HZ);
+		break;
 	}
 }
 
-
 void
 ccw_device_verify_done(struct ccw_device *cdev, int err)
 {
@@ -571,21 +470,18 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
 	sch = to_subchannel(cdev->dev.parent);
 	/* Update schib - pom may have changed. */
 	if (cio_update_schib(sch)) {
-		cdev->private->flags.donotify = 0;
-		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
-		return;
+		err = -ENODEV;
+		goto callback;
 	}
 	/* Update lpm with verified path mask. */
 	sch->lpm = sch->vpm;
 	/* Repeat path verification? */
 	if (cdev->private->flags.doverify) {
-		cdev->private->flags.doverify = 0;
 		ccw_device_verify_start(cdev);
 		return;
 	}
+callback:
 	switch (err) {
-	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
-		cdev->private->options.pgroup = 0;
 	case 0:
 		ccw_device_done(cdev, DEV_STATE_ONLINE);
 		/* Deliver fake irb to device driver, if needed. */
@@ -604,18 +500,20 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
 		}
 		break;
 	case -ETIME:
+	case -EUSERS:
 		/* Reset oper notify indication after verify error. */
 		cdev->private->flags.donotify = 0;
 		ccw_device_done(cdev, DEV_STATE_BOXED);
 		break;
+	case -EACCES:
+		/* Reset oper notify indication after verify error. */
+		cdev->private->flags.donotify = 0;
+		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
+		break;
 	default:
 		/* Reset oper notify indication after verify error. */
 		cdev->private->flags.donotify = 0;
-		if (cdev->online) {
-			ccw_device_set_timeout(cdev, 0);
-			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
-		} else
-			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
 		break;
 	}
 }
@@ -640,17 +538,9 @@ ccw_device_online(struct ccw_device *cdev)
 		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
 		return ret;
 	}
-	/* Do we want to do path grouping? */
-	if (!cdev->private->options.pgroup) {
-		/* Start initial path verification. */
-		cdev->private->state = DEV_STATE_VERIFY;
-		cdev->private->flags.doverify = 0;
-		ccw_device_verify_start(cdev);
-		return 0;
-	}
-	/* Do a SensePGID first. */
-	cdev->private->state = DEV_STATE_SENSE_PGID;
-	ccw_device_sense_pgid_start(cdev);
+	/* Start initial path verification. */
+	cdev->private->state = DEV_STATE_VERIFY;
+	ccw_device_verify_start(cdev);
 	return 0;
 }
 
@@ -666,7 +556,6 @@ ccw_device_disband_done(struct ccw_device *cdev, int err)
 		break;
 	default:
 		cdev->private->flags.donotify = 0;
-		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
 		break;
 	}
@@ -703,7 +592,7 @@ ccw_device_offline(struct ccw_device *cdev)
 	if (cdev->private->state != DEV_STATE_ONLINE)
 		return -EINVAL;
 	/* Are we doing path grouping? */
-	if (!cdev->private->options.pgroup) {
+	if (!cdev->private->flags.pgroup) {
 		/* No, set state offline immediately. */
 		ccw_device_done(cdev, DEV_STATE_OFFLINE);
 		return 0;
@@ -715,43 +604,13 @@ ccw_device_offline(struct ccw_device *cdev)
 }
 
 /*
- * Handle timeout in device online/offline process.
- */
-static void
-ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
-{
-	int ret;
-
-	ret = ccw_device_cancel_halt_clear(cdev);
-	switch (ret) {
-	case 0:
-		ccw_device_done(cdev, DEV_STATE_BOXED);
-		break;
-	case -ENODEV:
-		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
-		break;
-	default:
-		ccw_device_set_timeout(cdev, 3*HZ);
-	}
-}
-
-/*
- * Handle not oper event in device recognition.
- */
-static void
-ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
-{
-	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
-}
-
-/*
  * Handle not operational event in non-special state.
  */
 static void ccw_device_generic_notoper(struct ccw_device *cdev,
 				       enum dev_event dev_event)
 {
-	if (!ccw_device_notify(cdev, CIO_GONE))
-		ccw_device_schedule_sch_unregister(cdev);
+	if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 	else
 		ccw_device_set_disconnected(cdev);
 }
@@ -802,11 +661,27 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
 	}
 	/* Device is idle, we can do the path verification. */
 	cdev->private->state = DEV_STATE_VERIFY;
-	cdev->private->flags.doverify = 0;
 	ccw_device_verify_start(cdev);
 }
 
 /*
+ * Handle path verification event in boxed state.
+ */
+static void ccw_device_boxed_verify(struct ccw_device *cdev,
+				    enum dev_event dev_event)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	if (cdev->online) {
+		if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
+			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		else
+			ccw_device_online_verify(cdev, dev_event);
+	} else
+		css_schedule_eval(sch->schid);
+}
+
+/*
  * Got an interrupt for a normal io (state online).
  */
 static void
@@ -815,7 +690,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
 	struct irb *irb;
 	int is_cmd;
 
-	irb = (struct irb *) __LC_IRB;
+	irb = (struct irb *)&S390_lowcore.irb;
 	is_cmd = !scsw_is_tm(&irb->scsw);
 	/* Check for unsolicited interrupt. */
 	if (!scsw_is_solicited(&irb->scsw)) {
@@ -880,7 +755,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
 {
 	struct irb *irb;
 
-	irb = (struct irb *) __LC_IRB;
+	irb = (struct irb *)&S390_lowcore.irb;
 	/* Check for unsolicited interrupt. */
 	if (scsw_stctl(&irb->scsw) ==
 	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
@@ -904,12 +779,6 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
 	 */
 	if (scsw_fctl(&irb->scsw) &
 	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
-		/* Retry Basic Sense if requested. */
-		if (cdev->private->flags.intretry) {
-			cdev->private->flags.intretry = 0;
-			ccw_device_do_sense(cdev, irb);
-			return;
-		}
 		cdev->private->flags.dosense = 0;
 		memset(&cdev->private->irb, 0, sizeof(struct irb));
 		ccw_device_accumulate_irb(cdev, irb);
@@ -933,21 +802,6 @@ call_handler:
 }
 
 static void
-ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
-{
-	struct irb *irb;
-
-	irb = (struct irb *) __LC_IRB;
-	/* Accumulate status. We don't do basic sense. */
-	ccw_device_accumulate_irb(cdev, irb);
-	/* Remember to clear irb to avoid residuals. */
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
-	/* Try to start delayed device verification. */
-	ccw_device_online_verify(cdev, 0);
-	/* Note: Don't call handler for cio initiated clear! */
-}
-
-static void
 ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
 {
 	struct subchannel *sch;
@@ -1004,32 +858,6 @@ ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
 }
 
 static void
-ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
-{
-	struct irb *irb;
-
-	switch (dev_event) {
-	case DEV_EVENT_INTERRUPT:
-		irb = (struct irb *) __LC_IRB;
-		/* Check for unsolicited interrupt. */
-		if ((scsw_stctl(&irb->scsw) ==
-		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
-		    (!scsw_cc(&irb->scsw)))
-			/* FIXME: we should restart stlck here, but this
-			 * is extremely unlikely ... */
-			goto out_wakeup;
-
-		ccw_device_accumulate_irb(cdev, irb);
-		/* We don't care about basic sense etc. */
-		break;
-	default: /* timeout */
-		break;
-	}
-out_wakeup:
-	wake_up(&cdev->private->wait_q);
-}
-
-static void
 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
 {
 	struct subchannel *sch;
@@ -1038,10 +866,6 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
 	if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
 		/* Couldn't enable the subchannel for i/o. Sick device. */
 		return;
-
-	/* After 60s the device recognition is considered to have failed. */
-	ccw_device_set_timeout(cdev, 60*HZ);
-
 	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
 	ccw_device_sense_id_start(cdev);
 }
@@ -1072,22 +896,20 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev)
 
 	/* We should also udate ssd info, but this has to wait. */
 	/* Check if this is another device which appeared on the same sch. */
-	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
-		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_move_to_orphanage);
-		queue_work(slow_path_wq, &cdev->private->kick_work);
-	} else
+	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
+		css_schedule_eval(sch->schid);
+	else
 		ccw_device_start_id(cdev, 0);
 }
 
-static void
-ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void ccw_device_disabled_irq(struct ccw_device *cdev,
+				    enum dev_event dev_event)
 {
 	struct subchannel *sch;
 
 	sch = to_subchannel(cdev->dev.parent);
 	/*
-	 * An interrupt in state offline means a previous disable was not
+	 * An interrupt in a disabled state means a previous disable was not
 	 * successful - should not happen, but we try to disable again.
 	 */
 	cio_disable_subchannel(sch);
@@ -1113,10 +935,7 @@ static void
 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
 {
 	ccw_device_set_timeout(cdev, 0);
-	if (dev_event == DEV_EVENT_NOTOPER)
-		cdev->private->state = DEV_STATE_NOT_OPER;
-	else
-		cdev->private->state = DEV_STATE_OFFLINE;
+	cdev->private->state = DEV_STATE_NOT_OPER;
 	wake_up(&cdev->private->wait_q);
 }
 
@@ -1126,17 +945,11 @@ ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 	int ret;
 
 	ret = ccw_device_cancel_halt_clear(cdev);
-	switch (ret) {
-	case 0:
-		cdev->private->state = DEV_STATE_OFFLINE;
-		wake_up(&cdev->private->wait_q);
-		break;
-	case -ENODEV:
+	if (ret == -EBUSY) {
+		ccw_device_set_timeout(cdev, HZ/10);
+	} else {
 		cdev->private->state = DEV_STATE_NOT_OPER;
 		wake_up(&cdev->private->wait_q);
-		break;
-	default:
-		ccw_device_set_timeout(cdev, HZ/10);
 	}
 }
 
@@ -1150,50 +963,37 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
 }
 
 /*
- * Bug operation action.
- */
-static void
-ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
-{
-	CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
-		      "0.%x.%04x\n", cdev->private->state, dev_event,
-		      cdev->private->dev_id.ssid,
-		      cdev->private->dev_id.devno);
-	BUG();
-}
-
-/*
  * device statemachine
  */
 fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 	[DEV_STATE_NOT_OPER] = {
 		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
-		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
 	},
 	[DEV_STATE_SENSE_PGID] = {
-		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
-		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
-		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
 	},
 	[DEV_STATE_SENSE_ID] = {
-		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
-		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
-		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
 	},
 	[DEV_STATE_OFFLINE] = {
 		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
-		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
 		[DEV_EVENT_VERIFY]	= ccw_device_offline_verify,
 	},
 	[DEV_STATE_VERIFY] = {
-		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
-		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
-		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
 		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
 	},
 	[DEV_STATE_ONLINE] = {
@@ -1209,24 +1009,18 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
 	},
 	[DEV_STATE_DISBAND_PGID] = {
-		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
-		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
-		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
 	},
 	[DEV_STATE_BOXED] = {
 		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
-		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
-		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
-		[DEV_EVENT_VERIFY]	= ccw_device_nop,
-	},
-	/* states to wait for i/o completion before doing something */
-	[DEV_STATE_CLEAR_VERIFY] = {
-		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
-		[DEV_EVENT_INTERRUPT]	= ccw_device_clear_verify,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_nop,
 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
-		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+		[DEV_EVENT_VERIFY]	= ccw_device_boxed_verify,
 	},
+	/* states to wait for i/o completion before doing something */
 	[DEV_STATE_TIMEOUT_KILL] = {
 		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
 		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
@@ -1243,13 +1037,13 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 	[DEV_STATE_DISCONNECTED] = {
 		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
 		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
-		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
 		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
 	},
 	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
-		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
-		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
-		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
 	},
 	[DEV_STATE_CMFCHANGE] = {
@@ -1264,6 +1058,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
 		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
 	},
+	[DEV_STATE_STEAL_LOCK] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
+		[DEV_EVENT_VERIFY]	= ccw_device_nop,
+	},
 };
 
 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);