Diffstat (limited to 'drivers/s390/cio/device_fsm.c')
 -rw-r--r--  drivers/s390/cio/device_fsm.c | 117
 1 file changed, 35 insertions(+), 82 deletions(-)
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index b67620208f36..de3d0857db9f 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -59,18 +59,6 @@ device_set_disconnected(struct subchannel *sch)
 	cdev->private->state = DEV_STATE_DISCONNECTED;
 }
 
-void
-device_set_waiting(struct subchannel *sch)
-{
-	struct ccw_device *cdev;
-
-	if (!sch->dev.driver_data)
-		return;
-	cdev = sch->dev.driver_data;
-	ccw_device_set_timeout(cdev, 10*HZ);
-	cdev->private->state = DEV_STATE_WAIT4IO;
-}
-
 /*
  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
  */
@@ -183,9 +171,9 @@ ccw_device_handle_oper(struct ccw_device *cdev)
 	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
 	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
 	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
-	    cdev->private->devno != sch->schib.pmcw.dev) {
+	    cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_do_unreg_rereg, (void *)cdev);
+			     ccw_device_do_unreg_rereg, cdev);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 		return 0;
 	}
@@ -255,7 +243,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 	case DEV_STATE_NOT_OPER:
 		CIO_DEBUG(KERN_WARNING, 2,
 			  "SenseID : unknown device %04x on subchannel "
-			  "0.%x.%04x\n", cdev->private->devno,
+			  "0.%x.%04x\n", cdev->private->dev_id.devno,
 			  sch->schid.ssid, sch->schid.sch_no);
 		break;
 	case DEV_STATE_OFFLINE:
@@ -282,14 +270,15 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 		CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
 			  "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
 			  "%04X/%02X\n",
-			  cdev->private->ssid, cdev->private->devno,
+			  cdev->private->dev_id.ssid,
+			  cdev->private->dev_id.devno,
 			  cdev->id.cu_type, cdev->id.cu_model,
 			  cdev->id.dev_type, cdev->id.dev_model);
 		break;
 	case DEV_STATE_BOXED:
 		CIO_DEBUG(KERN_WARNING, 2,
 			  "SenseID : boxed device %04x on subchannel "
-			  "0.%x.%04x\n", cdev->private->devno,
+			  "0.%x.%04x\n", cdev->private->dev_id.devno,
 			  sch->schid.ssid, sch->schid.sch_no);
 		break;
 	}
@@ -325,13 +314,13 @@ ccw_device_oper_notify(void *data)
 	struct subchannel *sch;
 	int ret;
 
-	cdev = (struct ccw_device *)data;
+	cdev = data;
 	sch = to_subchannel(cdev->dev.parent);
 	ret = (sch->driver && sch->driver->notify) ?
 		sch->driver->notify(&sch->dev, CIO_OPER) : 0;
 	if (!ret)
 		/* Driver doesn't want device back. */
-		ccw_device_do_unreg_rereg((void *)cdev);
+		ccw_device_do_unreg_rereg(cdev);
 	else {
 		/* Reenable channel measurements, if needed. */
 		cmf_reenable(cdev);
@@ -363,12 +352,12 @@ ccw_device_done(struct ccw_device *cdev, int state)
 	if (state == DEV_STATE_BOXED)
 		CIO_DEBUG(KERN_WARNING, 2,
 			  "Boxed device %04x on subchannel %04x\n",
-			  cdev->private->devno, sch->schid.sch_no);
+			  cdev->private->dev_id.devno, sch->schid.sch_no);
 
 	if (cdev->private->flags.donotify) {
 		cdev->private->flags.donotify = 0;
 		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
-			     (void *)cdev);
+			     cdev);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -412,7 +401,8 @@ static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
 			/* PGID mismatch, can't pathgroup. */
 			CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
 				      "0.%x.%04x, can't pathgroup\n",
-				      cdev->private->ssid, cdev->private->devno);
+				      cdev->private->dev_id.ssid,
+				      cdev->private->dev_id.devno);
 			cdev->private->options.pgroup = 0;
 			return;
 		}
@@ -523,7 +513,7 @@ ccw_device_nopath_notify(void *data)
 	struct subchannel *sch;
 	int ret;
 
-	cdev = (struct ccw_device *)data;
+	cdev = data;
 	sch = to_subchannel(cdev->dev.parent);
 	/* Extra sanity. */
 	if (sch->lpm)
@@ -537,7 +527,7 @@ ccw_device_nopath_notify(void *data)
 		if (get_device(&cdev->dev)) {
 			PREPARE_WORK(&cdev->private->kick_work,
 				     ccw_device_call_sch_unregister,
-				     (void *)cdev);
+				     cdev);
 			queue_work(ccw_device_work,
 				   &cdev->private->kick_work);
 		} else
@@ -588,11 +578,15 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
 		}
 		break;
 	case -ETIME:
+		/* Reset oper notify indication after verify error. */
+		cdev->private->flags.donotify = 0;
 		ccw_device_done(cdev, DEV_STATE_BOXED);
 		break;
 	default:
+		/* Reset oper notify indication after verify error. */
+		cdev->private->flags.donotify = 0;
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, (void *)cdev);
+			     ccw_device_nopath_notify, cdev);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
 		break;
@@ -723,7 +717,7 @@ ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
 	sch = to_subchannel(cdev->dev.parent);
 	if (get_device(&cdev->dev)) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_call_sch_unregister, (void *)cdev);
+			     ccw_device_call_sch_unregister, cdev);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -754,7 +748,7 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
 	}
 	if (get_device(&cdev->dev)) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_call_sch_unregister, (void *)cdev);
+			     ccw_device_call_sch_unregister, cdev);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -859,7 +853,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 	sch = to_subchannel(cdev->dev.parent);
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, (void *)cdev);
+			     ccw_device_nopath_notify, cdev);
 		queue_work(ccw_device_notify_work,
 			   &cdev->private->kick_work);
 	} else
@@ -885,7 +879,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
 			/* Basic sense hasn't started. Try again. */
 			ccw_device_do_sense(cdev, irb);
 		else {
-			printk("Huh? %s(%s): unsolicited interrupt...\n",
+			printk(KERN_INFO "Huh? %s(%s): unsolicited "
+			       "interrupt...\n",
 			       __FUNCTION__, cdev->dev.bus_id);
 			if (cdev->handler)
 				cdev->handler (cdev, 0, irb);
@@ -944,10 +939,10 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
 	cdev->private->state = DEV_STATE_ONLINE;
 	if (cdev->handler)
 		cdev->handler(cdev, cdev->private->intparm,
-			      ERR_PTR(-ETIMEDOUT));
+			      ERR_PTR(-EIO));
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, (void *)cdev);
+			     ccw_device_nopath_notify, cdev);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 	} else if (cdev->private->flags.doverify)
 		/* Start delayed path verification. */
@@ -970,7 +965,7 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 	sch = to_subchannel(cdev->dev.parent);
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, (void *)cdev);
+			     ccw_device_nopath_notify, cdev);
 		queue_work(ccw_device_notify_work,
 			   &cdev->private->kick_work);
 	} else
@@ -981,51 +976,15 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 	cdev->private->state = DEV_STATE_ONLINE;
 	if (cdev->handler)
 		cdev->handler(cdev, cdev->private->intparm,
-			      ERR_PTR(-ETIMEDOUT));
-}
-
-static void
-ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
-{
-	struct irb *irb;
-	struct subchannel *sch;
-
-	irb = (struct irb *) __LC_IRB;
-	/*
-	 * Accumulate status and find out if a basic sense is needed.
-	 * This is fine since we have already adapted the lpm.
-	 */
-	ccw_device_accumulate_irb(cdev, irb);
-	if (cdev->private->flags.dosense) {
-		if (ccw_device_do_sense(cdev, irb) == 0) {
-			cdev->private->state = DEV_STATE_W4SENSE;
-		}
-		return;
-	}
-
-	/* Iff device is idle, reset timeout. */
-	sch = to_subchannel(cdev->dev.parent);
-	if (!stsch(sch->schid, &sch->schib))
-		if (sch->schib.scsw.actl == 0)
-			ccw_device_set_timeout(cdev, 0);
-	/* Call the handler. */
-	ccw_device_call_handler(cdev);
-	if (!sch->lpm) {
-		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, (void *)cdev);
-		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
-	} else if (cdev->private->flags.doverify)
-		ccw_device_online_verify(cdev, 0);
+			      ERR_PTR(-EIO));
 }
 
-static void
-ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+void device_kill_io(struct subchannel *sch)
 {
 	int ret;
-	struct subchannel *sch;
+	struct ccw_device *cdev;
 
-	sch = to_subchannel(cdev->dev.parent);
-	ccw_device_set_timeout(cdev, 0);
+	cdev = sch->dev.driver_data;
 	ret = ccw_device_cancel_halt_clear(cdev);
 	if (ret == -EBUSY) {
 		ccw_device_set_timeout(cdev, 3*HZ);
@@ -1035,7 +994,7 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 	if (ret == -ENODEV) {
 		if (!sch->lpm) {
 			PREPARE_WORK(&cdev->private->kick_work,
-				     ccw_device_nopath_notify, (void *)cdev);
+				     ccw_device_nopath_notify, cdev);
 			queue_work(ccw_device_notify_work,
 				   &cdev->private->kick_work);
 		} else
@@ -1044,12 +1003,12 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 	}
 	if (cdev->handler)
 		cdev->handler(cdev, cdev->private->intparm,
-			      ERR_PTR(-ETIMEDOUT));
+			      ERR_PTR(-EIO));
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, (void *)cdev);
+			     ccw_device_nopath_notify, cdev);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
-	} else if (cdev->private->flags.doverify)
+	} else
 		/* Start delayed path verification. */
 		ccw_device_online_verify(cdev, 0);
 }
@@ -1286,12 +1245,6 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
 		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
 	},
-	[DEV_STATE_WAIT4IO] = {
-		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
-		[DEV_EVENT_INTERRUPT]	= ccw_device_wait4io_irq,
-		[DEV_EVENT_TIMEOUT]	= ccw_device_wait4io_timeout,
-		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
-	},
 	[DEV_STATE_QUIESCE] = {
 		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
 		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,