author		Stefan Weinhuber <wein@de.ibm.com>	2011-01-05 06:48:03 -0500
committer	Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>	2011-01-05 06:47:30 -0500
commit		a4d26c6aeceea330ee5e0fb6b017d57e3b252d29 (patch)
tree		eed358de48ff28e4fba73d9925abafa2699a7b6c /drivers/s390/block/dasd.c
parent		ef19298b406f93af4bb249f0776deb8366e97532 (diff)
[S390] dasd: do path verification for paths added at runtime
When a new path is added at runtime, the CIO layer will call the driver's
path_event callback. The DASD device driver uses this callback to trigger
a path verification for the new path. The driver will use only those paths
for I/O that have been successfully verified.
Signed-off-by: Stefan Weinhuber <wein@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
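
For context: the CIO layer delivers these notifications through a path_event
callback in struct ccw_driver, and the DASD disciplines point that member at
the dasd_generic_path_event() function added below. A minimal hookup sketch,
assuming the flat struct ccw_driver layout of this kernel generation; the
driver name, ID table, and probe routine are hypothetical:

/* Sketch only: wiring the path_event callback into a CCW device driver.
 * The .path_event member is assumed from this kernel series;
 * dasd_sketch_ids and dasd_sketch_probe are hypothetical placeholders.
 */
static struct ccw_driver dasd_sketch_driver = {
	.name	     = "dasd-sketch",
	.owner	     = THIS_MODULE,
	.ids	     = dasd_sketch_ids,
	.probe	     = dasd_sketch_probe,
	.notify	     = dasd_generic_notify,
	.path_event  = dasd_generic_path_event,	/* new-path notifications */
};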
Diffstat (limited to 'drivers/s390/block/dasd.c')
-rw-r--r--	drivers/s390/block/dasd.c | 202
1 file changed, 158 insertions(+), 44 deletions(-)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 605f96f154a5..8f2067bc88c0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -913,6 +913,11 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 	cqr->startclk = get_clock();
 	cqr->starttime = jiffies;
 	cqr->retries--;
+	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+		cqr->lpm &= device->path_data.opm;
+		if (!cqr->lpm)
+			cqr->lpm = device->path_data.opm;
+	}
 	if (cqr->cpmode == 1) {
 		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
 					 (long) cqr, cqr->lpm);
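
The five added lines narrow the request's logical path mask (cqr->lpm) to the
currently operational paths (opm) and fall back to the whole opm when the
intersection is empty, so a request never starts on a path that is known to be
down. A stand-alone model of just this mask arithmetic, in plain C99 with
invented names and example masks:

#include <stdio.h>

typedef unsigned char u8;	/* path masks: one bit per channel path */

/* Hypothetical stand-alone model of the lpm/opm narrowing above. */
static u8 narrow_lpm(u8 lpm, u8 opm)
{
	lpm &= opm;		/* use only paths that are operational */
	if (!lpm)		/* preferred paths all gone: */
		lpm = opm;	/* fall back to every operational path */
	return lpm;
}

int main(void)
{
	/* 0x80 is CHPID position 0, 0x40 position 1, and so on. */
	printf("%#x\n", narrow_lpm(0xc0, 0x60));	/* 0x40: intersection kept */
	printf("%#x\n", narrow_lpm(0x80, 0x60));	/* 0x60: fallback to opm */
	return 0;
}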
@@ -925,35 +930,53 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 		cqr->status = DASD_CQR_IN_IO;
 		break;
 	case -EBUSY:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: device busy, retry later");
 		break;
 	case -ETIMEDOUT:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: request timeout, retry later");
 		break;
 	case -EACCES:
-		/* -EACCES indicates that the request used only a
-		 * subset of the available pathes and all these
-		 * pathes are gone.
-		 * Do a retry with all available pathes.
+		/* -EACCES indicates that the request used only a subset of the
+		 * available paths and all these paths are gone. If the lpm of
+		 * this request was only a subset of the opm (e.g. the ppm) then
+		 * we just do a retry with all available paths.
+		 * If we already use the full opm, something is amiss, and we
+		 * need a full path verification.
 		 */
-		cqr->lpm = LPM_ANYPATH;
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
-			      "start_IO: selected pathes gone,"
-			      " retry on all pathes");
+		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+			DBF_DEV_EVENT(DBF_WARNING, device,
+				      "start_IO: selected paths gone (%x)",
+				      cqr->lpm);
+		} else if (cqr->lpm != device->path_data.opm) {
+			cqr->lpm = device->path_data.opm;
+			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+				      "start_IO: selected paths gone,"
+				      " retry on all paths");
+		} else {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "start_IO: all paths in opm gone,"
+				      " do path verification");
+			dasd_generic_last_path_gone(device);
+			device->path_data.opm = 0;
+			device->path_data.ppm = 0;
+			device->path_data.npm = 0;
+			device->path_data.tbvpm =
+				ccw_device_get_path_mask(device->cdev);
+		}
 		break;
 	case -ENODEV:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -ENODEV device gone, retry");
 		break;
 	case -EIO:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -EIO device gone, retry");
 		break;
 	case -EINVAL:
 		/* most likely caused in power management context */
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -EINVAL device currently "
 			      "not accessible");
 		break;
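
The reworked -EACCES branch distinguishes three cases instead of blindly
retrying on LPM_ANYPATH: a path-verification request that lost its paths, a
request that used only a subset of the opm, and a request that already used
the full opm. A compact stand-alone model of that decision, with invented
names and illustrative mask values:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned char u8;

enum eacces_action { RETRY_ON_OPM, VERIFY_PATHS, FAIL_VERIFY_CQR };

/* Hypothetical model of the -EACCES handling in dasd_start_IO() above. */
static enum eacces_action on_eacces(bool is_verify_cqr, u8 lpm, u8 opm)
{
	if (is_verify_cqr)
		return FAIL_VERIFY_CQR;	/* the verification request itself failed */
	if (lpm != opm)
		return RETRY_ON_OPM;	/* used a subset; retry on the full opm */
	return VERIFY_PATHS;		/* full opm gone: trigger verification */
}

int main(void)
{
	printf("%d\n", on_eacces(false, 0x40, 0xc0));	/* 0: RETRY_ON_OPM */
	printf("%d\n", on_eacces(false, 0xc0, 0xc0));	/* 1: VERIFY_PATHS */
	return 0;
}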
@@ -1175,12 +1198,13 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	 */
 	if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
 	    cqr->retries > 0) {
-		if (cqr->lpm == LPM_ANYPATH)
+		if (cqr->lpm == device->path_data.opm)
 			DBF_DEV_EVENT(DBF_DEBUG, device,
 				      "default ERP in fastpath "
 				      "(%i retries left)",
 				      cqr->retries);
-		cqr->lpm = LPM_ANYPATH;
+		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+			cqr->lpm = device->path_data.opm;
 		cqr->status = DASD_CQR_QUEUED;
 		next = cqr;
 	} else
@@ -1364,8 +1388,14 @@ static void __dasd_device_start_head(struct dasd_device *device)
 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
 	if (cqr->status != DASD_CQR_QUEUED)
 		return;
-	/* when device is stopped, return request to previous layer */
-	if (device->stopped) {
+	/* when device is stopped, return request to previous layer
+	 * exception: only the disconnect or unresumed bits are set and the
+	 * cqr is a path verification request
+	 */
+	if (device->stopped &&
+	    !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+	      && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
+		cqr->intrc = -EAGAIN;
 		cqr->status = DASD_CQR_CLEARED;
 		dasd_schedule_device_bh(device);
 		return;
@@ -1381,6 +1411,23 @@ static void __dasd_device_start_head(struct dasd_device *device)
 	dasd_device_set_timer(device, 50);
 }
 
+static void __dasd_device_check_path_events(struct dasd_device *device)
+{
+	int rc;
+
+	if (device->path_data.tbvpm) {
+		if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
+					DASD_UNRESUMED_PM))
+			return;
+		rc = device->discipline->verify_path(
+				device, device->path_data.tbvpm);
+		if (rc)
+			dasd_device_set_timer(device, 50);
+		else
+			device->path_data.tbvpm = 0;
+	}
+};
+
 /*
  * Go through all request on the dasd_device request queue,
  * terminate them on the cdev if necessary, and return them to the
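
Both the exception in __dasd_device_start_head() above and the new
__dasd_device_check_path_events() apply the same gate: path verification may
proceed only while no stop bit other than disconnect-wait or
unresumed-after-PM is set. A stand-alone model of the gate; the bit values
here are illustrative, not the kernel's actual layout:

#include <stdio.h>

#define DASD_STOPPED_DC_WAIT	0x04	/* illustrative values, not the  */
#define DASD_UNRESUMED_PM	0x10	/* kernel's actual bit positions */

/* May path verification proceed? Only if no stop bit other than
 * disconnect-wait or unresumed-after-PM is set.
 */
static int may_verify(int stopped)
{
	return !(stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM));
}

int main(void)
{
	printf("%d\n", may_verify(0));				/* 1 */
	printf("%d\n", may_verify(DASD_STOPPED_DC_WAIT));	/* 1 */
	printf("%d\n", may_verify(DASD_STOPPED_DC_WAIT | 0x01));/* 0 */
	return 0;
}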
@@ -1455,6 +1502,7 @@ static void dasd_device_tasklet(struct dasd_device *device)
 	__dasd_device_check_expire(device);
 	/* find final requests on ccw queue */
 	__dasd_device_process_ccw_queue(device, &final_queue);
+	__dasd_device_check_path_events(device);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	/* Now call the callback function of requests with final status */
 	__dasd_device_process_final_queue(device, &final_queue);
@@ -2586,10 +2634,53 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 	return 0;
 }
 
+int dasd_generic_last_path_gone(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+
+	dev_warn(&device->cdev->dev, "No operational channel path is left "
+		 "for the device\n");
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
+	/* First of all call extended error reporting. */
+	dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+
+	if (device->state < DASD_STATE_BASIC)
+		return 0;
+	/* Device is active. We want to keep it. */
+	list_for_each_entry(cqr, &device->ccw_queue, devlist)
+		if ((cqr->status == DASD_CQR_IN_IO) ||
+		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
+			cqr->status = DASD_CQR_QUEUED;
+			cqr->retries++;
+		}
+	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
+	dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
+
+int dasd_generic_path_operational(struct dasd_device *device)
+{
+	dev_info(&device->cdev->dev, "A channel path to the device has become "
+		 "operational\n");
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
+	if (device->stopped & DASD_UNRESUMED_PM) {
+		dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
+		dasd_restore_device(device);
+		return 1;
+	}
+	dasd_schedule_device_bh(device);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
+
 int dasd_generic_notify(struct ccw_device *cdev, int event)
 {
 	struct dasd_device *device;
-	struct dasd_ccw_req *cqr;
 	int ret;
 
 	device = dasd_device_from_cdev_locked(cdev);
@@ -2600,41 +2691,64 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
 	case CIO_GONE:
 	case CIO_BOXED:
 	case CIO_NO_PATH:
-		/* First of all call extended error reporting. */
-		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
-
-		if (device->state < DASD_STATE_BASIC)
-			break;
-		/* Device is active. We want to keep it. */
-		list_for_each_entry(cqr, &device->ccw_queue, devlist)
-			if (cqr->status == DASD_CQR_IN_IO) {
-				cqr->status = DASD_CQR_QUEUED;
-				cqr->retries++;
-			}
-		dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
-		dasd_device_clear_timer(device);
-		dasd_schedule_device_bh(device);
-		ret = 1;
+		device->path_data.opm = 0;
+		device->path_data.ppm = 0;
+		device->path_data.npm = 0;
+		ret = dasd_generic_last_path_gone(device);
 		break;
 	case CIO_OPER:
-		/* FIXME: add a sanity check. */
-		dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
-		if (device->stopped & DASD_UNRESUMED_PM) {
-			dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
-			dasd_restore_device(device);
-			ret = 1;
-			break;
-		}
-		dasd_schedule_device_bh(device);
-		if (device->block)
-			dasd_schedule_block_bh(device->block);
 		ret = 1;
+		if (device->path_data.opm)
+			ret = dasd_generic_path_operational(device);
 		break;
 	}
 	dasd_put_device(device);
 	return ret;
 }
 
+void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
+{
+	int chp;
+	__u8 oldopm, eventlpm;
+	struct dasd_device *device;
+
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device))
+		return;
+	for (chp = 0; chp < 8; chp++) {
+		eventlpm = 0x80 >> chp;
+		if (path_event[chp] & PE_PATH_GONE) {
+			oldopm = device->path_data.opm;
+			device->path_data.opm &= ~eventlpm;
+			device->path_data.ppm &= ~eventlpm;
+			device->path_data.npm &= ~eventlpm;
+			if (oldopm && !device->path_data.opm)
+				dasd_generic_last_path_gone(device);
+		}
+		if (path_event[chp] & PE_PATH_AVAILABLE) {
+			device->path_data.opm &= ~eventlpm;
+			device->path_data.ppm &= ~eventlpm;
+			device->path_data.npm &= ~eventlpm;
+			device->path_data.tbvpm |= eventlpm;
+			dasd_schedule_device_bh(device);
+		}
+	}
+	dasd_put_device(device);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_event);
+
+int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
+{
+	if (!device->path_data.opm && lpm) {
+		device->path_data.opm = lpm;
+		dasd_generic_path_operational(device);
+	} else
+		device->path_data.opm |= lpm;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
+
+
 int dasd_generic_pm_freeze(struct ccw_device *cdev)
 {
 	struct dasd_ccw_req *cqr, *n;
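
To experiment with the per-CHPID mask bookkeeping of dasd_generic_path_event(),
the following stand-alone C99 model mirrors its loop: a path reported gone is
cleared from the usable masks, while a newly available path is likewise
cleared from them and parked in tbvpm until verified. The PE_* values and the
struct are illustrative stand-ins for the s390 headers:

#include <stdio.h>

typedef unsigned char u8;

#define PE_PATH_GONE		0x01	/* illustrative stand-ins for the */
#define PE_PATH_AVAILABLE	0x02	/* kernel's PE_* event flags      */

struct path_data { u8 opm, ppm, npm, tbvpm; };

/* Stand-alone model of dasd_generic_path_event(): one event word per
 * CHPID position, path masks keyed 0x80 >> position.
 */
static void handle_path_events(struct path_data *p, const int ev[8])
{
	for (int chp = 0; chp < 8; chp++) {
		u8 lpm = 0x80 >> chp;
		if (ev[chp] & PE_PATH_GONE) {
			p->opm &= ~lpm;
			p->ppm &= ~lpm;
			p->npm &= ~lpm;
		}
		if (ev[chp] & PE_PATH_AVAILABLE) {
			p->opm &= ~lpm;		/* not usable until verified */
			p->ppm &= ~lpm;
			p->npm &= ~lpm;
			p->tbvpm |= lpm;	/* mark "to be verified" */
		}
	}
}

int main(void)
{
	struct path_data p = { .opm = 0xc0 };
	int ev[8] = { [0] = PE_PATH_GONE, [2] = PE_PATH_AVAILABLE };
	handle_path_events(&p, ev);
	printf("opm=%#x tbvpm=%#x\n", p.opm, p.tbvpm);	/* opm=0x40 tbvpm=0x20 */
	return 0;
}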