-rw-r--r--   drivers/s390/block/dasd.c          | 202
-rw-r--r--   drivers/s390/block/dasd_3990_erp.c |  16
-rw-r--r--   drivers/s390/block/dasd_devmap.c   |   1
-rw-r--r--   drivers/s390/block/dasd_diag.c     |   1
-rw-r--r--   drivers/s390/block/dasd_eckd.c     | 261
-rw-r--r--   drivers/s390/block/dasd_eckd.h     |   9
-rw-r--r--   drivers/s390/block/dasd_erp.c      |   3
-rw-r--r--   drivers/s390/block/dasd_fba.c      |   3
-rw-r--r--   drivers/s390/block/dasd_int.h      |  22
9 files changed, 418 insertions(+), 100 deletions(-)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 605f96f154a5..8f2067bc88c0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -913,6 +913,11 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) | |||
913 | cqr->startclk = get_clock(); | 913 | cqr->startclk = get_clock(); |
914 | cqr->starttime = jiffies; | 914 | cqr->starttime = jiffies; |
915 | cqr->retries--; | 915 | cqr->retries--; |
916 | if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { | ||
917 | cqr->lpm &= device->path_data.opm; | ||
918 | if (!cqr->lpm) | ||
919 | cqr->lpm = device->path_data.opm; | ||
920 | } | ||
916 | if (cqr->cpmode == 1) { | 921 | if (cqr->cpmode == 1) { |
917 | rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, | 922 | rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, |
918 | (long) cqr, cqr->lpm); | 923 | (long) cqr, cqr->lpm); |
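The hunk above teaches dasd_start_IO to clamp a normal request's logical path mask (lpm) to the currently operational paths (opm), falling back to the full opm when the intersection is empty; a path-verification request keeps the single path it was built for. A minimal userspace sketch of that mask handling (the helper and test values are illustrative, not driver code):

#include <stdio.h>

typedef unsigned char u8;

/* Sketch: clamp a request's path mask to the operational mask unless
 * the request is a dedicated path-verification request. */
static u8 select_start_lpm(u8 lpm, u8 opm, int is_verify_path)
{
	if (is_verify_path)
		return lpm;          /* verify exactly the path it was built for */
	lpm &= opm;                  /* drop paths that are no longer operational */
	if (!lpm)
		lpm = opm;           /* nothing left: retry on all operational paths */
	return lpm;
}

int main(void)
{
	/* opm = 0xC0: CHPID positions 0 and 1 operational; request wanted 0x20 only */
	printf("%02x\n", select_start_lpm(0x20, 0xC0, 0));  /* -> c0 */
	printf("%02x\n", select_start_lpm(0x20, 0xC0, 1));  /* -> 20 (verify path) */
	return 0;
}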
@@ -925,35 +930,53 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) | |||
925 | cqr->status = DASD_CQR_IN_IO; | 930 | cqr->status = DASD_CQR_IN_IO; |
926 | break; | 931 | break; |
927 | case -EBUSY: | 932 | case -EBUSY: |
928 | DBF_DEV_EVENT(DBF_DEBUG, device, "%s", | 933 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
929 | "start_IO: device busy, retry later"); | 934 | "start_IO: device busy, retry later"); |
930 | break; | 935 | break; |
931 | case -ETIMEDOUT: | 936 | case -ETIMEDOUT: |
932 | DBF_DEV_EVENT(DBF_DEBUG, device, "%s", | 937 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
933 | "start_IO: request timeout, retry later"); | 938 | "start_IO: request timeout, retry later"); |
934 | break; | 939 | break; |
935 | case -EACCES: | 940 | case -EACCES: |
936 | /* -EACCES indicates that the request used only a | 941 | /* -EACCES indicates that the request used only a subset of the |
937 | * subset of the available pathes and all these | 942 | * available paths and all these paths are gone. If the lpm of |
938 | * pathes are gone. | 943 | * this request was only a subset of the opm (e.g. the ppm) then |
939 | * Do a retry with all available pathes. | 944 | * we just do a retry with all available paths. |
945 | * If we already use the full opm, something is amiss, and we | ||
946 | * need a full path verification. | ||
940 | */ | 947 | */ |
941 | cqr->lpm = LPM_ANYPATH; | 948 | if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { |
942 | DBF_DEV_EVENT(DBF_DEBUG, device, "%s", | 949 | DBF_DEV_EVENT(DBF_WARNING, device, |
943 | "start_IO: selected pathes gone," | 950 | "start_IO: selected paths gone (%x)", |
944 | " retry on all pathes"); | 951 | cqr->lpm); |
952 | } else if (cqr->lpm != device->path_data.opm) { | ||
953 | cqr->lpm = device->path_data.opm; | ||
954 | DBF_DEV_EVENT(DBF_DEBUG, device, "%s", | ||
955 | "start_IO: selected paths gone," | ||
956 | " retry on all paths"); | ||
957 | } else { | ||
958 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | ||
959 | "start_IO: all paths in opm gone," | ||
960 | " do path verification"); | ||
961 | dasd_generic_last_path_gone(device); | ||
962 | device->path_data.opm = 0; | ||
963 | device->path_data.ppm = 0; | ||
964 | device->path_data.npm = 0; | ||
965 | device->path_data.tbvpm = | ||
966 | ccw_device_get_path_mask(device->cdev); | ||
967 | } | ||
945 | break; | 968 | break; |
946 | case -ENODEV: | 969 | case -ENODEV: |
947 | DBF_DEV_EVENT(DBF_DEBUG, device, "%s", | 970 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
948 | "start_IO: -ENODEV device gone, retry"); | 971 | "start_IO: -ENODEV device gone, retry"); |
949 | break; | 972 | break; |
950 | case -EIO: | 973 | case -EIO: |
951 | DBF_DEV_EVENT(DBF_DEBUG, device, "%s", | 974 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
952 | "start_IO: -EIO device gone, retry"); | 975 | "start_IO: -EIO device gone, retry"); |
953 | break; | 976 | break; |
954 | case -EINVAL: | 977 | case -EINVAL: |
955 | /* most likely caused in power management context */ | 978 | /* most likely caused in power management context */ |
956 | DBF_DEV_EVENT(DBF_DEBUG, device, "%s", | 979 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
957 | "start_IO: -EINVAL device currently " | 980 | "start_IO: -EINVAL device currently " |
958 | "not accessible"); | 981 | "not accessible"); |
959 | break; | 982 | break; |
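On -EACCES the rewritten error handling distinguishes three cases: a path-verification request only logs the loss, a request that used a subset of opm is retried on all operational paths, and a request that already used the full opm clears the path masks and queues a fresh verification via tbvpm. A rough sketch of that decision, again in illustrative userspace C:

#include <stdio.h>

typedef unsigned char u8;

enum eacces_action {
	VERIFY_REQ_FAILED,      /* path-verification request: just report it */
	RETRY_ON_ALL_PATHS,     /* request used a subset of opm: widen and retry */
	START_PATH_VERIFICATION /* full opm gone: re-verify all paths */
};

/* Sketch of the -EACCES policy in dasd_start_IO (simplified). */
static enum eacces_action eacces_policy(u8 lpm, u8 opm, int is_verify_path)
{
	if (is_verify_path)
		return VERIFY_REQ_FAILED;
	if (lpm != opm)
		return RETRY_ON_ALL_PATHS;
	return START_PATH_VERIFICATION;
}

int main(void)
{
	printf("%d\n", eacces_policy(0x80, 0xC0, 0)); /* 1: retry on all paths */
	printf("%d\n", eacces_policy(0xC0, 0xC0, 0)); /* 2: start verification */
	return 0;
}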
@@ -1175,12 +1198,13 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1175 | */ | 1198 | */ |
1176 | if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && | 1199 | if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && |
1177 | cqr->retries > 0) { | 1200 | cqr->retries > 0) { |
1178 | if (cqr->lpm == LPM_ANYPATH) | 1201 | if (cqr->lpm == device->path_data.opm) |
1179 | DBF_DEV_EVENT(DBF_DEBUG, device, | 1202 | DBF_DEV_EVENT(DBF_DEBUG, device, |
1180 | "default ERP in fastpath " | 1203 | "default ERP in fastpath " |
1181 | "(%i retries left)", | 1204 | "(%i retries left)", |
1182 | cqr->retries); | 1205 | cqr->retries); |
1183 | cqr->lpm = LPM_ANYPATH; | 1206 | if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) |
1207 | cqr->lpm = device->path_data.opm; | ||
1184 | cqr->status = DASD_CQR_QUEUED; | 1208 | cqr->status = DASD_CQR_QUEUED; |
1185 | next = cqr; | 1209 | next = cqr; |
1186 | } else | 1210 | } else |
@@ -1364,8 +1388,14 @@ static void __dasd_device_start_head(struct dasd_device *device) | |||
1364 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); | 1388 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); |
1365 | if (cqr->status != DASD_CQR_QUEUED) | 1389 | if (cqr->status != DASD_CQR_QUEUED) |
1366 | return; | 1390 | return; |
1367 | /* when device is stopped, return request to previous layer */ | 1391 | /* when device is stopped, return request to previous layer |
1368 | if (device->stopped) { | 1392 | * exception: only the disconnect or unresumed bits are set and the |
1393 | * cqr is a path verification request | ||
1394 | */ | ||
1395 | if (device->stopped && | ||
1396 | !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) | ||
1397 | && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) { | ||
1398 | cqr->intrc = -EAGAIN; | ||
1369 | cqr->status = DASD_CQR_CLEARED; | 1399 | cqr->status = DASD_CQR_CLEARED; |
1370 | dasd_schedule_device_bh(device); | 1400 | dasd_schedule_device_bh(device); |
1371 | return; | 1401 | return; |
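The new condition in __dasd_device_start_head rejects queued requests on a stopped device unless the only stop reasons are DASD_STOPPED_DC_WAIT or DASD_UNRESUMED_PM and the request is a path-verification request; that exception is what allows the verification CQR to run while all normal I/O is held back. Because the double negation is easy to misread, here is a small truth-table style sketch with made-up bit values:

#include <stdio.h>

/* Illustrative values only; the kernel defines its own DASD_STOPPED_* bits. */
#define DASD_STOPPED_DC_WAIT 0x04
#define DASD_UNRESUMED_PM    0x10
#define DASD_STOPPED_QUIESCE 0x02   /* stands in for "any other stop reason" */

/* Return 1 if the request must be rejected (cleared with -EAGAIN). */
static int must_reject(int stopped, int is_verify_path)
{
	int only_dc_or_pm = !(stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM));

	return stopped && !(only_dc_or_pm && is_verify_path);
}

int main(void)
{
	printf("%d\n", must_reject(DASD_STOPPED_DC_WAIT, 1));                        /* 0: verify may run */
	printf("%d\n", must_reject(DASD_STOPPED_DC_WAIT, 0));                        /* 1: normal I/O waits */
	printf("%d\n", must_reject(DASD_STOPPED_DC_WAIT | DASD_STOPPED_QUIESCE, 1)); /* 1: other stop bit set */
	return 0;
}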
@@ -1381,6 +1411,23 @@ static void __dasd_device_start_head(struct dasd_device *device) | |||
1381 | dasd_device_set_timer(device, 50); | 1411 | dasd_device_set_timer(device, 50); |
1382 | } | 1412 | } |
1383 | 1413 | ||
1414 | static void __dasd_device_check_path_events(struct dasd_device *device) | ||
1415 | { | ||
1416 | int rc; | ||
1417 | |||
1418 | if (device->path_data.tbvpm) { | ||
1419 | if (device->stopped & ~(DASD_STOPPED_DC_WAIT | | ||
1420 | DASD_UNRESUMED_PM)) | ||
1421 | return; | ||
1422 | rc = device->discipline->verify_path( | ||
1423 | device, device->path_data.tbvpm); | ||
1424 | if (rc) | ||
1425 | dasd_device_set_timer(device, 50); | ||
1426 | else | ||
1427 | device->path_data.tbvpm = 0; | ||
1428 | } | ||
1429 | }; | ||
1430 | |||
1384 | /* | 1431 | /* |
1385 | * Go through all request on the dasd_device request queue, | 1432 | * Go through all request on the dasd_device request queue, |
1386 | * terminate them on the cdev if necessary, and return them to the | 1433 | * terminate them on the cdev if necessary, and return them to the |
@@ -1455,6 +1502,7 @@ static void dasd_device_tasklet(struct dasd_device *device) | |||
1455 | __dasd_device_check_expire(device); | 1502 | __dasd_device_check_expire(device); |
1456 | /* find final requests on ccw queue */ | 1503 | /* find final requests on ccw queue */ |
1457 | __dasd_device_process_ccw_queue(device, &final_queue); | 1504 | __dasd_device_process_ccw_queue(device, &final_queue); |
1505 | __dasd_device_check_path_events(device); | ||
1458 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 1506 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
1459 | /* Now call the callback function of requests with final status */ | 1507 | /* Now call the callback function of requests with final status */ |
1460 | __dasd_device_process_final_queue(device, &final_queue); | 1508 | __dasd_device_process_final_queue(device, &final_queue); |
@@ -2586,10 +2634,53 @@ int dasd_generic_set_offline(struct ccw_device *cdev) | |||
2586 | return 0; | 2634 | return 0; |
2587 | } | 2635 | } |
2588 | 2636 | ||
2637 | int dasd_generic_last_path_gone(struct dasd_device *device) | ||
2638 | { | ||
2639 | struct dasd_ccw_req *cqr; | ||
2640 | |||
2641 | dev_warn(&device->cdev->dev, "No operational channel path is left " | ||
2642 | "for the device\n"); | ||
2643 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); | ||
2644 | /* First of all call extended error reporting. */ | ||
2645 | dasd_eer_write(device, NULL, DASD_EER_NOPATH); | ||
2646 | |||
2647 | if (device->state < DASD_STATE_BASIC) | ||
2648 | return 0; | ||
2649 | /* Device is active. We want to keep it. */ | ||
2650 | list_for_each_entry(cqr, &device->ccw_queue, devlist) | ||
2651 | if ((cqr->status == DASD_CQR_IN_IO) || | ||
2652 | (cqr->status == DASD_CQR_CLEAR_PENDING)) { | ||
2653 | cqr->status = DASD_CQR_QUEUED; | ||
2654 | cqr->retries++; | ||
2655 | } | ||
2656 | dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); | ||
2657 | dasd_device_clear_timer(device); | ||
2658 | dasd_schedule_device_bh(device); | ||
2659 | return 1; | ||
2660 | } | ||
2661 | EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); | ||
2662 | |||
2663 | int dasd_generic_path_operational(struct dasd_device *device) | ||
2664 | { | ||
2665 | dev_info(&device->cdev->dev, "A channel path to the device has become " | ||
2666 | "operational\n"); | ||
2667 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); | ||
2668 | dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); | ||
2669 | if (device->stopped & DASD_UNRESUMED_PM) { | ||
2670 | dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); | ||
2671 | dasd_restore_device(device); | ||
2672 | return 1; | ||
2673 | } | ||
2674 | dasd_schedule_device_bh(device); | ||
2675 | if (device->block) | ||
2676 | dasd_schedule_block_bh(device->block); | ||
2677 | return 1; | ||
2678 | } | ||
2679 | EXPORT_SYMBOL_GPL(dasd_generic_path_operational); | ||
2680 | |||
2589 | int dasd_generic_notify(struct ccw_device *cdev, int event) | 2681 | int dasd_generic_notify(struct ccw_device *cdev, int event) |
2590 | { | 2682 | { |
2591 | struct dasd_device *device; | 2683 | struct dasd_device *device; |
2592 | struct dasd_ccw_req *cqr; | ||
2593 | int ret; | 2684 | int ret; |
2594 | 2685 | ||
2595 | device = dasd_device_from_cdev_locked(cdev); | 2686 | device = dasd_device_from_cdev_locked(cdev); |
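dasd_generic_last_path_gone deliberately keeps an active device alive: requests that were on the wire (IN_IO or CLEAR_PENDING) go back to QUEUED with one extra retry, the device is stopped with DASD_STOPPED_DC_WAIT, and the bottom half is scheduled so the queue is reprocessed once a path returns. A toy sketch of that requeue step over a plain array (the kernel iterates its ccw_queue list and uses the real DASD_CQR_* states):

#include <stdio.h>

/* Illustrative request states, not the kernel's DASD_CQR_* values. */
enum state { QUEUED, IN_IO, CLEAR_PENDING, DONE };

struct req { enum state status; int retries; };

/* Requeue every request that was on the wire when the last path vanished. */
static void requeue_inflight(struct req *q, int n)
{
	for (int i = 0; i < n; i++)
		if (q[i].status == IN_IO || q[i].status == CLEAR_PENDING) {
			q[i].status = QUEUED;
			q[i].retries++;   /* give back the attempt it just lost */
		}
}

int main(void)
{
	struct req q[3] = { {IN_IO, 3}, {DONE, 0}, {CLEAR_PENDING, 1} };

	requeue_inflight(q, 3);
	for (int i = 0; i < 3; i++)
		printf("req %d: status=%d retries=%d\n", i, q[i].status, q[i].retries);
	return 0;
}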
@@ -2600,41 +2691,64 @@ int dasd_generic_notify(struct ccw_device *cdev, int event) | |||
2600 | case CIO_GONE: | 2691 | case CIO_GONE: |
2601 | case CIO_BOXED: | 2692 | case CIO_BOXED: |
2602 | case CIO_NO_PATH: | 2693 | case CIO_NO_PATH: |
2603 | /* First of all call extended error reporting. */ | 2694 | device->path_data.opm = 0; |
2604 | dasd_eer_write(device, NULL, DASD_EER_NOPATH); | 2695 | device->path_data.ppm = 0; |
2605 | 2696 | device->path_data.npm = 0; | |
2606 | if (device->state < DASD_STATE_BASIC) | 2697 | ret = dasd_generic_last_path_gone(device); |
2607 | break; | ||
2608 | /* Device is active. We want to keep it. */ | ||
2609 | list_for_each_entry(cqr, &device->ccw_queue, devlist) | ||
2610 | if (cqr->status == DASD_CQR_IN_IO) { | ||
2611 | cqr->status = DASD_CQR_QUEUED; | ||
2612 | cqr->retries++; | ||
2613 | } | ||
2614 | dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); | ||
2615 | dasd_device_clear_timer(device); | ||
2616 | dasd_schedule_device_bh(device); | ||
2617 | ret = 1; | ||
2618 | break; | 2698 | break; |
2619 | case CIO_OPER: | 2699 | case CIO_OPER: |
2620 | /* FIXME: add a sanity check. */ | ||
2621 | dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); | ||
2622 | if (device->stopped & DASD_UNRESUMED_PM) { | ||
2623 | dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); | ||
2624 | dasd_restore_device(device); | ||
2625 | ret = 1; | ||
2626 | break; | ||
2627 | } | ||
2628 | dasd_schedule_device_bh(device); | ||
2629 | if (device->block) | ||
2630 | dasd_schedule_block_bh(device->block); | ||
2631 | ret = 1; | 2700 | ret = 1; |
2701 | if (device->path_data.opm) | ||
2702 | ret = dasd_generic_path_operational(device); | ||
2632 | break; | 2703 | break; |
2633 | } | 2704 | } |
2634 | dasd_put_device(device); | 2705 | dasd_put_device(device); |
2635 | return ret; | 2706 | return ret; |
2636 | } | 2707 | } |
2637 | 2708 | ||
2709 | void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) | ||
2710 | { | ||
2711 | int chp; | ||
2712 | __u8 oldopm, eventlpm; | ||
2713 | struct dasd_device *device; | ||
2714 | |||
2715 | device = dasd_device_from_cdev_locked(cdev); | ||
2716 | if (IS_ERR(device)) | ||
2717 | return; | ||
2718 | for (chp = 0; chp < 8; chp++) { | ||
2719 | eventlpm = 0x80 >> chp; | ||
2720 | if (path_event[chp] & PE_PATH_GONE) { | ||
2721 | oldopm = device->path_data.opm; | ||
2722 | device->path_data.opm &= ~eventlpm; | ||
2723 | device->path_data.ppm &= ~eventlpm; | ||
2724 | device->path_data.npm &= ~eventlpm; | ||
2725 | if (oldopm && !device->path_data.opm) | ||
2726 | dasd_generic_last_path_gone(device); | ||
2727 | } | ||
2728 | if (path_event[chp] & PE_PATH_AVAILABLE) { | ||
2729 | device->path_data.opm &= ~eventlpm; | ||
2730 | device->path_data.ppm &= ~eventlpm; | ||
2731 | device->path_data.npm &= ~eventlpm; | ||
2732 | device->path_data.tbvpm |= eventlpm; | ||
2733 | dasd_schedule_device_bh(device); | ||
2734 | } | ||
2735 | } | ||
2736 | dasd_put_device(device); | ||
2737 | } | ||
2738 | EXPORT_SYMBOL_GPL(dasd_generic_path_event); | ||
2739 | |||
2740 | int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) | ||
2741 | { | ||
2742 | if (!device->path_data.opm && lpm) { | ||
2743 | device->path_data.opm = lpm; | ||
2744 | dasd_generic_path_operational(device); | ||
2745 | } else | ||
2746 | device->path_data.opm |= lpm; | ||
2747 | return 0; | ||
2748 | } | ||
2749 | EXPORT_SYMBOL_GPL(dasd_generic_verify_path); | ||
2750 | |||
2751 | |||
2638 | int dasd_generic_pm_freeze(struct ccw_device *cdev) | 2752 | int dasd_generic_pm_freeze(struct ccw_device *cdev) |
2639 | { | 2753 | { |
2640 | struct dasd_ccw_req *cqr, *n; | 2754 | struct dasd_ccw_req *cqr, *n; |
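dasd_generic_path_event walks the eight possible channel paths of the device; CHPID position chp maps to the single-bit mask 0x80 >> chp, which is removed from every mask when the path is reported gone and, for a newly available path, is parked in tbvpm until the discipline has verified it. A small sketch of that mapping and bookkeeping (the event flag names are stand-ins for the cio PE_PATH_* constants):

#include <stdio.h>

typedef unsigned char u8;

/* Hypothetical event flags standing in for the cio PE_PATH_* constants. */
#define EV_GONE      0x01
#define EV_AVAILABLE 0x02

struct path_masks { u8 opm, ppm, npm, tbvpm; };

static void handle_path_events(struct path_masks *p, const int event[8])
{
	for (int chp = 0; chp < 8; chp++) {
		u8 mask = 0x80 >> chp;            /* CHPID position -> path mask bit */

		if (event[chp] & EV_GONE) {
			p->opm &= ~mask;
			p->ppm &= ~mask;
			p->npm &= ~mask;
		}
		if (event[chp] & EV_AVAILABLE) {
			p->opm &= ~mask;          /* not usable until verified */
			p->ppm &= ~mask;
			p->npm &= ~mask;
			p->tbvpm |= mask;         /* schedule it for verification */
		}
	}
}

int main(void)
{
	struct path_masks p = { .opm = 0xC0 };
	int event[8] = { EV_GONE, 0, EV_AVAILABLE, 0, 0, 0, 0, 0 };

	handle_path_events(&p, event);
	printf("opm=%02x tbvpm=%02x\n", p.opm, p.tbvpm);  /* opm=40 tbvpm=20 */
	return 0;
}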
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 968c76cf7127..1654a24817be 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -152,9 +152,9 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp) | |||
152 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 152 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); |
153 | opm = ccw_device_get_path_mask(device->cdev); | 153 | opm = ccw_device_get_path_mask(device->cdev); |
154 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 154 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
155 | //FIXME: start with get_opm ? | ||
156 | if (erp->lpm == 0) | 155 | if (erp->lpm == 0) |
157 | erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum); | 156 | erp->lpm = device->path_data.opm & |
157 | ~(erp->irb.esw.esw0.sublog.lpum); | ||
158 | else | 158 | else |
159 | erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum); | 159 | erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum); |
160 | 160 | ||
@@ -270,10 +270,11 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp) | |||
270 | { | 270 | { |
271 | erp->function = dasd_3990_erp_action_1; | 271 | erp->function = dasd_3990_erp_action_1; |
272 | dasd_3990_erp_alternate_path(erp); | 272 | dasd_3990_erp_alternate_path(erp); |
273 | if (erp->status == DASD_CQR_FAILED) { | 273 | if (erp->status == DASD_CQR_FAILED && |
274 | !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) { | ||
274 | erp->status = DASD_CQR_FILLED; | 275 | erp->status = DASD_CQR_FILLED; |
275 | erp->retries = 10; | 276 | erp->retries = 10; |
276 | erp->lpm = LPM_ANYPATH; | 277 | erp->lpm = erp->startdev->path_data.opm; |
277 | erp->function = dasd_3990_erp_action_1_sec; | 278 | erp->function = dasd_3990_erp_action_1_sec; |
278 | } | 279 | } |
279 | return erp; | 280 | return erp; |
@@ -1907,15 +1908,14 @@ dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense) | |||
1907 | static void | 1908 | static void |
1908 | dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense) | 1909 | dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense) |
1909 | { | 1910 | { |
1910 | |||
1911 | if (sense[25] & DASD_SENSE_BIT_3) { | 1911 | if (sense[25] & DASD_SENSE_BIT_3) { |
1912 | dasd_3990_erp_alternate_path(erp); | 1912 | dasd_3990_erp_alternate_path(erp); |
1913 | 1913 | ||
1914 | if (erp->status == DASD_CQR_FAILED) { | 1914 | if (erp->status == DASD_CQR_FAILED && |
1915 | !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) { | ||
1915 | /* reset the lpm and the status to be able to | 1916 | /* reset the lpm and the status to be able to |
1916 | * try further actions. */ | 1917 | * try further actions. */ |
1917 | 1918 | erp->lpm = erp->startdev->path_data.opm; | |
1918 | erp->lpm = 0; | ||
1919 | erp->status = DASD_CQR_NEED_ERP; | 1919 | erp->status = DASD_CQR_NEED_ERP; |
1920 | } | 1920 | } |
1921 | } | 1921 | } |
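Both ERP hunks above now derive the retry mask from the device's operational paths rather than LPM_ANYPATH: on the first pass the mask is opm minus the path the failure was reported on (the lpum from the sense data), later passes just keep removing the failing path, and an empty result means the ERP gives up unless the request is a path-verification request. A compact sketch of that selection logic:

#include <stdio.h>

typedef unsigned char u8;

/*
 * Sketch of picking an alternate path for ERP:
 *   lpm  - mask the request has tried so far (0 on the first ERP pass)
 *   opm  - operational paths of the device
 *   lpum - the path the failure was reported on (from the sense/irb data)
 * Returns the new mask; 0 means no alternate path is left.
 */
static u8 erp_alternate_path(u8 lpm, u8 opm, u8 lpum)
{
	if (lpm == 0)
		return opm & ~lpum;   /* first pass: all operational paths but the bad one */
	return lpm & ~lpum;           /* later passes: keep shrinking the mask */
}

int main(void)
{
	printf("%02x\n", erp_alternate_path(0x00, 0xE0, 0x80)); /* -> 60 */
	printf("%02x\n", erp_alternate_path(0x60, 0xE0, 0x40)); /* -> 20 */
	printf("%02x\n", erp_alternate_path(0x20, 0xE0, 0x20)); /* -> 00: give up */
	return 0;
}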
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 8d41f3ed38d7..0001df8ad3e6 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -639,6 +639,7 @@ dasd_put_device_wake(struct dasd_device *device) | |||
639 | { | 639 | { |
640 | wake_up(&dasd_delete_wq); | 640 | wake_up(&dasd_delete_wq); |
641 | } | 641 | } |
642 | EXPORT_SYMBOL_GPL(dasd_put_device_wake); | ||
642 | 643 | ||
643 | /* | 644 | /* |
644 | * Return dasd_device structure associated with cdev. | 645 | * Return dasd_device structure associated with cdev. |
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index a3a5db58df18..29143eda9dd9 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -619,6 +619,7 @@ static struct dasd_discipline dasd_diag_discipline = { | |||
619 | .ebcname = "DIAG", | 619 | .ebcname = "DIAG", |
620 | .max_blocks = DIAG_MAX_BLOCKS, | 620 | .max_blocks = DIAG_MAX_BLOCKS, |
621 | .check_device = dasd_diag_check_device, | 621 | .check_device = dasd_diag_check_device, |
622 | .verify_path = dasd_generic_verify_path, | ||
622 | .fill_geometry = dasd_diag_fill_geometry, | 623 | .fill_geometry = dasd_diag_fill_geometry, |
623 | .start_IO = dasd_start_diag, | 624 | .start_IO = dasd_start_diag, |
624 | .term_IO = dasd_diag_term_IO, | 625 | .term_IO = dasd_diag_term_IO, |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 549443af121c..a1ebf5722ae5 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -90,6 +90,18 @@ static struct { | |||
90 | } *dasd_reserve_req; | 90 | } *dasd_reserve_req; |
91 | static DEFINE_MUTEX(dasd_reserve_mutex); | 91 | static DEFINE_MUTEX(dasd_reserve_mutex); |
92 | 92 | ||
93 | /* definitions for the path verification worker */ | ||
94 | struct path_verification_work_data { | ||
95 | struct work_struct worker; | ||
96 | struct dasd_device *device; | ||
97 | struct dasd_ccw_req cqr; | ||
98 | struct ccw1 ccw; | ||
99 | __u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE]; | ||
100 | int isglobal; | ||
101 | __u8 tbvpm; | ||
102 | }; | ||
103 | static struct path_verification_work_data *path_verification_worker; | ||
104 | static DEFINE_MUTEX(dasd_path_verification_mutex); | ||
93 | 105 | ||
94 | /* initial attempt at a probe function. this can be simplified once | 106 | /* initial attempt at a probe function. this can be simplified once |
95 | * the other detection code is gone */ | 107 | * the other detection code is gone */ |
@@ -755,26 +767,27 @@ static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid) | |||
755 | return -EINVAL; | 767 | return -EINVAL; |
756 | } | 768 | } |
757 | 769 | ||
758 | static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device, | 770 | static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device, |
759 | void *rcd_buffer, | 771 | struct dasd_ccw_req *cqr, |
760 | struct ciw *ciw, __u8 lpm) | 772 | __u8 *rcd_buffer, |
773 | __u8 lpm) | ||
761 | { | 774 | { |
762 | struct dasd_ccw_req *cqr; | ||
763 | struct ccw1 *ccw; | 775 | struct ccw1 *ccw; |
764 | 776 | /* | |
765 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count, | 777 | * buffer has to start with EBCDIC "V1.0" to show |
766 | device); | 778 | * support for virtual device SNEQ |
767 | 779 | */ | |
768 | if (IS_ERR(cqr)) { | 780 | rcd_buffer[0] = 0xE5; |
769 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 781 | rcd_buffer[1] = 0xF1; |
770 | "Could not allocate RCD request"); | 782 | rcd_buffer[2] = 0x4B; |
771 | return cqr; | 783 | rcd_buffer[3] = 0xF0; |
772 | } | ||
773 | 784 | ||
774 | ccw = cqr->cpaddr; | 785 | ccw = cqr->cpaddr; |
775 | ccw->cmd_code = ciw->cmd; | 786 | ccw->cmd_code = DASD_ECKD_CCW_RCD; |
787 | ccw->flags = 0; | ||
776 | ccw->cda = (__u32)(addr_t)rcd_buffer; | 788 | ccw->cda = (__u32)(addr_t)rcd_buffer; |
777 | ccw->count = ciw->count; | 789 | ccw->count = DASD_ECKD_RCD_DATA_SIZE; |
790 | cqr->magic = DASD_ECKD_MAGIC; | ||
778 | 791 | ||
779 | cqr->startdev = device; | 792 | cqr->startdev = device; |
780 | cqr->memdev = device; | 793 | cqr->memdev = device; |
@@ -784,7 +797,29 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device, | |||
784 | cqr->retries = 256; | 797 | cqr->retries = 256; |
785 | cqr->buildclk = get_clock(); | 798 | cqr->buildclk = get_clock(); |
786 | cqr->status = DASD_CQR_FILLED; | 799 | cqr->status = DASD_CQR_FILLED; |
787 | return cqr; | 800 | set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags); |
801 | } | ||
802 | |||
803 | static int dasd_eckd_read_conf_immediately(struct dasd_device *device, | ||
804 | struct dasd_ccw_req *cqr, | ||
805 | __u8 *rcd_buffer, | ||
806 | __u8 lpm) | ||
807 | { | ||
808 | struct ciw *ciw; | ||
809 | int rc; | ||
810 | /* | ||
811 | * sanity check: scan for RCD command in extended SenseID data | ||
812 | * some devices do not support RCD | ||
813 | */ | ||
814 | ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD); | ||
815 | if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) | ||
816 | return -EOPNOTSUPP; | ||
817 | |||
818 | dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm); | ||
819 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | ||
820 | cqr->retries = 5; | ||
821 | rc = dasd_sleep_on_immediatly(cqr); | ||
822 | return rc; | ||
788 | } | 823 | } |
789 | 824 | ||
790 | static int dasd_eckd_read_conf_lpm(struct dasd_device *device, | 825 | static int dasd_eckd_read_conf_lpm(struct dasd_device *device, |
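dasd_eckd_fill_rcd_cqr seeds the RCD buffer with the bytes 0xE5 0xF1 0x4B 0xF0, which spell "V1.0" in EBCDIC and signal support for the virtual device SNEQ. A tiny self-check sketch of that encoding (the mapping helper below covers only the four characters involved and is not part of the driver):

#include <stdio.h>
#include <string.h>

/* Minimal ASCII->EBCDIC mapping, only for the characters in "V1.0". */
static unsigned char to_ebcdic(char c)
{
	switch (c) {
	case 'V': return 0xE5;
	case '1': return 0xF1;
	case '.': return 0x4B;
	case '0': return 0xF0;
	default:  return 0x00;
	}
}

int main(void)
{
	const char *sig = "V1.0";
	unsigned char rcd_buffer[256] = { 0xE5, 0xF1, 0x4B, 0xF0 }; /* as in the patch */

	for (size_t i = 0; i < strlen(sig); i++)
		if (rcd_buffer[i] != to_ebcdic(sig[i]))
			return 1;
	puts("RCD buffer starts with EBCDIC \"V1.0\"");
	return 0;
}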
@@ -797,32 +832,29 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device, | |||
797 | struct dasd_ccw_req *cqr; | 832 | struct dasd_ccw_req *cqr; |
798 | 833 | ||
799 | /* | 834 | /* |
800 | * scan for RCD command in extended SenseID data | 835 | * sanity check: scan for RCD command in extended SenseID data |
836 | * some devices do not support RCD | ||
801 | */ | 837 | */ |
802 | ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD); | 838 | ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD); |
803 | if (!ciw || ciw->cmd == 0) { | 839 | if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) { |
804 | ret = -EOPNOTSUPP; | 840 | ret = -EOPNOTSUPP; |
805 | goto out_error; | 841 | goto out_error; |
806 | } | 842 | } |
807 | rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA); | 843 | rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA); |
808 | if (!rcd_buf) { | 844 | if (!rcd_buf) { |
809 | ret = -ENOMEM; | 845 | ret = -ENOMEM; |
810 | goto out_error; | 846 | goto out_error; |
811 | } | 847 | } |
812 | 848 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, | |
813 | /* | 849 | 0, /* use rcd_buf as data area */
814 | * buffer has to start with EBCDIC "V1.0" to show | 850 | device); |
815 | * support for virtual device SNEQ | ||
816 | */ | ||
817 | rcd_buf[0] = 0xE5; | ||
818 | rcd_buf[1] = 0xF1; | ||
819 | rcd_buf[2] = 0x4B; | ||
820 | rcd_buf[3] = 0xF0; | ||
821 | cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm); | ||
822 | if (IS_ERR(cqr)) { | 851 | if (IS_ERR(cqr)) { |
823 | ret = PTR_ERR(cqr); | 852 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
853 | "Could not allocate RCD request"); | ||
854 | ret = -ENOMEM; | ||
824 | goto out_error; | 855 | goto out_error; |
825 | } | 856 | } |
857 | dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm); | ||
826 | ret = dasd_sleep_on(cqr); | 858 | ret = dasd_sleep_on(cqr); |
827 | /* | 859 | /* |
828 | * on success we update the user input parms | 860 | * on success we update the user input parms |
@@ -831,7 +863,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device, | |||
831 | if (ret) | 863 | if (ret) |
832 | goto out_error; | 864 | goto out_error; |
833 | 865 | ||
834 | *rcd_buffer_size = ciw->count; | 866 | *rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE; |
835 | *rcd_buffer = rcd_buf; | 867 | *rcd_buffer = rcd_buf; |
836 | return 0; | 868 | return 0; |
837 | out_error: | 869 | out_error: |
@@ -901,18 +933,18 @@ static int dasd_eckd_read_conf(struct dasd_device *device) | |||
901 | void *conf_data; | 933 | void *conf_data; |
902 | int conf_len, conf_data_saved; | 934 | int conf_len, conf_data_saved; |
903 | int rc; | 935 | int rc; |
904 | __u8 lpm; | 936 | __u8 lpm, opm; |
905 | struct dasd_eckd_private *private; | 937 | struct dasd_eckd_private *private; |
906 | struct dasd_eckd_path *path_data; | 938 | struct dasd_path *path_data; |
907 | 939 | ||
908 | private = (struct dasd_eckd_private *) device->private; | 940 | private = (struct dasd_eckd_private *) device->private; |
909 | path_data = (struct dasd_eckd_path *) &private->path_data; | 941 | path_data = &device->path_data; |
910 | path_data->opm = ccw_device_get_path_mask(device->cdev); | 942 | opm = ccw_device_get_path_mask(device->cdev); |
911 | lpm = 0x80; | 943 | lpm = 0x80; |
912 | conf_data_saved = 0; | 944 | conf_data_saved = 0; |
913 | /* get configuration data per operational path */ | 945 | /* get configuration data per operational path */ |
914 | for (lpm = 0x80; lpm; lpm>>= 1) { | 946 | for (lpm = 0x80; lpm; lpm>>= 1) { |
915 | if (lpm & path_data->opm){ | 947 | if (lpm & opm) { |
916 | rc = dasd_eckd_read_conf_lpm(device, &conf_data, | 948 | rc = dasd_eckd_read_conf_lpm(device, &conf_data, |
917 | &conf_len, lpm); | 949 | &conf_len, lpm); |
918 | if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */ | 950 | if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */ |
@@ -925,6 +957,8 @@ static int dasd_eckd_read_conf(struct dasd_device *device) | |||
925 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", | 957 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", |
926 | "No configuration data " | 958 | "No configuration data " |
927 | "retrieved"); | 959 | "retrieved"); |
960 | /* no further analysis possible */ | ||
961 | path_data->opm |= lpm; | ||
928 | continue; /* no error */ | 962 | continue; /* no error */ |
929 | } | 963 | } |
930 | /* save first valid configuration data */ | 964 | /* save first valid configuration data */ |
@@ -948,6 +982,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device) | |||
948 | path_data->ppm |= lpm; | 982 | path_data->ppm |= lpm; |
949 | break; | 983 | break; |
950 | } | 984 | } |
985 | path_data->opm |= lpm; | ||
951 | if (conf_data != private->conf_data) | 986 | if (conf_data != private->conf_data) |
952 | kfree(conf_data); | 987 | kfree(conf_data); |
953 | } | 988 | } |
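With these read_conf changes the operational mask is no longer copied wholesale from the channel subsystem: opm is accumulated path by path, and only paths that actually delivered configuration data (or that legitimately lack RCD support) become operational. A condensed sketch of that accumulation loop, with a stand-in for the per-path RCD call:

#include <stdio.h>

typedef unsigned char u8;

/* Stand-in for dasd_eckd_read_conf_lpm: pretend path 0x20 fails hard
 * while the other paths deliver configuration data. */
static int read_conf_for_path(u8 lpm)
{
	return (lpm == 0x20) ? -1 : 0;
}

static u8 build_opm(u8 cssid_paths)
{
	u8 opm = 0;

	for (u8 lpm = 0x80; lpm; lpm >>= 1)
		if ((lpm & cssid_paths) && read_conf_for_path(lpm) == 0)
			opm |= lpm;     /* only proven paths become operational */
	return opm;
}

int main(void)
{
	printf("opm=%02x\n", build_opm(0xE0));   /* 0x80 and 0x40 survive -> c0 */
	return 0;
}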
@@ -955,6 +990,140 @@ static int dasd_eckd_read_conf(struct dasd_device *device) | |||
955 | return 0; | 990 | return 0; |
956 | } | 991 | } |
957 | 992 | ||
993 | static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm) | ||
994 | { | ||
995 | struct dasd_eckd_private *private; | ||
996 | int mdc; | ||
997 | u32 fcx_max_data; | ||
998 | |||
999 | private = (struct dasd_eckd_private *) device->private; | ||
1000 | if (private->fcx_max_data) { | ||
1001 | mdc = ccw_device_get_mdc(device->cdev, lpm); | ||
1002 | if ((mdc < 0)) { | ||
1003 | dev_warn(&device->cdev->dev, | ||
1004 | "Detecting the maximum data size for zHPF " | ||
1005 | "requests failed (rc=%d) for a new path %x\n", | ||
1006 | mdc, lpm); | ||
1007 | return mdc; | ||
1008 | } | ||
1009 | fcx_max_data = mdc * FCX_MAX_DATA_FACTOR; | ||
1010 | if (fcx_max_data < private->fcx_max_data) { | ||
1011 | dev_warn(&device->cdev->dev, | ||
1012 | "The maximum data size for zHPF requests %u " | ||
1013 | "on a new path %x is below the active maximum " | ||
1014 | "%u\n", fcx_max_data, lpm, | ||
1015 | private->fcx_max_data); | ||
1016 | return -EACCES; | ||
1017 | } | ||
1018 | } | ||
1019 | return 0; | ||
1020 | } | ||
1021 | |||
1022 | static void do_path_verification_work(struct work_struct *work) | ||
1023 | { | ||
1024 | struct path_verification_work_data *data; | ||
1025 | struct dasd_device *device; | ||
1026 | __u8 lpm, opm, npm, ppm, epm; | ||
1027 | unsigned long flags; | ||
1028 | int rc; | ||
1029 | |||
1030 | data = container_of(work, struct path_verification_work_data, worker); | ||
1031 | device = data->device; | ||
1032 | |||
1033 | opm = 0; | ||
1034 | npm = 0; | ||
1035 | ppm = 0; | ||
1036 | epm = 0; | ||
1037 | for (lpm = 0x80; lpm; lpm >>= 1) { | ||
1038 | if (lpm & data->tbvpm) { | ||
1039 | memset(data->rcd_buffer, 0, sizeof(data->rcd_buffer)); | ||
1040 | memset(&data->cqr, 0, sizeof(data->cqr)); | ||
1041 | data->cqr.cpaddr = &data->ccw; | ||
1042 | rc = dasd_eckd_read_conf_immediately(device, &data->cqr, | ||
1043 | data->rcd_buffer, | ||
1044 | lpm); | ||
1045 | if (!rc) { | ||
1046 | switch (dasd_eckd_path_access(data->rcd_buffer, | ||
1047 | DASD_ECKD_RCD_DATA_SIZE)) { | ||
1048 | case 0x02: | ||
1049 | npm |= lpm; | ||
1050 | break; | ||
1051 | case 0x03: | ||
1052 | ppm |= lpm; | ||
1053 | break; | ||
1054 | } | ||
1055 | opm |= lpm; | ||
1056 | } else if (rc == -EOPNOTSUPP) { | ||
1057 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", | ||
1058 | "path verification: No configuration " | ||
1059 | "data retrieved"); | ||
1060 | opm |= lpm; | ||
1061 | } else if (rc == -EAGAIN) { | ||
1062 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", | ||
1063 | "path verification: device is stopped," | ||
1064 | " try again later"); | ||
1065 | epm |= lpm; | ||
1066 | } else { | ||
1067 | dev_warn(&device->cdev->dev, | ||
1068 | "Reading device feature codes failed " | ||
1069 | "(rc=%d) for new path %x\n", rc, lpm); | ||
1070 | continue; | ||
1071 | } | ||
1072 | if (verify_fcx_max_data(device, lpm)) { | ||
1073 | opm &= ~lpm; | ||
1074 | npm &= ~lpm; | ||
1075 | ppm &= ~lpm; | ||
1076 | } | ||
1077 | } | ||
1078 | } | ||
1079 | /* | ||
1080 | * There is a small chance that a path is lost again between | ||
1081 | * above path verification and the following modification of | ||
1082 | * the device opm mask. We could avoid that race here by using | ||
1083 | * yet another path mask, but we rather deal with this unlikely | ||
1084 | * situation in dasd_start_IO. | ||
1085 | */ | ||
1086 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
1087 | if (!device->path_data.opm && opm) { | ||
1088 | device->path_data.opm = opm; | ||
1089 | dasd_generic_path_operational(device); | ||
1090 | } else | ||
1091 | device->path_data.opm |= opm; | ||
1092 | device->path_data.npm |= npm; | ||
1093 | device->path_data.ppm |= ppm; | ||
1094 | device->path_data.tbvpm |= epm; | ||
1095 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
1096 | |||
1097 | dasd_put_device(device); | ||
1098 | if (data->isglobal) | ||
1099 | mutex_unlock(&dasd_path_verification_mutex); | ||
1100 | else | ||
1101 | kfree(data); | ||
1102 | } | ||
1103 | |||
1104 | static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm) | ||
1105 | { | ||
1106 | struct path_verification_work_data *data; | ||
1107 | |||
1108 | data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA); | ||
1109 | if (!data) { | ||
1110 | if (mutex_trylock(&dasd_path_verification_mutex)) { | ||
1111 | data = path_verification_worker; | ||
1112 | data->isglobal = 1; | ||
1113 | } else | ||
1114 | return -ENOMEM; | ||
1115 | } else { | ||
1116 | memset(data, 0, sizeof(*data)); | ||
1117 | data->isglobal = 0; | ||
1118 | } | ||
1119 | INIT_WORK(&data->worker, do_path_verification_work); | ||
1120 | dasd_get_device(device); | ||
1121 | data->device = device; | ||
1122 | data->tbvpm = lpm; | ||
1123 | schedule_work(&data->worker); | ||
1124 | return 0; | ||
1125 | } | ||
1126 | |||
958 | static int dasd_eckd_read_features(struct dasd_device *device) | 1127 | static int dasd_eckd_read_features(struct dasd_device *device) |
959 | { | 1128 | { |
960 | struct dasd_psf_prssd_data *prssdp; | 1129 | struct dasd_psf_prssd_data *prssdp; |
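verify_fcx_max_data guards transport-mode (zHPF) I/O when a path appears at runtime: ccw_device_get_mdc reports the maximum data chunk for that path, the value is scaled by FCX_MAX_DATA_FACTOR, and a path that supports less than the fcx_max_data the device already uses is refused with -EACCES. A rough userspace model of that comparison (the query and return convention are simplified stand-ins):

#include <stdio.h>

#define FCX_MAX_DATA_FACTOR 65536   /* as defined in dasd_eckd.h */

/*
 * Sketch: decide whether a new path may join, given the mdc value the
 * channel subsystem reported for it and the fcx_max_data the device
 * already operates with (0 means zHPF is not in use).
 */
static int new_path_acceptable(int mdc, unsigned int active_fcx_max_data)
{
	unsigned int path_max;

	if (!active_fcx_max_data)
		return 1;               /* nothing to violate */
	if (mdc < 0)
		return 0;               /* querying the path failed */
	path_max = (unsigned int)mdc * FCX_MAX_DATA_FACTOR;
	return path_max >= active_fcx_max_data;
}

int main(void)
{
	printf("%d\n", new_path_acceptable(4, 2 * FCX_MAX_DATA_FACTOR)); /* 1: big enough */
	printf("%d\n", new_path_acceptable(1, 2 * FCX_MAX_DATA_FACTOR)); /* 0: too small  */
	return 0;
}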
@@ -1749,6 +1918,7 @@ static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) | |||
1749 | if (cqr->block && (cqr->startdev != cqr->block->base)) { | 1918 | if (cqr->block && (cqr->startdev != cqr->block->base)) { |
1750 | dasd_eckd_reset_ccw_to_base_io(cqr); | 1919 | dasd_eckd_reset_ccw_to_base_io(cqr); |
1751 | cqr->startdev = cqr->block->base; | 1920 | cqr->startdev = cqr->block->base; |
1921 | cqr->lpm = cqr->block->base->path_data.opm; | ||
1752 | } | 1922 | } |
1753 | }; | 1923 | }; |
1754 | 1924 | ||
@@ -2017,7 +2187,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( | |||
2017 | cqr->memdev = startdev; | 2187 | cqr->memdev = startdev; |
2018 | cqr->block = block; | 2188 | cqr->block = block; |
2019 | cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ | 2189 | cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ |
2020 | cqr->lpm = private->path_data.ppm; | 2190 | cqr->lpm = startdev->path_data.ppm; |
2021 | cqr->retries = 256; | 2191 | cqr->retries = 256; |
2022 | cqr->buildclk = get_clock(); | 2192 | cqr->buildclk = get_clock(); |
2023 | cqr->status = DASD_CQR_FILLED; | 2193 | cqr->status = DASD_CQR_FILLED; |
@@ -2194,7 +2364,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( | |||
2194 | cqr->memdev = startdev; | 2364 | cqr->memdev = startdev; |
2195 | cqr->block = block; | 2365 | cqr->block = block; |
2196 | cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ | 2366 | cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ |
2197 | cqr->lpm = private->path_data.ppm; | 2367 | cqr->lpm = startdev->path_data.ppm; |
2198 | cqr->retries = 256; | 2368 | cqr->retries = 256; |
2199 | cqr->buildclk = get_clock(); | 2369 | cqr->buildclk = get_clock(); |
2200 | cqr->status = DASD_CQR_FILLED; | 2370 | cqr->status = DASD_CQR_FILLED; |
@@ -2484,7 +2654,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( | |||
2484 | cqr->memdev = startdev; | 2654 | cqr->memdev = startdev; |
2485 | cqr->block = block; | 2655 | cqr->block = block; |
2486 | cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ | 2656 | cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ |
2487 | cqr->lpm = private->path_data.ppm; | 2657 | cqr->lpm = startdev->path_data.ppm; |
2488 | cqr->retries = 256; | 2658 | cqr->retries = 256; |
2489 | cqr->buildclk = get_clock(); | 2659 | cqr->buildclk = get_clock(); |
2490 | cqr->status = DASD_CQR_FILLED; | 2660 | cqr->status = DASD_CQR_FILLED; |
@@ -3624,6 +3794,7 @@ static struct ccw_driver dasd_eckd_driver = { | |||
3624 | .set_offline = dasd_generic_set_offline, | 3794 | .set_offline = dasd_generic_set_offline, |
3625 | .set_online = dasd_eckd_set_online, | 3795 | .set_online = dasd_eckd_set_online, |
3626 | .notify = dasd_generic_notify, | 3796 | .notify = dasd_generic_notify, |
3797 | .path_event = dasd_generic_path_event, | ||
3627 | .freeze = dasd_generic_pm_freeze, | 3798 | .freeze = dasd_generic_pm_freeze, |
3628 | .thaw = dasd_generic_restore_device, | 3799 | .thaw = dasd_generic_restore_device, |
3629 | .restore = dasd_generic_restore_device, | 3800 | .restore = dasd_generic_restore_device, |
@@ -3651,6 +3822,7 @@ static struct dasd_discipline dasd_eckd_discipline = { | |||
3651 | .check_device = dasd_eckd_check_characteristics, | 3822 | .check_device = dasd_eckd_check_characteristics, |
3652 | .uncheck_device = dasd_eckd_uncheck_device, | 3823 | .uncheck_device = dasd_eckd_uncheck_device, |
3653 | .do_analysis = dasd_eckd_do_analysis, | 3824 | .do_analysis = dasd_eckd_do_analysis, |
3825 | .verify_path = dasd_eckd_verify_path, | ||
3654 | .ready_to_online = dasd_eckd_ready_to_online, | 3826 | .ready_to_online = dasd_eckd_ready_to_online, |
3655 | .online_to_ready = dasd_eckd_online_to_ready, | 3827 | .online_to_ready = dasd_eckd_online_to_ready, |
3656 | .fill_geometry = dasd_eckd_fill_geometry, | 3828 | .fill_geometry = dasd_eckd_fill_geometry, |
@@ -3683,11 +3855,19 @@ dasd_eckd_init(void) | |||
3683 | GFP_KERNEL | GFP_DMA); | 3855 | GFP_KERNEL | GFP_DMA); |
3684 | if (!dasd_reserve_req) | 3856 | if (!dasd_reserve_req) |
3685 | return -ENOMEM; | 3857 | return -ENOMEM; |
3858 | path_verification_worker = kmalloc(sizeof(*path_verification_worker), | ||
3859 | GFP_KERNEL | GFP_DMA); | ||
3860 | if (!path_verification_worker) { | ||
3861 | kfree(dasd_reserve_req); | ||
3862 | return -ENOMEM; | ||
3863 | } | ||
3686 | ret = ccw_driver_register(&dasd_eckd_driver); | 3864 | ret = ccw_driver_register(&dasd_eckd_driver); |
3687 | if (!ret) | 3865 | if (!ret) |
3688 | wait_for_device_probe(); | 3866 | wait_for_device_probe(); |
3689 | else | 3867 | else { |
3868 | kfree(path_verification_worker); | ||
3690 | kfree(dasd_reserve_req); | 3869 | kfree(dasd_reserve_req); |
3870 | } | ||
3691 | return ret; | 3871 | return ret; |
3692 | } | 3872 | } |
3693 | 3873 | ||
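dasd_eckd_verify_path can be called from contexts where a GFP_ATOMIC allocation may fail, so dasd_eckd_init preallocates one global path_verification_work_data; the verify function falls back to it under mutex_trylock, and do_path_verification_work either unlocks the mutex or frees the per-call allocation when it finishes. A stripped-down sketch of that allocate-or-fall-back pattern without the kernel's workqueue machinery (a plain flag stands in for the mutex):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* One preallocated work item plus a "lock" flag modeling the mutex that
 * serializes use of the global buffer. */
static struct work { int isglobal; int tbvpm; } global_work;
static bool global_work_busy;

static struct work *get_work(bool alloc_fails)
{
	struct work *w = alloc_fails ? NULL : calloc(1, sizeof(*w));

	if (w) {
		w->isglobal = 0;
		return w;
	}
	if (!global_work_busy) {          /* mutex_trylock() in the driver */
		global_work_busy = true;
		global_work.isglobal = 1;
		return &global_work;
	}
	return NULL;                      /* -ENOMEM: caller retries via the device timer */
}

static void put_work(struct work *w)
{
	if (w->isglobal)
		global_work_busy = false; /* mutex_unlock() in the driver */
	else
		free(w);
}

int main(void)
{
	struct work *a = get_work(true);   /* allocation "fails": use global buffer */
	struct work *b = get_work(true);   /* global buffer busy: caller must retry */

	printf("a=%s b=%s\n",
	       a ? (a->isglobal ? "global" : "heap") : "none",
	       b ? "unexpected" : "none");
	put_work(a);
	return 0;
}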
@@ -3695,6 +3875,7 @@ static void __exit | |||
3695 | dasd_eckd_cleanup(void) | 3875 | dasd_eckd_cleanup(void) |
3696 | { | 3876 | { |
3697 | ccw_driver_unregister(&dasd_eckd_driver); | 3877 | ccw_driver_unregister(&dasd_eckd_driver); |
3878 | kfree(path_verification_worker); | ||
3698 | kfree(dasd_reserve_req); | 3879 | kfree(dasd_reserve_req); |
3699 | } | 3880 | } |
3700 | 3881 | ||
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 2150aed541be..5051f374cbcb 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -45,6 +45,7 @@ | |||
45 | #define DASD_ECKD_CCW_PFX 0xE7 | 45 | #define DASD_ECKD_CCW_PFX 0xE7 |
46 | #define DASD_ECKD_CCW_PFX_READ 0xEA | 46 | #define DASD_ECKD_CCW_PFX_READ 0xEA |
47 | #define DASD_ECKD_CCW_RSCK 0xF9 | 47 | #define DASD_ECKD_CCW_RSCK 0xF9 |
48 | #define DASD_ECKD_CCW_RCD 0xFA | ||
48 | 49 | ||
49 | /* | 50 | /* |
50 | * Perform Subsystem Function / Sub-Orders | 51 | * Perform Subsystem Function / Sub-Orders |
@@ -59,6 +60,7 @@ | |||
59 | 60 | ||
60 | 61 | ||
61 | #define FCX_MAX_DATA_FACTOR 65536 | 62 | #define FCX_MAX_DATA_FACTOR 65536 |
63 | #define DASD_ECKD_RCD_DATA_SIZE 256 | ||
62 | 64 | ||
63 | 65 | ||
64 | /***************************************************************************** | 66 | /***************************************************************************** |
@@ -335,12 +337,6 @@ struct dasd_gneq { | |||
335 | __u8 reserved2[22]; | 337 | __u8 reserved2[22]; |
336 | } __attribute__ ((packed)); | 338 | } __attribute__ ((packed)); |
337 | 339 | ||
338 | struct dasd_eckd_path { | ||
339 | __u8 opm; | ||
340 | __u8 ppm; | ||
341 | __u8 npm; | ||
342 | }; | ||
343 | |||
344 | struct dasd_rssd_features { | 340 | struct dasd_rssd_features { |
345 | char feature[256]; | 341 | char feature[256]; |
346 | } __attribute__((packed)); | 342 | } __attribute__((packed)); |
@@ -446,7 +442,6 @@ struct dasd_eckd_private { | |||
446 | struct vd_sneq *vdsneq; | 442 | struct vd_sneq *vdsneq; |
447 | struct dasd_gneq *gneq; | 443 | struct dasd_gneq *gneq; |
448 | 444 | ||
449 | struct dasd_eckd_path path_data; | ||
450 | struct eckd_count count_area[5]; | 445 | struct eckd_count count_area[5]; |
451 | int init_cqr_status; | 446 | int init_cqr_status; |
452 | int uses_cdl; | 447 | int uses_cdl; |
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 7656384a811d..0eafe2e421e7 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -96,7 +96,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr) | |||
96 | DBF_DEV_EVENT(DBF_DEBUG, device, | 96 | DBF_DEV_EVENT(DBF_DEBUG, device, |
97 | "default ERP called (%i retries left)", | 97 | "default ERP called (%i retries left)", |
98 | cqr->retries); | 98 | cqr->retries); |
99 | cqr->lpm = LPM_ANYPATH; | 99 | if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) |
100 | cqr->lpm = device->path_data.opm; | ||
100 | cqr->status = DASD_CQR_FILLED; | 101 | cqr->status = DASD_CQR_FILLED; |
101 | } else { | 102 | } else { |
102 | pr_err("%s: default ERP has run out of retries and failed\n", | 103 | pr_err("%s: default ERP has run out of retries and failed\n", |
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index bec5486e0e6d..86bacda2c5f6 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -73,6 +73,7 @@ static struct ccw_driver dasd_fba_driver = { | |||
73 | .set_offline = dasd_generic_set_offline, | 73 | .set_offline = dasd_generic_set_offline, |
74 | .set_online = dasd_fba_set_online, | 74 | .set_online = dasd_fba_set_online, |
75 | .notify = dasd_generic_notify, | 75 | .notify = dasd_generic_notify, |
76 | .path_event = dasd_generic_path_event, | ||
76 | .freeze = dasd_generic_pm_freeze, | 77 | .freeze = dasd_generic_pm_freeze, |
77 | .thaw = dasd_generic_restore_device, | 78 | .thaw = dasd_generic_restore_device, |
78 | .restore = dasd_generic_restore_device, | 79 | .restore = dasd_generic_restore_device, |
@@ -164,6 +165,7 @@ dasd_fba_check_characteristics(struct dasd_device *device) | |||
164 | } | 165 | } |
165 | 166 | ||
166 | device->default_expires = DASD_EXPIRES; | 167 | device->default_expires = DASD_EXPIRES; |
168 | device->path_data.opm = LPM_ANYPATH; | ||
167 | 169 | ||
168 | readonly = dasd_device_is_ro(device); | 170 | readonly = dasd_device_is_ro(device); |
169 | if (readonly) | 171 | if (readonly) |
@@ -596,6 +598,7 @@ static struct dasd_discipline dasd_fba_discipline = { | |||
596 | .max_blocks = 96, | 598 | .max_blocks = 96, |
597 | .check_device = dasd_fba_check_characteristics, | 599 | .check_device = dasd_fba_check_characteristics, |
598 | .do_analysis = dasd_fba_do_analysis, | 600 | .do_analysis = dasd_fba_do_analysis, |
601 | .verify_path = dasd_generic_verify_path, | ||
599 | .fill_geometry = dasd_fba_fill_geometry, | 602 | .fill_geometry = dasd_fba_fill_geometry, |
600 | .start_IO = dasd_start_IO, | 603 | .start_IO = dasd_start_IO, |
601 | .term_IO = dasd_term_IO, | 604 | .term_IO = dasd_term_IO, |
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 500678d7116c..ba038ef57606 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -231,6 +231,7 @@ struct dasd_ccw_req { | |||
231 | /* per dasd_ccw_req flags */ | 231 | /* per dasd_ccw_req flags */ |
232 | #define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ | 232 | #define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ |
233 | #define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */ | 233 | #define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */ |
234 | #define DASD_CQR_VERIFY_PATH 2 /* path verification request */ | ||
234 | 235 | ||
235 | /* Signature for error recovery functions. */ | 236 | /* Signature for error recovery functions. */ |
236 | typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *); | 237 | typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *); |
@@ -287,6 +288,14 @@ struct dasd_discipline { | |||
287 | int (*do_analysis) (struct dasd_block *); | 288 | int (*do_analysis) (struct dasd_block *); |
288 | 289 | ||
289 | /* | 290 | /* |
291 | * This function is called, when new paths become available. | ||
292 | * Disciplines may use this callback to do necessary setup work, | ||
293 | * e.g. verify that new path is compatible with the current | ||
294 | * configuration. | ||
295 | */ | ||
296 | int (*verify_path)(struct dasd_device *, __u8); | ||
297 | |||
298 | /* | ||
290 | * Last things to do when a device is set online, and first things | 299 | * Last things to do when a device is set online, and first things |
291 | * when it is set offline. | 300 | * when it is set offline. |
292 | */ | 301 | */ |
@@ -362,6 +371,13 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer; | |||
362 | #define DASD_EER_STATECHANGE 3 | 371 | #define DASD_EER_STATECHANGE 3 |
363 | #define DASD_EER_PPRCSUSPEND 4 | 372 | #define DASD_EER_PPRCSUSPEND 4 |
364 | 373 | ||
374 | struct dasd_path { | ||
375 | __u8 opm; | ||
376 | __u8 tbvpm; | ||
377 | __u8 ppm; | ||
378 | __u8 npm; | ||
379 | }; | ||
380 | |||
365 | struct dasd_device { | 381 | struct dasd_device { |
366 | /* Block device stuff. */ | 382 | /* Block device stuff. */ |
367 | struct dasd_block *block; | 383 | struct dasd_block *block; |
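The new struct dasd_path moves the path bookkeeping into generic code: opm holds the verified, usable paths, ppm and npm the preferred and non-preferred subsets of opm, and tbvpm the paths that have appeared but still need verification. A short sketch of a plausible invariant over those masks (the consistency helper is made up for illustration):

#include <stdio.h>

typedef unsigned char u8;

struct dasd_path {
	u8 opm;    /* operational (verified) paths */
	u8 tbvpm;  /* paths still to be verified */
	u8 ppm;    /* preferred paths, subset of opm */
	u8 npm;    /* non-preferred paths, subset of opm */
};

/* Hypothetical sanity check: ppm/npm never exceed opm, and a path is
 * either usable or waiting for verification, not both. */
static int dasd_path_consistent(const struct dasd_path *p)
{
	return !(p->ppm & ~p->opm) && !(p->npm & ~p->opm) && !(p->opm & p->tbvpm);
}

int main(void)
{
	struct dasd_path p = { .opm = 0xC0, .ppm = 0x80, .npm = 0x40, .tbvpm = 0x20 };

	printf("%s\n", dasd_path_consistent(&p) ? "consistent" : "inconsistent");
	return 0;
}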
@@ -377,6 +393,7 @@ struct dasd_device { | |||
377 | struct dasd_discipline *discipline; | 393 | struct dasd_discipline *discipline; |
378 | struct dasd_discipline *base_discipline; | 394 | struct dasd_discipline *base_discipline; |
379 | char *private; | 395 | char *private; |
396 | struct dasd_path path_data; | ||
380 | 397 | ||
381 | /* Device state and target state. */ | 398 | /* Device state and target state. */ |
382 | int state, target; | 399 | int state, target; |
@@ -620,10 +637,15 @@ void dasd_generic_remove (struct ccw_device *cdev); | |||
620 | int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); | 637 | int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); |
621 | int dasd_generic_set_offline (struct ccw_device *cdev); | 638 | int dasd_generic_set_offline (struct ccw_device *cdev); |
622 | int dasd_generic_notify(struct ccw_device *, int); | 639 | int dasd_generic_notify(struct ccw_device *, int); |
640 | int dasd_generic_last_path_gone(struct dasd_device *); | ||
641 | int dasd_generic_path_operational(struct dasd_device *); | ||
642 | |||
623 | void dasd_generic_handle_state_change(struct dasd_device *); | 643 | void dasd_generic_handle_state_change(struct dasd_device *); |
624 | int dasd_generic_pm_freeze(struct ccw_device *); | 644 | int dasd_generic_pm_freeze(struct ccw_device *); |
625 | int dasd_generic_restore_device(struct ccw_device *); | 645 | int dasd_generic_restore_device(struct ccw_device *); |
626 | enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *); | 646 | enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *); |
647 | void dasd_generic_path_event(struct ccw_device *, int *); | ||
648 | int dasd_generic_verify_path(struct dasd_device *, __u8); | ||
627 | 649 | ||
628 | int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); | 650 | int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); |
629 | char *dasd_get_sense(struct irb *); | 651 | char *dasd_get_sense(struct irb *); |