Diffstat (limited to 'drivers/s390')
42 files changed, 1222 insertions, 708 deletions
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index bed7b4634ccd..8d41f3ed38d7 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1083,6 +1083,49 @@ dasd_eer_store(struct device *dev, struct device_attribute *attr, | |||
1083 | 1083 | ||
1084 | static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store); | 1084 | static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store); |
1085 | 1085 | ||
1086 | /* | ||
1087 | * expiration time for default requests | ||
1088 | */ | ||
1089 | static ssize_t | ||
1090 | dasd_expires_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1091 | { | ||
1092 | struct dasd_device *device; | ||
1093 | int len; | ||
1094 | |||
1095 | device = dasd_device_from_cdev(to_ccwdev(dev)); | ||
1096 | if (IS_ERR(device)) | ||
1097 | return -ENODEV; | ||
1098 | len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_expires); | ||
1099 | dasd_put_device(device); | ||
1100 | return len; | ||
1101 | } | ||
1102 | |||
1103 | static ssize_t | ||
1104 | dasd_expires_store(struct device *dev, struct device_attribute *attr, | ||
1105 | const char *buf, size_t count) | ||
1106 | { | ||
1107 | struct dasd_device *device; | ||
1108 | unsigned long val; | ||
1109 | |||
1110 | device = dasd_device_from_cdev(to_ccwdev(dev)); | ||
1111 | if (IS_ERR(device)) | ||
1112 | return -ENODEV; | ||
1113 | |||
1114 | if ((strict_strtoul(buf, 10, &val) != 0) || | ||
1115 | (val > DASD_EXPIRES_MAX) || val == 0) { | ||
1116 | dasd_put_device(device); | ||
1117 | return -EINVAL; | ||
1118 | } | ||
1119 | |||
1120 | if (val) | ||
1121 | device->default_expires = val; | ||
1122 | |||
1123 | dasd_put_device(device); | ||
1124 | return count; | ||
1125 | } | ||
1126 | |||
1127 | static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store); | ||
1128 | |||
1086 | static struct attribute * dasd_attrs[] = { | 1129 | static struct attribute * dasd_attrs[] = { |
1087 | &dev_attr_readonly.attr, | 1130 | &dev_attr_readonly.attr, |
1088 | &dev_attr_discipline.attr, | 1131 | &dev_attr_discipline.attr, |
@@ -1094,6 +1137,7 @@ static struct attribute * dasd_attrs[] = { | |||
1094 | &dev_attr_eer_enabled.attr, | 1137 | &dev_attr_eer_enabled.attr, |
1095 | &dev_attr_erplog.attr, | 1138 | &dev_attr_erplog.attr, |
1096 | &dev_attr_failfast.attr, | 1139 | &dev_attr_failfast.attr, |
1140 | &dev_attr_expires.attr, | ||
1097 | NULL, | 1141 | NULL, |
1098 | }; | 1142 | }; |
1099 | 1143 | ||
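The hunk above exposes the per-device default expiration time as a writable "expires" sysfs attribute. Below is a minimal userspace sketch of driving the new attribute; the bus id 0.0.4711 and the exact sysfs path are assumptions based on the usual ccw device layout, not something this patch states.

/* Userspace sketch: set the default expiration time of a DASD to 120 seconds.
 * Assumption: the device is bound at bus id 0.0.4711 and the attribute sits
 * under the standard ccw sysfs path. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/ccw/devices/0.0.4711/expires";
	const char *val = "120\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open expires attribute");
		return 1;
	}
	if (write(fd, val, strlen(val)) != (ssize_t)strlen(val))
		perror("write expires attribute");
	close(fd);
	return 0;
}

A value of 0 or anything above DASD_EXPIRES_MAX is rejected with EINVAL, matching the checks in dasd_expires_store() above.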
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 687f323cdc38..2b3bc3ec0541 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL"); | |||
43 | sizeof(struct dasd_diag_req)) / \ | 43 | sizeof(struct dasd_diag_req)) / \ |
44 | sizeof(struct dasd_diag_bio)) / 2) | 44 | sizeof(struct dasd_diag_bio)) / 2) |
45 | #define DIAG_MAX_RETRIES 32 | 45 | #define DIAG_MAX_RETRIES 32 |
46 | #define DIAG_TIMEOUT 50 * HZ | 46 | #define DIAG_TIMEOUT 50 |
47 | 47 | ||
48 | static struct dasd_discipline dasd_diag_discipline; | 48 | static struct dasd_discipline dasd_diag_discipline; |
49 | 49 | ||
@@ -360,6 +360,8 @@ dasd_diag_check_device(struct dasd_device *device) | |||
360 | goto out; | 360 | goto out; |
361 | } | 361 | } |
362 | 362 | ||
363 | device->default_expires = DIAG_TIMEOUT; | ||
364 | |||
363 | /* Figure out position of label block */ | 365 | /* Figure out position of label block */ |
364 | switch (private->rdc_data.vdev_class) { | 366 | switch (private->rdc_data.vdev_class) { |
365 | case DEV_CLASS_FBA: | 367 | case DEV_CLASS_FBA: |
@@ -563,7 +565,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, | |||
563 | cqr->startdev = memdev; | 565 | cqr->startdev = memdev; |
564 | cqr->memdev = memdev; | 566 | cqr->memdev = memdev; |
565 | cqr->block = block; | 567 | cqr->block = block; |
566 | cqr->expires = DIAG_TIMEOUT; | 568 | cqr->expires = memdev->default_expires * HZ; |
567 | cqr->status = DASD_CQR_FILLED; | 569 | cqr->status = DASD_CQR_FILLED; |
568 | return cqr; | 570 | return cqr; |
569 | } | 571 | } |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index ab84da5592e8..66360c24bd48 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -82,6 +82,14 @@ static struct ccw_driver dasd_eckd_driver; /* see below */ | |||
82 | #define INIT_CQR_UNFORMATTED 1 | 82 | #define INIT_CQR_UNFORMATTED 1 |
83 | #define INIT_CQR_ERROR 2 | 83 | #define INIT_CQR_ERROR 2 |
84 | 84 | ||
85 | /* emergency request for reserve/release */ | ||
86 | static struct { | ||
87 | struct dasd_ccw_req cqr; | ||
88 | struct ccw1 ccw; | ||
89 | char data[32]; | ||
90 | } *dasd_reserve_req; | ||
91 | static DEFINE_MUTEX(dasd_reserve_mutex); | ||
92 | |||
85 | 93 | ||
86 | /* initial attempt at a probe function. this can be simplified once | 94 | /* initial attempt at a probe function. this can be simplified once |
87 | * the other detection code is gone */ | 95 | * the other detection code is gone */ |
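The static dasd_reserve_req/dasd_reserve_mutex pair added here gives reserve, release and steal-lock a last-resort request that does not depend on a fresh allocation, so a reservation can still be broken while the machine is short on memory. A generic sketch of that fallback pattern follows; the identifiers scratch_req, get_req and put_req are illustrative and do not appear in the driver.

/* Sketch of the "preallocated emergency object + mutex" fallback used by the
 * reserve/release/steal-lock paths below.  All identifiers here are made up
 * for illustration; only the pattern matches the patch. */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>

struct scratch_req {
	char buf[64];
};

static struct scratch_req emergency_req;
static DEFINE_MUTEX(emergency_req_mutex);

static struct scratch_req *get_req(void)
{
	struct scratch_req *r = kzalloc(sizeof(*r), GFP_KERNEL | GFP_DMA);

	if (r)
		return r;			/* normal per-call allocation */
	mutex_lock(&emergency_req_mutex);	/* serialize fallback users */
	memset(&emergency_req, 0, sizeof(emergency_req));
	return &emergency_req;
}

static void put_req(struct scratch_req *r)
{
	if (r == &emergency_req)
		mutex_unlock(&emergency_req_mutex);
	else
		kfree(r);
}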
@@ -1107,8 +1115,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device) | |||
1107 | struct dasd_eckd_private *private; | 1115 | struct dasd_eckd_private *private; |
1108 | struct dasd_block *block; | 1116 | struct dasd_block *block; |
1109 | struct dasd_uid temp_uid; | 1117 | struct dasd_uid temp_uid; |
1110 | int is_known, rc; | 1118 | int is_known, rc, i; |
1111 | int readonly; | 1119 | int readonly; |
1120 | unsigned long value; | ||
1112 | 1121 | ||
1113 | if (!ccw_device_is_pathgroup(device->cdev)) { | 1122 | if (!ccw_device_is_pathgroup(device->cdev)) { |
1114 | dev_warn(&device->cdev->dev, | 1123 | dev_warn(&device->cdev->dev, |
@@ -1143,6 +1152,18 @@ dasd_eckd_check_characteristics(struct dasd_device *device) | |||
1143 | if (rc) | 1152 | if (rc) |
1144 | goto out_err1; | 1153 | goto out_err1; |
1145 | 1154 | ||
1155 | /* set default timeout */ | ||
1156 | device->default_expires = DASD_EXPIRES; | ||
1157 | if (private->gneq) { | ||
1158 | value = 1; | ||
1159 | for (i = 0; i < private->gneq->timeout.value; i++) | ||
1160 | value = 10 * value; | ||
1161 | value = value * private->gneq->timeout.number; | ||
1162 | /* do not accept useless values */ | ||
1163 | if (value != 0 && value <= DASD_EXPIRES_MAX) | ||
1164 | device->default_expires = value; | ||
1165 | } | ||
1166 | |||
1146 | /* Generate device unique id */ | 1167 | /* Generate device unique id */ |
1147 | rc = dasd_eckd_generate_uid(device); | 1168 | rc = dasd_eckd_generate_uid(device); |
1148 | if (rc) | 1169 | if (rc) |
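The block above derives the default expiration time from the configuration data: the gneq timeout field (see the dasd_eckd.h change later in this diff) carries a 6-bit number and a 2-bit decimal exponent, giving number * 10^value seconds, and the result is accepted only when it is non-zero and at most DASD_EXPIRES_MAX. A small self-contained sketch of that decode, with an illustrative helper name:

/* Illustrative decode of the gneq timeout field: seconds = number * 10^value,
 * clamped to the limits introduced in dasd_int.h by this patch.  The helper
 * name is made up; the driver does this inline in check_characteristics(). */
#define DASD_EXPIRES		300		/* default, in seconds */
#define DASD_EXPIRES_MAX	40000000

static unsigned long dasd_decode_default_expires(unsigned int value,
						 unsigned int number)
{
	unsigned long seconds = number;
	unsigned int i;

	for (i = 0; i < value; i++)
		seconds *= 10;
	if (seconds == 0 || seconds > DASD_EXPIRES_MAX)
		return DASD_EXPIRES;		/* keep the 300 second default */
	return seconds;
}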
@@ -1973,7 +1994,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( | |||
1973 | cqr->startdev = startdev; | 1994 | cqr->startdev = startdev; |
1974 | cqr->memdev = startdev; | 1995 | cqr->memdev = startdev; |
1975 | cqr->block = block; | 1996 | cqr->block = block; |
1976 | cqr->expires = 5 * 60 * HZ; /* 5 minutes */ | 1997 | cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ |
1977 | cqr->lpm = private->path_data.ppm; | 1998 | cqr->lpm = private->path_data.ppm; |
1978 | cqr->retries = 256; | 1999 | cqr->retries = 256; |
1979 | cqr->buildclk = get_clock(); | 2000 | cqr->buildclk = get_clock(); |
@@ -2150,7 +2171,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( | |||
2150 | cqr->startdev = startdev; | 2171 | cqr->startdev = startdev; |
2151 | cqr->memdev = startdev; | 2172 | cqr->memdev = startdev; |
2152 | cqr->block = block; | 2173 | cqr->block = block; |
2153 | cqr->expires = 5 * 60 * HZ; /* 5 minutes */ | 2174 | cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ |
2154 | cqr->lpm = private->path_data.ppm; | 2175 | cqr->lpm = private->path_data.ppm; |
2155 | cqr->retries = 256; | 2176 | cqr->retries = 256; |
2156 | cqr->buildclk = get_clock(); | 2177 | cqr->buildclk = get_clock(); |
@@ -2398,7 +2419,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( | |||
2398 | cqr->startdev = startdev; | 2419 | cqr->startdev = startdev; |
2399 | cqr->memdev = startdev; | 2420 | cqr->memdev = startdev; |
2400 | cqr->block = block; | 2421 | cqr->block = block; |
2401 | cqr->expires = 5 * 60 * HZ; /* 5 minutes */ | 2422 | cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ |
2402 | cqr->lpm = private->path_data.ppm; | 2423 | cqr->lpm = private->path_data.ppm; |
2403 | cqr->retries = 256; | 2424 | cqr->retries = 256; |
2404 | cqr->buildclk = get_clock(); | 2425 | cqr->buildclk = get_clock(); |
@@ -2645,15 +2666,23 @@ dasd_eckd_release(struct dasd_device *device) | |||
2645 | struct dasd_ccw_req *cqr; | 2666 | struct dasd_ccw_req *cqr; |
2646 | int rc; | 2667 | int rc; |
2647 | struct ccw1 *ccw; | 2668 | struct ccw1 *ccw; |
2669 | int useglobal; | ||
2648 | 2670 | ||
2649 | if (!capable(CAP_SYS_ADMIN)) | 2671 | if (!capable(CAP_SYS_ADMIN)) |
2650 | return -EACCES; | 2672 | return -EACCES; |
2651 | 2673 | ||
2674 | useglobal = 0; | ||
2652 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); | 2675 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); |
2653 | if (IS_ERR(cqr)) { | 2676 | if (IS_ERR(cqr)) { |
2654 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 2677 | mutex_lock(&dasd_reserve_mutex); |
2655 | "Could not allocate initialization request"); | 2678 | useglobal = 1; |
2656 | return PTR_ERR(cqr); | 2679 | cqr = &dasd_reserve_req->cqr; |
2680 | memset(cqr, 0, sizeof(*cqr)); | ||
2681 | memset(&dasd_reserve_req->ccw, 0, | ||
2682 | sizeof(dasd_reserve_req->ccw)); | ||
2683 | cqr->cpaddr = &dasd_reserve_req->ccw; | ||
2684 | cqr->data = &dasd_reserve_req->data; | ||
2685 | cqr->magic = DASD_ECKD_MAGIC; | ||
2657 | } | 2686 | } |
2658 | ccw = cqr->cpaddr; | 2687 | ccw = cqr->cpaddr; |
2659 | ccw->cmd_code = DASD_ECKD_CCW_RELEASE; | 2688 | ccw->cmd_code = DASD_ECKD_CCW_RELEASE; |
@@ -2671,7 +2700,10 @@ dasd_eckd_release(struct dasd_device *device) | |||
2671 | 2700 | ||
2672 | rc = dasd_sleep_on_immediatly(cqr); | 2701 | rc = dasd_sleep_on_immediatly(cqr); |
2673 | 2702 | ||
2674 | dasd_sfree_request(cqr, cqr->memdev); | 2703 | if (useglobal) |
2704 | mutex_unlock(&dasd_reserve_mutex); | ||
2705 | else | ||
2706 | dasd_sfree_request(cqr, cqr->memdev); | ||
2675 | return rc; | 2707 | return rc; |
2676 | } | 2708 | } |
2677 | 2709 | ||
@@ -2687,15 +2719,23 @@ dasd_eckd_reserve(struct dasd_device *device) | |||
2687 | struct dasd_ccw_req *cqr; | 2719 | struct dasd_ccw_req *cqr; |
2688 | int rc; | 2720 | int rc; |
2689 | struct ccw1 *ccw; | 2721 | struct ccw1 *ccw; |
2722 | int useglobal; | ||
2690 | 2723 | ||
2691 | if (!capable(CAP_SYS_ADMIN)) | 2724 | if (!capable(CAP_SYS_ADMIN)) |
2692 | return -EACCES; | 2725 | return -EACCES; |
2693 | 2726 | ||
2727 | useglobal = 0; | ||
2694 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); | 2728 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); |
2695 | if (IS_ERR(cqr)) { | 2729 | if (IS_ERR(cqr)) { |
2696 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 2730 | mutex_lock(&dasd_reserve_mutex); |
2697 | "Could not allocate initialization request"); | 2731 | useglobal = 1; |
2698 | return PTR_ERR(cqr); | 2732 | cqr = &dasd_reserve_req->cqr; |
2733 | memset(cqr, 0, sizeof(*cqr)); | ||
2734 | memset(&dasd_reserve_req->ccw, 0, | ||
2735 | sizeof(dasd_reserve_req->ccw)); | ||
2736 | cqr->cpaddr = &dasd_reserve_req->ccw; | ||
2737 | cqr->data = &dasd_reserve_req->data; | ||
2738 | cqr->magic = DASD_ECKD_MAGIC; | ||
2699 | } | 2739 | } |
2700 | ccw = cqr->cpaddr; | 2740 | ccw = cqr->cpaddr; |
2701 | ccw->cmd_code = DASD_ECKD_CCW_RESERVE; | 2741 | ccw->cmd_code = DASD_ECKD_CCW_RESERVE; |
@@ -2713,7 +2753,10 @@ dasd_eckd_reserve(struct dasd_device *device) | |||
2713 | 2753 | ||
2714 | rc = dasd_sleep_on_immediatly(cqr); | 2754 | rc = dasd_sleep_on_immediatly(cqr); |
2715 | 2755 | ||
2716 | dasd_sfree_request(cqr, cqr->memdev); | 2756 | if (useglobal) |
2757 | mutex_unlock(&dasd_reserve_mutex); | ||
2758 | else | ||
2759 | dasd_sfree_request(cqr, cqr->memdev); | ||
2717 | return rc; | 2760 | return rc; |
2718 | } | 2761 | } |
2719 | 2762 | ||
@@ -2728,15 +2771,23 @@ dasd_eckd_steal_lock(struct dasd_device *device) | |||
2728 | struct dasd_ccw_req *cqr; | 2771 | struct dasd_ccw_req *cqr; |
2729 | int rc; | 2772 | int rc; |
2730 | struct ccw1 *ccw; | 2773 | struct ccw1 *ccw; |
2774 | int useglobal; | ||
2731 | 2775 | ||
2732 | if (!capable(CAP_SYS_ADMIN)) | 2776 | if (!capable(CAP_SYS_ADMIN)) |
2733 | return -EACCES; | 2777 | return -EACCES; |
2734 | 2778 | ||
2779 | useglobal = 0; | ||
2735 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); | 2780 | cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); |
2736 | if (IS_ERR(cqr)) { | 2781 | if (IS_ERR(cqr)) { |
2737 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | 2782 | mutex_lock(&dasd_reserve_mutex); |
2738 | "Could not allocate initialization request"); | 2783 | useglobal = 1; |
2739 | return PTR_ERR(cqr); | 2784 | cqr = &dasd_reserve_req->cqr; |
2785 | memset(cqr, 0, sizeof(*cqr)); | ||
2786 | memset(&dasd_reserve_req->ccw, 0, | ||
2787 | sizeof(dasd_reserve_req->ccw)); | ||
2788 | cqr->cpaddr = &dasd_reserve_req->ccw; | ||
2789 | cqr->data = &dasd_reserve_req->data; | ||
2790 | cqr->magic = DASD_ECKD_MAGIC; | ||
2740 | } | 2791 | } |
2741 | ccw = cqr->cpaddr; | 2792 | ccw = cqr->cpaddr; |
2742 | ccw->cmd_code = DASD_ECKD_CCW_SLCK; | 2793 | ccw->cmd_code = DASD_ECKD_CCW_SLCK; |
@@ -2754,7 +2805,10 @@ dasd_eckd_steal_lock(struct dasd_device *device) | |||
2754 | 2805 | ||
2755 | rc = dasd_sleep_on_immediatly(cqr); | 2806 | rc = dasd_sleep_on_immediatly(cqr); |
2756 | 2807 | ||
2757 | dasd_sfree_request(cqr, cqr->memdev); | 2808 | if (useglobal) |
2809 | mutex_unlock(&dasd_reserve_mutex); | ||
2810 | else | ||
2811 | dasd_sfree_request(cqr, cqr->memdev); | ||
2758 | return rc; | 2812 | return rc; |
2759 | } | 2813 | } |
2760 | 2814 | ||
@@ -3488,10 +3542,15 @@ dasd_eckd_init(void) | |||
3488 | int ret; | 3542 | int ret; |
3489 | 3543 | ||
3490 | ASCEBC(dasd_eckd_discipline.ebcname, 4); | 3544 | ASCEBC(dasd_eckd_discipline.ebcname, 4); |
3545 | dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req), | ||
3546 | GFP_KERNEL | GFP_DMA); | ||
3547 | if (!dasd_reserve_req) | ||
3548 | return -ENOMEM; | ||
3491 | ret = ccw_driver_register(&dasd_eckd_driver); | 3549 | ret = ccw_driver_register(&dasd_eckd_driver); |
3492 | if (!ret) | 3550 | if (!ret) |
3493 | wait_for_device_probe(); | 3551 | wait_for_device_probe(); |
3494 | 3552 | else | |
3553 | kfree(dasd_reserve_req); | ||
3495 | return ret; | 3554 | return ret; |
3496 | } | 3555 | } |
3497 | 3556 | ||
@@ -3499,6 +3558,7 @@ static void __exit | |||
3499 | dasd_eckd_cleanup(void) | 3558 | dasd_eckd_cleanup(void) |
3500 | { | 3559 | { |
3501 | ccw_driver_unregister(&dasd_eckd_driver); | 3560 | ccw_driver_unregister(&dasd_eckd_driver); |
3561 | kfree(dasd_reserve_req); | ||
3502 | } | 3562 | } |
3503 | 3563 | ||
3504 | module_init(dasd_eckd_init); | 3564 | module_init(dasd_eckd_init); |
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index dd6385a5af14..0eb49655a6cd 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -320,7 +320,12 @@ struct dasd_gneq { | |||
320 | __u8 identifier:2; | 320 | __u8 identifier:2; |
321 | __u8 reserved:6; | 321 | __u8 reserved:6; |
322 | } __attribute__ ((packed)) flags; | 322 | } __attribute__ ((packed)) flags; |
323 | __u8 reserved[7]; | 323 | __u8 reserved[5]; |
324 | struct { | ||
325 | __u8 value:2; | ||
326 | __u8 number:6; | ||
327 | } __attribute__ ((packed)) timeout; | ||
328 | __u8 reserved3; | ||
324 | __u16 subsystemID; | 329 | __u16 subsystemID; |
325 | __u8 reserved2[22]; | 330 | __u8 reserved2[22]; |
326 | } __attribute__ ((packed)); | 331 | } __attribute__ ((packed)); |
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index dd88803e4899..7158f9528ecc 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -701,7 +701,7 @@ int __init dasd_eer_init(void) | |||
701 | void dasd_eer_exit(void) | 701 | void dasd_eer_exit(void) |
702 | { | 702 | { |
703 | if (dasd_eer_dev) { | 703 | if (dasd_eer_dev) { |
704 | WARN_ON(misc_deregister(dasd_eer_dev) != 0); | 704 | misc_deregister(dasd_eer_dev); |
705 | kfree(dasd_eer_dev); | 705 | kfree(dasd_eer_dev); |
706 | dasd_eer_dev = NULL; | 706 | dasd_eer_dev = NULL; |
707 | } | 707 | } |
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 37282b90eecc..bec5486e0e6d 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -163,6 +163,8 @@ dasd_fba_check_characteristics(struct dasd_device *device) | |||
163 | return rc; | 163 | return rc; |
164 | } | 164 | } |
165 | 165 | ||
166 | device->default_expires = DASD_EXPIRES; | ||
167 | |||
166 | readonly = dasd_device_is_ro(device); | 168 | readonly = dasd_device_is_ro(device); |
167 | if (readonly) | 169 | if (readonly) |
168 | set_bit(DASD_FLAG_DEVICE_RO, &device->flags); | 170 | set_bit(DASD_FLAG_DEVICE_RO, &device->flags); |
@@ -370,7 +372,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, | |||
370 | cqr->startdev = memdev; | 372 | cqr->startdev = memdev; |
371 | cqr->memdev = memdev; | 373 | cqr->memdev = memdev; |
372 | cqr->block = block; | 374 | cqr->block = block; |
373 | cqr->expires = 5 * 60 * HZ; /* 5 minutes */ | 375 | cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */ |
374 | cqr->retries = 32; | 376 | cqr->retries = 32; |
375 | cqr->buildclk = get_clock(); | 377 | cqr->buildclk = get_clock(); |
376 | cqr->status = DASD_CQR_FILLED; | 378 | cqr->status = DASD_CQR_FILLED; |
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 49b431d135e0..500678d7116c 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -186,7 +186,7 @@ struct dasd_ccw_req { | |||
186 | 186 | ||
187 | /* ... and how */ | 187 | /* ... and how */ |
188 | unsigned long starttime; /* jiffies time of request start */ | 188 | unsigned long starttime; /* jiffies time of request start */ |
189 | int expires; /* expiration period in jiffies */ | 189 | unsigned long expires; /* expiration period in jiffies */ |
190 | char lpm; /* logical path mask */ | 190 | char lpm; /* logical path mask */ |
191 | void *data; /* pointer to data area */ | 191 | void *data; /* pointer to data area */ |
192 | 192 | ||
@@ -224,6 +224,9 @@ struct dasd_ccw_req { | |||
224 | #define DASD_CQR_CLEARED 0x84 /* request was cleared */ | 224 | #define DASD_CQR_CLEARED 0x84 /* request was cleared */ |
225 | #define DASD_CQR_SUCCESS 0x85 /* request was successful */ | 225 | #define DASD_CQR_SUCCESS 0x85 /* request was successful */ |
226 | 226 | ||
227 | /* default expiration time*/ | ||
228 | #define DASD_EXPIRES 300 | ||
229 | #define DASD_EXPIRES_MAX 40000000 | ||
227 | 230 | ||
228 | /* per dasd_ccw_req flags */ | 231 | /* per dasd_ccw_req flags */ |
229 | #define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ | 232 | #define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ |
@@ -404,6 +407,9 @@ struct dasd_device { | |||
404 | 407 | ||
405 | /* hook for alias management */ | 408 | /* hook for alias management */ |
406 | struct list_head alias_list; | 409 | struct list_head alias_list; |
410 | |||
411 | /* default expiration time in s */ | ||
412 | unsigned long default_expires; | ||
407 | }; | 413 | }; |
408 | 414 | ||
409 | struct dasd_block { | 415 | struct dasd_block { |
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 2ed3f82e5c30..e021ec663ef9 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -627,7 +627,7 @@ out_iucv: | |||
627 | static void __exit mon_exit(void) | 627 | static void __exit mon_exit(void) |
628 | { | 628 | { |
629 | segment_unload(mon_dcss_name); | 629 | segment_unload(mon_dcss_name); |
630 | WARN_ON(misc_deregister(&mon_dev) != 0); | 630 | misc_deregister(&mon_dev); |
631 | device_unregister(monreader_device); | 631 | device_unregister(monreader_device); |
632 | driver_unregister(&monreader_driver); | 632 | driver_unregister(&monreader_driver); |
633 | iucv_unregister(&monreader_iucv_handler, 1); | 633 | iucv_unregister(&monreader_iucv_handler, 1); |
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 98a49dfda1de..572a1e7fd099 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -380,7 +380,7 @@ out_driver: | |||
380 | 380 | ||
381 | static void __exit mon_exit(void) | 381 | static void __exit mon_exit(void) |
382 | { | 382 | { |
383 | WARN_ON(misc_deregister(&mon_dev) != 0); | 383 | misc_deregister(&mon_dev); |
384 | platform_device_unregister(monwriter_pdev); | 384 | platform_device_unregister(monwriter_pdev); |
385 | platform_driver_unregister(&monwriter_pdrv); | 385 | platform_driver_unregister(&monwriter_pdrv); |
386 | } | 386 | } |
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 7f206ed44fdf..d15f8b4d78bd 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -38,9 +38,13 @@ static u16 ccwreq_next_path(struct ccw_device *cdev) | |||
38 | { | 38 | { |
39 | struct ccw_request *req = &cdev->private->req; | 39 | struct ccw_request *req = &cdev->private->req; |
40 | 40 | ||
41 | if (!req->singlepath) { | ||
42 | req->mask = 0; | ||
43 | goto out; | ||
44 | } | ||
41 | req->retries = req->maxretries; | 45 | req->retries = req->maxretries; |
42 | req->mask = lpm_adjust(req->mask >>= 1, req->lpm); | 46 | req->mask = lpm_adjust(req->mask >>= 1, req->lpm); |
43 | 47 | out: | |
44 | return req->mask; | 48 | return req->mask; |
45 | } | 49 | } |
46 | 50 | ||
@@ -113,8 +117,12 @@ void ccw_request_start(struct ccw_device *cdev) | |||
113 | { | 117 | { |
114 | struct ccw_request *req = &cdev->private->req; | 118 | struct ccw_request *req = &cdev->private->req; |
115 | 119 | ||
116 | /* Try all paths twice to counter link flapping. */ | 120 | if (req->singlepath) { |
117 | req->mask = 0x8080; | 121 | /* Try all paths twice to counter link flapping. */ |
122 | req->mask = 0x8080; | ||
123 | } else | ||
124 | req->mask = req->lpm; | ||
125 | |||
118 | req->retries = req->maxretries; | 126 | req->retries = req->maxretries; |
119 | req->mask = lpm_adjust(req->mask, req->lpm); | 127 | req->mask = lpm_adjust(req->mask, req->lpm); |
120 | req->drc = 0; | 128 | req->drc = 0; |
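With the new singlepath flag, ccw_request_start() keeps the old behaviour for path-grouping requests (start at mask 0x8080 so every channel path is tried twice) but lets multipath requests run with the full logical path mask and never step to another path on retry. The sketch below shows the kind of mask walk that ccwreq_next_path()/lpm_adjust() perform for the singlepath case; it illustrates the idea and is not the cio implementation.

/* Illustration only: step a 16-bit path mask down to the next bit that is
 * still allowed by the logical path mask (lpm).  Returning 0 means no usable
 * path is left and the request finishes with its delayed return code. */
#include <linux/types.h>

static u16 next_single_path(u16 mask, u16 lpm)
{
	mask >>= 1;				/* give up the current path */
	while (mask && !(mask & lpm))
		mask >>= 1;			/* skip paths not in the lpm */
	return mask;
}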
@@ -182,6 +190,8 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb) | |||
182 | /* Ask the driver what to do */ | 190 | /* Ask the driver what to do */ |
183 | if (cdev->drv && cdev->drv->uc_handler) { | 191 | if (cdev->drv && cdev->drv->uc_handler) { |
184 | todo = cdev->drv->uc_handler(cdev, lcirb); | 192 | todo = cdev->drv->uc_handler(cdev, lcirb); |
193 | CIO_TRACE_EVENT(2, "uc_response"); | ||
194 | CIO_HEX_EVENT(2, &todo, sizeof(todo)); | ||
185 | switch (todo) { | 195 | switch (todo) { |
186 | case UC_TODO_RETRY: | 196 | case UC_TODO_RETRY: |
187 | return IO_STATUS_ERROR; | 197 | return IO_STATUS_ERROR; |
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 407d0e9adfaf..4cbb1a6ca33c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -29,6 +29,7 @@ | |||
29 | #include "chsc.h" | 29 | #include "chsc.h" |
30 | 30 | ||
31 | static void *sei_page; | 31 | static void *sei_page; |
32 | static DEFINE_SPINLOCK(siosl_lock); | ||
32 | static DEFINE_SPINLOCK(sda_lock); | 33 | static DEFINE_SPINLOCK(sda_lock); |
33 | 34 | ||
34 | /** | 35 | /** |
@@ -48,6 +49,7 @@ int chsc_error_from_response(int response) | |||
48 | case 0x0007: | 49 | case 0x0007: |
49 | case 0x0008: | 50 | case 0x0008: |
50 | case 0x000a: | 51 | case 0x000a: |
52 | case 0x0104: | ||
51 | return -EINVAL; | 53 | return -EINVAL; |
52 | case 0x0004: | 54 | case 0x0004: |
53 | return -EOPNOTSUPP; | 55 | return -EOPNOTSUPP; |
@@ -974,3 +976,49 @@ int chsc_sstpi(void *page, void *result, size_t size) | |||
974 | return (rr->response.code == 0x0001) ? 0 : -EIO; | 976 | return (rr->response.code == 0x0001) ? 0 : -EIO; |
975 | } | 977 | } |
976 | 978 | ||
979 | static struct { | ||
980 | struct chsc_header request; | ||
981 | u32 word1; | ||
982 | struct subchannel_id sid; | ||
983 | u32 word3; | ||
984 | struct chsc_header response; | ||
985 | u32 word[11]; | ||
986 | } __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE))); | ||
987 | |||
988 | int chsc_siosl(struct subchannel_id schid) | ||
989 | { | ||
990 | unsigned long flags; | ||
991 | int ccode; | ||
992 | int rc; | ||
993 | |||
994 | spin_lock_irqsave(&siosl_lock, flags); | ||
995 | memset(&siosl_area, 0, sizeof(siosl_area)); | ||
996 | siosl_area.request.length = 0x0010; | ||
997 | siosl_area.request.code = 0x0046; | ||
998 | siosl_area.word1 = 0x80000000; | ||
999 | siosl_area.sid = schid; | ||
1000 | |||
1001 | ccode = chsc(&siosl_area); | ||
1002 | if (ccode > 0) { | ||
1003 | if (ccode == 3) | ||
1004 | rc = -ENODEV; | ||
1005 | else | ||
1006 | rc = -EBUSY; | ||
1007 | CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n", | ||
1008 | schid.ssid, schid.sch_no, ccode); | ||
1009 | goto out; | ||
1010 | } | ||
1011 | rc = chsc_error_from_response(siosl_area.response.code); | ||
1012 | if (rc) | ||
1013 | CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n", | ||
1014 | schid.ssid, schid.sch_no, | ||
1015 | siosl_area.response.code); | ||
1016 | else | ||
1017 | CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n", | ||
1018 | schid.ssid, schid.sch_no); | ||
1019 | out: | ||
1020 | spin_unlock_irqrestore(&siosl_lock, flags); | ||
1021 | |||
1022 | return rc; | ||
1023 | } | ||
1024 | EXPORT_SYMBOL_GPL(chsc_siosl); | ||
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 37aa611d4ac5..5453013f094b 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -80,4 +80,6 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp); | |||
80 | 80 | ||
81 | int chsc_error_from_response(int response); | 81 | int chsc_error_from_response(int response); |
82 | 82 | ||
83 | int chsc_siosl(struct subchannel_id schid); | ||
84 | |||
83 | #endif | 85 | #endif |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6d229f3523a0..51bd3687d163 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -36,6 +36,7 @@ | |||
36 | #include "ioasm.h" | 36 | #include "ioasm.h" |
37 | #include "io_sch.h" | 37 | #include "io_sch.h" |
38 | #include "blacklist.h" | 38 | #include "blacklist.h" |
39 | #include "chsc.h" | ||
39 | 40 | ||
40 | static struct timer_list recovery_timer; | 41 | static struct timer_list recovery_timer; |
41 | static DEFINE_SPINLOCK(recovery_lock); | 42 | static DEFINE_SPINLOCK(recovery_lock); |
@@ -486,9 +487,11 @@ static int online_store_handle_offline(struct ccw_device *cdev) | |||
486 | spin_lock_irq(cdev->ccwlock); | 487 | spin_lock_irq(cdev->ccwlock); |
487 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL); | 488 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL); |
488 | spin_unlock_irq(cdev->ccwlock); | 489 | spin_unlock_irq(cdev->ccwlock); |
489 | } else if (cdev->online && cdev->drv && cdev->drv->set_offline) | 490 | return 0; |
491 | } | ||
492 | if (cdev->drv && cdev->drv->set_offline) | ||
490 | return ccw_device_set_offline(cdev); | 493 | return ccw_device_set_offline(cdev); |
491 | return 0; | 494 | return -EINVAL; |
492 | } | 495 | } |
493 | 496 | ||
494 | static int online_store_recog_and_online(struct ccw_device *cdev) | 497 | static int online_store_recog_and_online(struct ccw_device *cdev) |
@@ -505,8 +508,8 @@ static int online_store_recog_and_online(struct ccw_device *cdev) | |||
505 | return -EAGAIN; | 508 | return -EAGAIN; |
506 | } | 509 | } |
507 | if (cdev->drv && cdev->drv->set_online) | 510 | if (cdev->drv && cdev->drv->set_online) |
508 | ccw_device_set_online(cdev); | 511 | return ccw_device_set_online(cdev); |
509 | return 0; | 512 | return -EINVAL; |
510 | } | 513 | } |
511 | 514 | ||
512 | static int online_store_handle_online(struct ccw_device *cdev, int force) | 515 | static int online_store_handle_online(struct ccw_device *cdev, int force) |
@@ -598,6 +601,25 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
598 | } | 601 | } |
599 | } | 602 | } |
600 | 603 | ||
604 | static ssize_t | ||
605 | initiate_logging(struct device *dev, struct device_attribute *attr, | ||
606 | const char *buf, size_t count) | ||
607 | { | ||
608 | struct subchannel *sch = to_subchannel(dev); | ||
609 | int rc; | ||
610 | |||
611 | rc = chsc_siosl(sch->schid); | ||
612 | if (rc < 0) { | ||
613 | pr_warning("Logging for subchannel 0.%x.%04x failed with " | ||
614 | "errno=%d\n", | ||
615 | sch->schid.ssid, sch->schid.sch_no, rc); | ||
616 | return rc; | ||
617 | } | ||
618 | pr_notice("Logging for subchannel 0.%x.%04x was triggered\n", | ||
619 | sch->schid.ssid, sch->schid.sch_no); | ||
620 | return count; | ||
621 | } | ||
622 | |||
601 | static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); | 623 | static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); |
602 | static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); | 624 | static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); |
603 | static DEVICE_ATTR(devtype, 0444, devtype_show, NULL); | 625 | static DEVICE_ATTR(devtype, 0444, devtype_show, NULL); |
@@ -605,10 +627,12 @@ static DEVICE_ATTR(cutype, 0444, cutype_show, NULL); | |||
605 | static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); | 627 | static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); |
606 | static DEVICE_ATTR(online, 0644, online_show, online_store); | 628 | static DEVICE_ATTR(online, 0644, online_show, online_store); |
607 | static DEVICE_ATTR(availability, 0444, available_show, NULL); | 629 | static DEVICE_ATTR(availability, 0444, available_show, NULL); |
630 | static DEVICE_ATTR(logging, 0200, NULL, initiate_logging); | ||
608 | 631 | ||
609 | static struct attribute *io_subchannel_attrs[] = { | 632 | static struct attribute *io_subchannel_attrs[] = { |
610 | &dev_attr_chpids.attr, | 633 | &dev_attr_chpids.attr, |
611 | &dev_attr_pimpampom.attr, | 634 | &dev_attr_pimpampom.attr, |
635 | &dev_attr_logging.attr, | ||
612 | NULL, | 636 | NULL, |
613 | }; | 637 | }; |
614 | 638 | ||
@@ -2036,6 +2060,21 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo) | |||
2036 | } | 2060 | } |
2037 | } | 2061 | } |
2038 | 2062 | ||
2063 | /** | ||
2064 | * ccw_device_siosl() - initiate logging | ||
2065 | * @cdev: ccw device | ||
2066 | * | ||
2067 | * This function is used to invoke model-dependent logging within the channel | ||
2068 | * subsystem. | ||
2069 | */ | ||
2070 | int ccw_device_siosl(struct ccw_device *cdev) | ||
2071 | { | ||
2072 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
2073 | |||
2074 | return chsc_siosl(sch->schid); | ||
2075 | } | ||
2076 | EXPORT_SYMBOL_GPL(ccw_device_siosl); | ||
2077 | |||
2039 | MODULE_LICENSE("GPL"); | 2078 | MODULE_LICENSE("GPL"); |
2040 | EXPORT_SYMBOL(ccw_device_set_online); | 2079 | EXPORT_SYMBOL(ccw_device_set_online); |
2041 | EXPORT_SYMBOL(ccw_device_set_offline); | 2080 | EXPORT_SYMBOL(ccw_device_set_offline); |
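chsc_siosl() is wired up twice in this series: the subchannel gains a write-only "logging" attribute (initiate_logging() above) and ccw device drivers get the exported wrapper ccw_device_siosl(). A hypothetical in-kernel caller could look like the sketch below; the driver function and the assumption that the prototype is visible through the ccw device header are mine, only ccw_device_siosl() itself comes from this patch.

/* Hypothetical driver hook: request model-dependent log collection after an
 * error the driver cannot recover from.  ccw_device_siosl() is added above;
 * everything else in this snippet is invented for illustration. */
#include <asm/ccwdev.h>
#include <linux/device.h>

static void my_driver_fatal_error(struct ccw_device *cdev)
{
	int rc = ccw_device_siosl(cdev);

	if (rc)
		dev_warn(&cdev->dev,
			 "triggering subchannel logging failed (rc=%d)\n", rc);
}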
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 6facb5499a65..82a5ad0d63f6 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -208,6 +208,7 @@ static void spid_start(struct ccw_device *cdev) | |||
208 | req->timeout = PGID_TIMEOUT; | 208 | req->timeout = PGID_TIMEOUT; |
209 | req->maxretries = PGID_RETRIES; | 209 | req->maxretries = PGID_RETRIES; |
210 | req->lpm = 0x80; | 210 | req->lpm = 0x80; |
211 | req->singlepath = 1; | ||
211 | req->callback = spid_callback; | 212 | req->callback = spid_callback; |
212 | spid_do(cdev); | 213 | spid_do(cdev); |
213 | } | 214 | } |
@@ -420,6 +421,7 @@ static void verify_start(struct ccw_device *cdev) | |||
420 | req->timeout = PGID_TIMEOUT; | 421 | req->timeout = PGID_TIMEOUT; |
421 | req->maxretries = PGID_RETRIES; | 422 | req->maxretries = PGID_RETRIES; |
422 | req->lpm = 0x80; | 423 | req->lpm = 0x80; |
424 | req->singlepath = 1; | ||
423 | if (cdev->private->flags.pgroup) { | 425 | if (cdev->private->flags.pgroup) { |
424 | CIO_TRACE_EVENT(4, "snid"); | 426 | CIO_TRACE_EVENT(4, "snid"); |
425 | CIO_HEX_EVENT(4, devid, sizeof(*devid)); | 427 | CIO_HEX_EVENT(4, devid, sizeof(*devid)); |
@@ -507,6 +509,7 @@ void ccw_device_disband_start(struct ccw_device *cdev) | |||
507 | req->timeout = PGID_TIMEOUT; | 509 | req->timeout = PGID_TIMEOUT; |
508 | req->maxretries = PGID_RETRIES; | 510 | req->maxretries = PGID_RETRIES; |
509 | req->lpm = sch->schib.pmcw.pam & sch->opm; | 511 | req->lpm = sch->schib.pmcw.pam & sch->opm; |
512 | req->singlepath = 1; | ||
510 | req->callback = disband_callback; | 513 | req->callback = disband_callback; |
511 | fn = SPID_FUNC_DISBAND; | 514 | fn = SPID_FUNC_DISBAND; |
512 | if (cdev->private->flags.mpath) | 515 | if (cdev->private->flags.mpath) |
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index b9ce712a7f25..469ef93f2302 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -92,11 +92,12 @@ enum io_status { | |||
92 | * @filter: optional callback to adjust request status based on IRB data | 92 | * @filter: optional callback to adjust request status based on IRB data |
93 | * @callback: final callback | 93 | * @callback: final callback |
94 | * @data: user-defined pointer passed to all callbacks | 94 | * @data: user-defined pointer passed to all callbacks |
95 | * @singlepath: if set, use only one path from @lpm per start I/O | ||
96 | * @cancel: non-zero if request was cancelled | ||
97 | * @done: non-zero if request was finished | ||
95 | * @mask: current path mask | 98 | * @mask: current path mask |
96 | * @retries: current number of retries | 99 | * @retries: current number of retries |
97 | * @drc: delayed return code | 100 | * @drc: delayed return code |
98 | * @cancel: non-zero if request was cancelled | ||
99 | * @done: non-zero if request was finished | ||
100 | */ | 101 | */ |
101 | struct ccw_request { | 102 | struct ccw_request { |
102 | struct ccw1 *cp; | 103 | struct ccw1 *cp; |
@@ -108,12 +109,13 @@ struct ccw_request { | |||
108 | enum io_status); | 109 | enum io_status); |
109 | void (*callback)(struct ccw_device *, void *, int); | 110 | void (*callback)(struct ccw_device *, void *, int); |
110 | void *data; | 111 | void *data; |
112 | unsigned int singlepath:1; | ||
111 | /* These fields are used internally. */ | 113 | /* These fields are used internally. */ |
114 | unsigned int cancel:1; | ||
115 | unsigned int done:1; | ||
112 | u16 mask; | 116 | u16 mask; |
113 | u16 retries; | 117 | u16 retries; |
114 | int drc; | 118 | int drc; |
115 | int cancel:1; | ||
116 | int done:1; | ||
117 | } __attribute__((packed)); | 119 | } __attribute__((packed)); |
118 | 120 | ||
119 | /* | 121 | /* |
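Besides documenting singlepath, the hunk above turns the internal cancel and done flags from signed into unsigned one-bit fields. A signed 1-bit bitfield can only represent 0 and -1, so storing 1 and later testing for == 1 silently fails on common compilers; the short standalone example below demonstrates the difference.

/* Why "int flag:1" is a trap: on the usual ABIs the stored value reads back
 * as -1, so an equality test against 1 is false.  Unsigned one-bit fields
 * behave as expected.  Standalone userspace example for illustration. */
#include <stdio.h>

struct flags_signed { int cancel:1; };
struct flags_unsigned { unsigned int cancel:1; };

int main(void)
{
	struct flags_signed s = { .cancel = 1 };
	struct flags_unsigned u = { .cancel = 1 };

	printf("signed:   value=%d, (cancel == 1) is %d\n", s.cancel, s.cancel == 1);
	printf("unsigned: value=%u, (cancel == 1) is %d\n", u.cancel, u.cancel == 1);
	return 0;
}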
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 6326b67c45d2..34c7e4046df4 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -368,6 +368,8 @@ static void setup_qib(struct qdio_irq *irq_ptr, | |||
368 | if (qebsm_possible()) | 368 | if (qebsm_possible()) |
369 | irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM; | 369 | irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM; |
370 | 370 | ||
371 | irq_ptr->qib.rflags |= init_data->qib_rflags; | ||
372 | |||
371 | irq_ptr->qib.qfmt = init_data->q_format; | 373 | irq_ptr->qib.qfmt = init_data->q_format; |
372 | if (init_data->no_input_qs) | 374 | if (init_data->no_input_qs) |
373 | irq_ptr->qib.isliba = | 375 | irq_ptr->qib.isliba = |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 147bb1a69aba..a75ed3083a6a 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -295,7 +295,7 @@ claw_driver_group_store(struct device_driver *ddrv, const char *buf, | |||
295 | int err; | 295 | int err; |
296 | err = ccwgroup_create_from_string(claw_root_dev, | 296 | err = ccwgroup_create_from_string(claw_root_dev, |
297 | claw_group_driver.driver_id, | 297 | claw_group_driver.driver_id, |
298 | &claw_ccw_driver, 3, buf); | 298 | &claw_ccw_driver, 2, buf); |
299 | return err ? err : count; | 299 | return err ? err : count; |
300 | } | 300 | } |
301 | 301 | ||
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 7a44c38aaf65..d1257768be90 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -40,11 +40,7 @@ | |||
40 | */ | 40 | */ |
41 | enum qeth_dbf_names { | 41 | enum qeth_dbf_names { |
42 | QETH_DBF_SETUP, | 42 | QETH_DBF_SETUP, |
43 | QETH_DBF_QERR, | ||
44 | QETH_DBF_TRACE, | ||
45 | QETH_DBF_MSG, | 43 | QETH_DBF_MSG, |
46 | QETH_DBF_SENSE, | ||
47 | QETH_DBF_MISC, | ||
48 | QETH_DBF_CTRL, | 44 | QETH_DBF_CTRL, |
49 | QETH_DBF_INFOS /* must be last element */ | 45 | QETH_DBF_INFOS /* must be last element */ |
50 | }; | 46 | }; |
@@ -71,7 +67,19 @@ struct qeth_dbf_info { | |||
71 | debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text) | 67 | debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text) |
72 | 68 | ||
73 | #define QETH_DBF_TEXT_(name, level, text...) \ | 69 | #define QETH_DBF_TEXT_(name, level, text...) \ |
74 | qeth_dbf_longtext(QETH_DBF_##name, level, text) | 70 | qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text) |
71 | |||
72 | #define QETH_CARD_TEXT(card, level, text) \ | ||
73 | debug_text_event(card->debug, level, text) | ||
74 | |||
75 | #define QETH_CARD_HEX(card, level, addr, len) \ | ||
76 | debug_event(card->debug, level, (void *)(addr), len) | ||
77 | |||
78 | #define QETH_CARD_MESSAGE(card, text...) \ | ||
79 | debug_sprintf_event(card->debug, level, text) | ||
80 | |||
81 | #define QETH_CARD_TEXT_(card, level, text...) \ | ||
82 | qeth_dbf_longtext(card->debug, level, text) | ||
75 | 83 | ||
76 | #define SENSE_COMMAND_REJECT_BYTE 0 | 84 | #define SENSE_COMMAND_REJECT_BYTE 0 |
77 | #define SENSE_COMMAND_REJECT_FLAG 0x80 | 85 | #define SENSE_COMMAND_REJECT_FLAG 0x80 |
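The qeth debug rework above replaces the global qeth_trace/qeth_qerr/qeth_sense/qeth_misc areas with QETH_CARD_* macros that log into a per-card debug area (card->debug), so traces from different cards no longer interleave. The sketch below shows how such a per-device area is typically created with the s390 debug feature; the helper name and size constants are illustrative, the actual registration happens in qeth_core_main.c.

/* Sketch of per-card debug area setup with the s390 debug feature.  The
 * helper name and the size constants are illustrative; only the debug_*
 * calls are the real API. */
#include <asm/debug.h>

#define CARD_DBF_PAGES	8	/* pages per area */
#define CARD_DBF_AREAS	1	/* number of areas */
#define CARD_DBF_LEN	8	/* bytes per debug entry */

static debug_info_t *setup_card_dbf(const char *name)
{
	debug_info_t *id;

	id = debug_register(name, CARD_DBF_PAGES, CARD_DBF_AREAS, CARD_DBF_LEN);
	if (!id)
		return NULL;
	debug_register_view(id, &debug_hex_ascii_view);
	debug_set_level(id, 2);
	return id;
}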
@@ -180,8 +188,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, | |||
180 | qeth_is_enabled6(c, f) : qeth_is_enabled(c, f)) | 188 | qeth_is_enabled6(c, f) : qeth_is_enabled(c, f)) |
181 | 189 | ||
182 | #define QETH_IDX_FUNC_LEVEL_OSD 0x0101 | 190 | #define QETH_IDX_FUNC_LEVEL_OSD 0x0101 |
183 | #define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108 | 191 | #define QETH_IDX_FUNC_LEVEL_IQD 0x4108 |
184 | #define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108 | ||
185 | 192 | ||
186 | #define QETH_MODELLIST_ARRAY \ | 193 | #define QETH_MODELLIST_ARRAY \ |
187 | {{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \ | 194 | {{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \ |
@@ -733,12 +740,15 @@ struct qeth_card { | |||
733 | struct qeth_qdio_info qdio; | 740 | struct qeth_qdio_info qdio; |
734 | struct qeth_perf_stats perf_stats; | 741 | struct qeth_perf_stats perf_stats; |
735 | int use_hard_stop; | 742 | int use_hard_stop; |
743 | int read_or_write_problem; | ||
736 | struct qeth_osn_info osn_info; | 744 | struct qeth_osn_info osn_info; |
737 | struct qeth_discipline discipline; | 745 | struct qeth_discipline discipline; |
738 | atomic_t force_alloc_skb; | 746 | atomic_t force_alloc_skb; |
739 | struct service_level qeth_service_level; | 747 | struct service_level qeth_service_level; |
740 | struct qdio_ssqd_desc ssqd; | 748 | struct qdio_ssqd_desc ssqd; |
749 | debug_info_t *debug; | ||
741 | struct mutex conf_mutex; | 750 | struct mutex conf_mutex; |
751 | struct mutex discipline_mutex; | ||
742 | }; | 752 | }; |
743 | 753 | ||
744 | struct qeth_card_list_struct { | 754 | struct qeth_card_list_struct { |
@@ -857,9 +867,10 @@ void qeth_core_get_ethtool_stats(struct net_device *, | |||
857 | struct ethtool_stats *, u64 *); | 867 | struct ethtool_stats *, u64 *); |
858 | void qeth_core_get_strings(struct net_device *, u32, u8 *); | 868 | void qeth_core_get_strings(struct net_device *, u32, u8 *); |
859 | void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); | 869 | void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); |
860 | void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...); | 870 | void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); |
861 | int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); | 871 | int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); |
862 | int qeth_set_access_ctrl_online(struct qeth_card *card); | 872 | int qeth_set_access_ctrl_online(struct qeth_card *card); |
873 | int qeth_hdr_chk_and_bounce(struct sk_buff *, int); | ||
863 | 874 | ||
864 | /* exports for OSN */ | 875 | /* exports for OSN */ |
865 | int qeth_osn_assist(struct net_device *, void *, int); | 876 | int qeth_osn_assist(struct net_device *, void *, int); |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 13ef46b9d388..3a5a18a0fc28 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -32,16 +32,8 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { | |||
32 | /* N P A M L V H */ | 32 | /* N P A M L V H */ |
33 | [QETH_DBF_SETUP] = {"qeth_setup", | 33 | [QETH_DBF_SETUP] = {"qeth_setup", |
34 | 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, | 34 | 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, |
35 | [QETH_DBF_QERR] = {"qeth_qerr", | ||
36 | 2, 1, 8, 2, &debug_hex_ascii_view, NULL}, | ||
37 | [QETH_DBF_TRACE] = {"qeth_trace", | ||
38 | 4, 1, 8, 3, &debug_hex_ascii_view, NULL}, | ||
39 | [QETH_DBF_MSG] = {"qeth_msg", | 35 | [QETH_DBF_MSG] = {"qeth_msg", |
40 | 8, 1, 128, 3, &debug_sprintf_view, NULL}, | 36 | 8, 1, 128, 3, &debug_sprintf_view, NULL}, |
41 | [QETH_DBF_SENSE] = {"qeth_sense", | ||
42 | 2, 1, 64, 2, &debug_hex_ascii_view, NULL}, | ||
43 | [QETH_DBF_MISC] = {"qeth_misc", | ||
44 | 2, 1, 256, 2, &debug_hex_ascii_view, NULL}, | ||
45 | [QETH_DBF_CTRL] = {"qeth_control", | 37 | [QETH_DBF_CTRL] = {"qeth_control", |
46 | 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, | 38 | 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, |
47 | }; | 39 | }; |
@@ -65,48 +57,6 @@ static void qeth_free_buffer_pool(struct qeth_card *); | |||
65 | static int qeth_qdio_establish(struct qeth_card *); | 57 | static int qeth_qdio_establish(struct qeth_card *); |
66 | 58 | ||
67 | 59 | ||
68 | static inline void __qeth_fill_buffer_frag(struct sk_buff *skb, | ||
69 | struct qdio_buffer *buffer, int is_tso, | ||
70 | int *next_element_to_fill) | ||
71 | { | ||
72 | struct skb_frag_struct *frag; | ||
73 | int fragno; | ||
74 | unsigned long addr; | ||
75 | int element, cnt, dlen; | ||
76 | |||
77 | fragno = skb_shinfo(skb)->nr_frags; | ||
78 | element = *next_element_to_fill; | ||
79 | dlen = 0; | ||
80 | |||
81 | if (is_tso) | ||
82 | buffer->element[element].flags = | ||
83 | SBAL_FLAGS_MIDDLE_FRAG; | ||
84 | else | ||
85 | buffer->element[element].flags = | ||
86 | SBAL_FLAGS_FIRST_FRAG; | ||
87 | dlen = skb->len - skb->data_len; | ||
88 | if (dlen) { | ||
89 | buffer->element[element].addr = skb->data; | ||
90 | buffer->element[element].length = dlen; | ||
91 | element++; | ||
92 | } | ||
93 | for (cnt = 0; cnt < fragno; cnt++) { | ||
94 | frag = &skb_shinfo(skb)->frags[cnt]; | ||
95 | addr = (page_to_pfn(frag->page) << PAGE_SHIFT) + | ||
96 | frag->page_offset; | ||
97 | buffer->element[element].addr = (char *)addr; | ||
98 | buffer->element[element].length = frag->size; | ||
99 | if (cnt < (fragno - 1)) | ||
100 | buffer->element[element].flags = | ||
101 | SBAL_FLAGS_MIDDLE_FRAG; | ||
102 | else | ||
103 | buffer->element[element].flags = | ||
104 | SBAL_FLAGS_LAST_FRAG; | ||
105 | element++; | ||
106 | } | ||
107 | *next_element_to_fill = element; | ||
108 | } | ||
109 | |||
110 | static inline const char *qeth_get_cardname(struct qeth_card *card) | 60 | static inline const char *qeth_get_cardname(struct qeth_card *card) |
111 | { | 61 | { |
112 | if (card->info.guestlan) { | 62 | if (card->info.guestlan) { |
@@ -232,7 +182,7 @@ void qeth_clear_working_pool_list(struct qeth_card *card) | |||
232 | { | 182 | { |
233 | struct qeth_buffer_pool_entry *pool_entry, *tmp; | 183 | struct qeth_buffer_pool_entry *pool_entry, *tmp; |
234 | 184 | ||
235 | QETH_DBF_TEXT(TRACE, 5, "clwrklst"); | 185 | QETH_CARD_TEXT(card, 5, "clwrklst"); |
236 | list_for_each_entry_safe(pool_entry, tmp, | 186 | list_for_each_entry_safe(pool_entry, tmp, |
237 | &card->qdio.in_buf_pool.entry_list, list){ | 187 | &card->qdio.in_buf_pool.entry_list, list){ |
238 | list_del(&pool_entry->list); | 188 | list_del(&pool_entry->list); |
@@ -246,7 +196,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card) | |||
246 | void *ptr; | 196 | void *ptr; |
247 | int i, j; | 197 | int i, j; |
248 | 198 | ||
249 | QETH_DBF_TEXT(TRACE, 5, "alocpool"); | 199 | QETH_CARD_TEXT(card, 5, "alocpool"); |
250 | for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { | 200 | for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { |
251 | pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL); | 201 | pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL); |
252 | if (!pool_entry) { | 202 | if (!pool_entry) { |
@@ -273,7 +223,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card) | |||
273 | 223 | ||
274 | int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) | 224 | int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) |
275 | { | 225 | { |
276 | QETH_DBF_TEXT(TRACE, 2, "realcbp"); | 226 | QETH_CARD_TEXT(card, 2, "realcbp"); |
277 | 227 | ||
278 | if ((card->state != CARD_STATE_DOWN) && | 228 | if ((card->state != CARD_STATE_DOWN) && |
279 | (card->state != CARD_STATE_RECOVER)) | 229 | (card->state != CARD_STATE_RECOVER)) |
@@ -293,7 +243,7 @@ static int qeth_issue_next_read(struct qeth_card *card) | |||
293 | int rc; | 243 | int rc; |
294 | struct qeth_cmd_buffer *iob; | 244 | struct qeth_cmd_buffer *iob; |
295 | 245 | ||
296 | QETH_DBF_TEXT(TRACE, 5, "issnxrd"); | 246 | QETH_CARD_TEXT(card, 5, "issnxrd"); |
297 | if (card->read.state != CH_STATE_UP) | 247 | if (card->read.state != CH_STATE_UP) |
298 | return -EIO; | 248 | return -EIO; |
299 | iob = qeth_get_buffer(&card->read); | 249 | iob = qeth_get_buffer(&card->read); |
@@ -305,13 +255,14 @@ static int qeth_issue_next_read(struct qeth_card *card) | |||
305 | return -ENOMEM; | 255 | return -ENOMEM; |
306 | } | 256 | } |
307 | qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); | 257 | qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); |
308 | QETH_DBF_TEXT(TRACE, 6, "noirqpnd"); | 258 | QETH_CARD_TEXT(card, 6, "noirqpnd"); |
309 | rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, | 259 | rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, |
310 | (addr_t) iob, 0, 0); | 260 | (addr_t) iob, 0, 0); |
311 | if (rc) { | 261 | if (rc) { |
312 | QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! " | 262 | QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! " |
313 | "rc=%i\n", dev_name(&card->gdev->dev), rc); | 263 | "rc=%i\n", dev_name(&card->gdev->dev), rc); |
314 | atomic_set(&card->read.irq_pending, 0); | 264 | atomic_set(&card->read.irq_pending, 0); |
265 | card->read_or_write_problem = 1; | ||
315 | qeth_schedule_recovery(card); | 266 | qeth_schedule_recovery(card); |
316 | wake_up(&card->wait_q); | 267 | wake_up(&card->wait_q); |
317 | } | 268 | } |
@@ -364,7 +315,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, | |||
364 | { | 315 | { |
365 | struct qeth_ipa_cmd *cmd = NULL; | 316 | struct qeth_ipa_cmd *cmd = NULL; |
366 | 317 | ||
367 | QETH_DBF_TEXT(TRACE, 5, "chkipad"); | 318 | QETH_CARD_TEXT(card, 5, "chkipad"); |
368 | if (IS_IPA(iob->data)) { | 319 | if (IS_IPA(iob->data)) { |
369 | cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); | 320 | cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); |
370 | if (IS_IPA_REPLY(cmd)) { | 321 | if (IS_IPA_REPLY(cmd)) { |
@@ -400,10 +351,10 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, | |||
400 | case IPA_CMD_MODCCID: | 351 | case IPA_CMD_MODCCID: |
401 | return cmd; | 352 | return cmd; |
402 | case IPA_CMD_REGISTER_LOCAL_ADDR: | 353 | case IPA_CMD_REGISTER_LOCAL_ADDR: |
403 | QETH_DBF_TEXT(TRACE, 3, "irla"); | 354 | QETH_CARD_TEXT(card, 3, "irla"); |
404 | break; | 355 | break; |
405 | case IPA_CMD_UNREGISTER_LOCAL_ADDR: | 356 | case IPA_CMD_UNREGISTER_LOCAL_ADDR: |
406 | QETH_DBF_TEXT(TRACE, 3, "urla"); | 357 | QETH_CARD_TEXT(card, 3, "urla"); |
407 | break; | 358 | break; |
408 | default: | 359 | default: |
409 | QETH_DBF_MESSAGE(2, "Received data is IPA " | 360 | QETH_DBF_MESSAGE(2, "Received data is IPA " |
@@ -420,7 +371,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) | |||
420 | struct qeth_reply *reply, *r; | 371 | struct qeth_reply *reply, *r; |
421 | unsigned long flags; | 372 | unsigned long flags; |
422 | 373 | ||
423 | QETH_DBF_TEXT(TRACE, 4, "clipalst"); | 374 | QETH_CARD_TEXT(card, 4, "clipalst"); |
424 | 375 | ||
425 | spin_lock_irqsave(&card->lock, flags); | 376 | spin_lock_irqsave(&card->lock, flags); |
426 | list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { | 377 | list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { |
@@ -432,6 +383,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) | |||
432 | qeth_put_reply(reply); | 383 | qeth_put_reply(reply); |
433 | } | 384 | } |
434 | spin_unlock_irqrestore(&card->lock, flags); | 385 | spin_unlock_irqrestore(&card->lock, flags); |
386 | atomic_set(&card->write.irq_pending, 0); | ||
435 | } | 387 | } |
436 | EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); | 388 | EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); |
437 | 389 | ||
@@ -448,9 +400,9 @@ static int qeth_check_idx_response(struct qeth_card *card, | |||
448 | buffer[4], | 400 | buffer[4], |
449 | ((buffer[4] == 0x22) ? | 401 | ((buffer[4] == 0x22) ? |
450 | " -- try another portname" : "")); | 402 | " -- try another portname" : "")); |
451 | QETH_DBF_TEXT(TRACE, 2, "ckidxres"); | 403 | QETH_CARD_TEXT(card, 2, "ckidxres"); |
452 | QETH_DBF_TEXT(TRACE, 2, " idxterm"); | 404 | QETH_CARD_TEXT(card, 2, " idxterm"); |
453 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); | 405 | QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); |
454 | if (buffer[4] == 0xf6) { | 406 | if (buffer[4] == 0xf6) { |
455 | dev_err(&card->gdev->dev, | 407 | dev_err(&card->gdev->dev, |
456 | "The qeth device is not configured " | 408 | "The qeth device is not configured " |
@@ -467,8 +419,8 @@ static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob, | |||
467 | { | 419 | { |
468 | struct qeth_card *card; | 420 | struct qeth_card *card; |
469 | 421 | ||
470 | QETH_DBF_TEXT(TRACE, 4, "setupccw"); | ||
471 | card = CARD_FROM_CDEV(channel->ccwdev); | 422 | card = CARD_FROM_CDEV(channel->ccwdev); |
423 | QETH_CARD_TEXT(card, 4, "setupccw"); | ||
472 | if (channel == &card->read) | 424 | if (channel == &card->read) |
473 | memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); | 425 | memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); |
474 | else | 426 | else |
@@ -481,7 +433,7 @@ static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel) | |||
481 | { | 433 | { |
482 | __u8 index; | 434 | __u8 index; |
483 | 435 | ||
484 | QETH_DBF_TEXT(TRACE, 6, "getbuff"); | 436 | QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff"); |
485 | index = channel->io_buf_no; | 437 | index = channel->io_buf_no; |
486 | do { | 438 | do { |
487 | if (channel->iob[index].state == BUF_STATE_FREE) { | 439 | if (channel->iob[index].state == BUF_STATE_FREE) { |
@@ -502,7 +454,7 @@ void qeth_release_buffer(struct qeth_channel *channel, | |||
502 | { | 454 | { |
503 | unsigned long flags; | 455 | unsigned long flags; |
504 | 456 | ||
505 | QETH_DBF_TEXT(TRACE, 6, "relbuff"); | 457 | QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff"); |
506 | spin_lock_irqsave(&channel->iob_lock, flags); | 458 | spin_lock_irqsave(&channel->iob_lock, flags); |
507 | memset(iob->data, 0, QETH_BUFSIZE); | 459 | memset(iob->data, 0, QETH_BUFSIZE); |
508 | iob->state = BUF_STATE_FREE; | 460 | iob->state = BUF_STATE_FREE; |
@@ -553,9 +505,8 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel, | |||
553 | int keep_reply; | 505 | int keep_reply; |
554 | int rc = 0; | 506 | int rc = 0; |
555 | 507 | ||
556 | QETH_DBF_TEXT(TRACE, 4, "sndctlcb"); | ||
557 | |||
558 | card = CARD_FROM_CDEV(channel->ccwdev); | 508 | card = CARD_FROM_CDEV(channel->ccwdev); |
509 | QETH_CARD_TEXT(card, 4, "sndctlcb"); | ||
559 | rc = qeth_check_idx_response(card, iob->data); | 510 | rc = qeth_check_idx_response(card, iob->data); |
560 | switch (rc) { | 511 | switch (rc) { |
561 | case 0: | 512 | case 0: |
@@ -563,6 +514,7 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel, | |||
563 | case -EIO: | 514 | case -EIO: |
564 | qeth_clear_ipacmd_list(card); | 515 | qeth_clear_ipacmd_list(card); |
565 | qeth_schedule_recovery(card); | 516 | qeth_schedule_recovery(card); |
517 | /* fall through */ | ||
566 | default: | 518 | default: |
567 | goto out; | 519 | goto out; |
568 | } | 520 | } |
@@ -722,7 +674,7 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread); | |||
722 | 674 | ||
723 | void qeth_schedule_recovery(struct qeth_card *card) | 675 | void qeth_schedule_recovery(struct qeth_card *card) |
724 | { | 676 | { |
725 | QETH_DBF_TEXT(TRACE, 2, "startrec"); | 677 | QETH_CARD_TEXT(card, 2, "startrec"); |
726 | if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) | 678 | if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) |
727 | schedule_work(&card->kernel_thread_starter); | 679 | schedule_work(&card->kernel_thread_starter); |
728 | } | 680 | } |
@@ -732,15 +684,17 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) | |||
732 | { | 684 | { |
733 | int dstat, cstat; | 685 | int dstat, cstat; |
734 | char *sense; | 686 | char *sense; |
687 | struct qeth_card *card; | ||
735 | 688 | ||
736 | sense = (char *) irb->ecw; | 689 | sense = (char *) irb->ecw; |
737 | cstat = irb->scsw.cmd.cstat; | 690 | cstat = irb->scsw.cmd.cstat; |
738 | dstat = irb->scsw.cmd.dstat; | 691 | dstat = irb->scsw.cmd.dstat; |
692 | card = CARD_FROM_CDEV(cdev); | ||
739 | 693 | ||
740 | if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | | 694 | if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | |
741 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | | 695 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | |
742 | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { | 696 | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { |
743 | QETH_DBF_TEXT(TRACE, 2, "CGENCHK"); | 697 | QETH_CARD_TEXT(card, 2, "CGENCHK"); |
744 | dev_warn(&cdev->dev, "The qeth device driver " | 698 | dev_warn(&cdev->dev, "The qeth device driver " |
745 | "failed to recover an error on the device\n"); | 699 | "failed to recover an error on the device\n"); |
746 | QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n", | 700 | QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n", |
@@ -753,23 +707,23 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) | |||
753 | if (dstat & DEV_STAT_UNIT_CHECK) { | 707 | if (dstat & DEV_STAT_UNIT_CHECK) { |
754 | if (sense[SENSE_RESETTING_EVENT_BYTE] & | 708 | if (sense[SENSE_RESETTING_EVENT_BYTE] & |
755 | SENSE_RESETTING_EVENT_FLAG) { | 709 | SENSE_RESETTING_EVENT_FLAG) { |
756 | QETH_DBF_TEXT(TRACE, 2, "REVIND"); | 710 | QETH_CARD_TEXT(card, 2, "REVIND"); |
757 | return 1; | 711 | return 1; |
758 | } | 712 | } |
759 | if (sense[SENSE_COMMAND_REJECT_BYTE] & | 713 | if (sense[SENSE_COMMAND_REJECT_BYTE] & |
760 | SENSE_COMMAND_REJECT_FLAG) { | 714 | SENSE_COMMAND_REJECT_FLAG) { |
761 | QETH_DBF_TEXT(TRACE, 2, "CMDREJi"); | 715 | QETH_CARD_TEXT(card, 2, "CMDREJi"); |
762 | return 1; | 716 | return 1; |
763 | } | 717 | } |
764 | if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { | 718 | if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { |
765 | QETH_DBF_TEXT(TRACE, 2, "AFFE"); | 719 | QETH_CARD_TEXT(card, 2, "AFFE"); |
766 | return 1; | 720 | return 1; |
767 | } | 721 | } |
768 | if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { | 722 | if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { |
769 | QETH_DBF_TEXT(TRACE, 2, "ZEROSEN"); | 723 | QETH_CARD_TEXT(card, 2, "ZEROSEN"); |
770 | return 0; | 724 | return 0; |
771 | } | 725 | } |
772 | QETH_DBF_TEXT(TRACE, 2, "DGENCHK"); | 726 | QETH_CARD_TEXT(card, 2, "DGENCHK"); |
773 | return 1; | 727 | return 1; |
774 | } | 728 | } |
775 | return 0; | 729 | return 0; |
@@ -778,6 +732,10 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) | |||
778 | static long __qeth_check_irb_error(struct ccw_device *cdev, | 732 | static long __qeth_check_irb_error(struct ccw_device *cdev, |
779 | unsigned long intparm, struct irb *irb) | 733 | unsigned long intparm, struct irb *irb) |
780 | { | 734 | { |
735 | struct qeth_card *card; | ||
736 | |||
737 | card = CARD_FROM_CDEV(cdev); | ||
738 | |||
781 | if (!IS_ERR(irb)) | 739 | if (!IS_ERR(irb)) |
782 | return 0; | 740 | return 0; |
783 | 741 | ||
@@ -785,17 +743,15 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, | |||
785 | case -EIO: | 743 | case -EIO: |
786 | QETH_DBF_MESSAGE(2, "%s i/o-error on device\n", | 744 | QETH_DBF_MESSAGE(2, "%s i/o-error on device\n", |
787 | dev_name(&cdev->dev)); | 745 | dev_name(&cdev->dev)); |
788 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); | 746 | QETH_CARD_TEXT(card, 2, "ckirberr"); |
789 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); | 747 | QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); |
790 | break; | 748 | break; |
791 | case -ETIMEDOUT: | 749 | case -ETIMEDOUT: |
792 | dev_warn(&cdev->dev, "A hardware operation timed out" | 750 | dev_warn(&cdev->dev, "A hardware operation timed out" |
793 | " on the device\n"); | 751 | " on the device\n"); |
794 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); | 752 | QETH_CARD_TEXT(card, 2, "ckirberr"); |
795 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT); | 753 | QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); |
796 | if (intparm == QETH_RCD_PARM) { | 754 | if (intparm == QETH_RCD_PARM) { |
797 | struct qeth_card *card = CARD_FROM_CDEV(cdev); | ||
798 | |||
799 | if (card && (card->data.ccwdev == cdev)) { | 755 | if (card && (card->data.ccwdev == cdev)) { |
800 | card->data.state = CH_STATE_DOWN; | 756 | card->data.state = CH_STATE_DOWN; |
801 | wake_up(&card->wait_q); | 757 | wake_up(&card->wait_q); |
@@ -805,8 +761,8 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, | |||
805 | default: | 761 | default: |
806 | QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n", | 762 | QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n", |
807 | dev_name(&cdev->dev), PTR_ERR(irb)); | 763 | dev_name(&cdev->dev), PTR_ERR(irb)); |
808 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); | 764 | QETH_CARD_TEXT(card, 2, "ckirberr"); |
809 | QETH_DBF_TEXT(TRACE, 2, " rc???"); | 765 | QETH_CARD_TEXT(card, 2, " rc???"); |
810 | } | 766 | } |
811 | return PTR_ERR(irb); | 767 | return PTR_ERR(irb); |
812 | } | 768 | } |
@@ -822,8 +778,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
822 | struct qeth_cmd_buffer *iob; | 778 | struct qeth_cmd_buffer *iob; |
823 | __u8 index; | 779 | __u8 index; |
824 | 780 | ||
825 | QETH_DBF_TEXT(TRACE, 5, "irq"); | ||
826 | |||
827 | if (__qeth_check_irb_error(cdev, intparm, irb)) | 781 | if (__qeth_check_irb_error(cdev, intparm, irb)) |
828 | return; | 782 | return; |
829 | cstat = irb->scsw.cmd.cstat; | 783 | cstat = irb->scsw.cmd.cstat; |
@@ -833,15 +787,17 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
833 | if (!card) | 787 | if (!card) |
834 | return; | 788 | return; |
835 | 789 | ||
790 | QETH_CARD_TEXT(card, 5, "irq"); | ||
791 | |||
836 | if (card->read.ccwdev == cdev) { | 792 | if (card->read.ccwdev == cdev) { |
837 | channel = &card->read; | 793 | channel = &card->read; |
838 | QETH_DBF_TEXT(TRACE, 5, "read"); | 794 | QETH_CARD_TEXT(card, 5, "read"); |
839 | } else if (card->write.ccwdev == cdev) { | 795 | } else if (card->write.ccwdev == cdev) { |
840 | channel = &card->write; | 796 | channel = &card->write; |
841 | QETH_DBF_TEXT(TRACE, 5, "write"); | 797 | QETH_CARD_TEXT(card, 5, "write"); |
842 | } else { | 798 | } else { |
843 | channel = &card->data; | 799 | channel = &card->data; |
844 | QETH_DBF_TEXT(TRACE, 5, "data"); | 800 | QETH_CARD_TEXT(card, 5, "data"); |
845 | } | 801 | } |
846 | atomic_set(&channel->irq_pending, 0); | 802 | atomic_set(&channel->irq_pending, 0); |
847 | 803 | ||
@@ -857,12 +813,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
857 | goto out; | 813 | goto out; |
858 | 814 | ||
859 | if (intparm == QETH_CLEAR_CHANNEL_PARM) { | 815 | if (intparm == QETH_CLEAR_CHANNEL_PARM) { |
860 | QETH_DBF_TEXT(TRACE, 6, "clrchpar"); | 816 | QETH_CARD_TEXT(card, 6, "clrchpar"); |
861 | /* we don't have to handle this further */ | 817 | /* we don't have to handle this further */ |
862 | intparm = 0; | 818 | intparm = 0; |
863 | } | 819 | } |
864 | if (intparm == QETH_HALT_CHANNEL_PARM) { | 820 | if (intparm == QETH_HALT_CHANNEL_PARM) { |
865 | QETH_DBF_TEXT(TRACE, 6, "hltchpar"); | 821 | QETH_CARD_TEXT(card, 6, "hltchpar"); |
866 | /* we don't have to handle this further */ | 822 | /* we don't have to handle this further */ |
867 | intparm = 0; | 823 | intparm = 0; |
868 | } | 824 | } |
@@ -963,7 +919,7 @@ void qeth_clear_qdio_buffers(struct qeth_card *card) | |||
963 | { | 919 | { |
964 | int i, j; | 920 | int i, j; |
965 | 921 | ||
966 | QETH_DBF_TEXT(TRACE, 2, "clearqdbf"); | 922 | QETH_CARD_TEXT(card, 2, "clearqdbf"); |
967 | /* clear outbound buffers to free skbs */ | 923 | /* clear outbound buffers to free skbs */ |
968 | for (i = 0; i < card->qdio.no_out_queues; ++i) | 924 | for (i = 0; i < card->qdio.no_out_queues; ++i) |
969 | if (card->qdio.out_qs[i]) { | 925 | if (card->qdio.out_qs[i]) { |
@@ -978,7 +934,6 @@ static void qeth_free_buffer_pool(struct qeth_card *card) | |||
978 | { | 934 | { |
979 | struct qeth_buffer_pool_entry *pool_entry, *tmp; | 935 | struct qeth_buffer_pool_entry *pool_entry, *tmp; |
980 | int i = 0; | 936 | int i = 0; |
981 | QETH_DBF_TEXT(TRACE, 5, "freepool"); | ||
982 | list_for_each_entry_safe(pool_entry, tmp, | 937 | list_for_each_entry_safe(pool_entry, tmp, |
983 | &card->qdio.init_pool.entry_list, init_list){ | 938 | &card->qdio.init_pool.entry_list, init_list){ |
984 | for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) | 939 | for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) |
@@ -992,7 +947,6 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) | |||
992 | { | 947 | { |
993 | int i, j; | 948 | int i, j; |
994 | 949 | ||
995 | QETH_DBF_TEXT(TRACE, 2, "freeqdbf"); | ||
996 | if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == | 950 | if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == |
997 | QETH_QDIO_UNINITIALIZED) | 951 | QETH_QDIO_UNINITIALIZED) |
998 | return; | 952 | return; |
@@ -1089,7 +1043,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) | |||
1089 | int rc = 0; | 1043 | int rc = 0; |
1090 | 1044 | ||
1091 | spin_lock_irqsave(&card->thread_mask_lock, flags); | 1045 | spin_lock_irqsave(&card->thread_mask_lock, flags); |
1092 | QETH_DBF_TEXT_(TRACE, 4, " %02x%02x%02x", | 1046 | QETH_CARD_TEXT_(card, 4, " %02x%02x%02x", |
1093 | (u8) card->thread_start_mask, | 1047 | (u8) card->thread_start_mask, |
1094 | (u8) card->thread_allowed_mask, | 1048 | (u8) card->thread_allowed_mask, |
1095 | (u8) card->thread_running_mask); | 1049 | (u8) card->thread_running_mask); |
@@ -1102,7 +1056,7 @@ static void qeth_start_kernel_thread(struct work_struct *work) | |||
1102 | { | 1056 | { |
1103 | struct qeth_card *card = container_of(work, struct qeth_card, | 1057 | struct qeth_card *card = container_of(work, struct qeth_card, |
1104 | kernel_thread_starter); | 1058 | kernel_thread_starter); |
1105 | QETH_DBF_TEXT(TRACE , 2, "strthrd"); | 1059 | QETH_CARD_TEXT(card , 2, "strthrd"); |
1106 | 1060 | ||
1107 | if (card->read.state != CH_STATE_UP && | 1061 | if (card->read.state != CH_STATE_UP && |
1108 | card->write.state != CH_STATE_UP) | 1062 | card->write.state != CH_STATE_UP) |
@@ -1124,6 +1078,7 @@ static int qeth_setup_card(struct qeth_card *card) | |||
1124 | card->state = CARD_STATE_DOWN; | 1078 | card->state = CARD_STATE_DOWN; |
1125 | card->lan_online = 0; | 1079 | card->lan_online = 0; |
1126 | card->use_hard_stop = 0; | 1080 | card->use_hard_stop = 0; |
1081 | card->read_or_write_problem = 0; | ||
1127 | card->dev = NULL; | 1082 | card->dev = NULL; |
1128 | spin_lock_init(&card->vlanlock); | 1083 | spin_lock_init(&card->vlanlock); |
1129 | spin_lock_init(&card->mclock); | 1084 | spin_lock_init(&card->mclock); |
@@ -1132,6 +1087,7 @@ static int qeth_setup_card(struct qeth_card *card) | |||
1132 | spin_lock_init(&card->ip_lock); | 1087 | spin_lock_init(&card->ip_lock); |
1133 | spin_lock_init(&card->thread_mask_lock); | 1088 | spin_lock_init(&card->thread_mask_lock); |
1134 | mutex_init(&card->conf_mutex); | 1089 | mutex_init(&card->conf_mutex); |
1090 | mutex_init(&card->discipline_mutex); | ||
1135 | card->thread_start_mask = 0; | 1091 | card->thread_start_mask = 0; |
1136 | card->thread_allowed_mask = 0; | 1092 | card->thread_allowed_mask = 0; |
1137 | card->thread_running_mask = 0; | 1093 | card->thread_running_mask = 0; |
@@ -1229,8 +1185,8 @@ static int qeth_clear_channel(struct qeth_channel *channel) | |||
1229 | struct qeth_card *card; | 1185 | struct qeth_card *card; |
1230 | int rc; | 1186 | int rc; |
1231 | 1187 | ||
1232 | QETH_DBF_TEXT(TRACE, 3, "clearch"); | ||
1233 | card = CARD_FROM_CDEV(channel->ccwdev); | 1188 | card = CARD_FROM_CDEV(channel->ccwdev); |
1189 | QETH_CARD_TEXT(card, 3, "clearch"); | ||
1234 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); | 1190 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); |
1235 | rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); | 1191 | rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); |
1236 | spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); | 1192 | spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); |
@@ -1253,8 +1209,8 @@ static int qeth_halt_channel(struct qeth_channel *channel) | |||
1253 | struct qeth_card *card; | 1209 | struct qeth_card *card; |
1254 | int rc; | 1210 | int rc; |
1255 | 1211 | ||
1256 | QETH_DBF_TEXT(TRACE, 3, "haltch"); | ||
1257 | card = CARD_FROM_CDEV(channel->ccwdev); | 1212 | card = CARD_FROM_CDEV(channel->ccwdev); |
1213 | QETH_CARD_TEXT(card, 3, "haltch"); | ||
1258 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); | 1214 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); |
1259 | rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); | 1215 | rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); |
1260 | spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); | 1216 | spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); |
@@ -1274,7 +1230,7 @@ static int qeth_halt_channels(struct qeth_card *card) | |||
1274 | { | 1230 | { |
1275 | int rc1 = 0, rc2 = 0, rc3 = 0; | 1231 | int rc1 = 0, rc2 = 0, rc3 = 0; |
1276 | 1232 | ||
1277 | QETH_DBF_TEXT(TRACE, 3, "haltchs"); | 1233 | QETH_CARD_TEXT(card, 3, "haltchs"); |
1278 | rc1 = qeth_halt_channel(&card->read); | 1234 | rc1 = qeth_halt_channel(&card->read); |
1279 | rc2 = qeth_halt_channel(&card->write); | 1235 | rc2 = qeth_halt_channel(&card->write); |
1280 | rc3 = qeth_halt_channel(&card->data); | 1236 | rc3 = qeth_halt_channel(&card->data); |
@@ -1289,7 +1245,7 @@ static int qeth_clear_channels(struct qeth_card *card) | |||
1289 | { | 1245 | { |
1290 | int rc1 = 0, rc2 = 0, rc3 = 0; | 1246 | int rc1 = 0, rc2 = 0, rc3 = 0; |
1291 | 1247 | ||
1292 | QETH_DBF_TEXT(TRACE, 3, "clearchs"); | 1248 | QETH_CARD_TEXT(card, 3, "clearchs"); |
1293 | rc1 = qeth_clear_channel(&card->read); | 1249 | rc1 = qeth_clear_channel(&card->read); |
1294 | rc2 = qeth_clear_channel(&card->write); | 1250 | rc2 = qeth_clear_channel(&card->write); |
1295 | rc3 = qeth_clear_channel(&card->data); | 1251 | rc3 = qeth_clear_channel(&card->data); |
@@ -1304,8 +1260,7 @@ static int qeth_clear_halt_card(struct qeth_card *card, int halt) | |||
1304 | { | 1260 | { |
1305 | int rc = 0; | 1261 | int rc = 0; |
1306 | 1262 | ||
1307 | QETH_DBF_TEXT(TRACE, 3, "clhacrd"); | 1263 | QETH_CARD_TEXT(card, 3, "clhacrd"); |
1308 | QETH_DBF_HEX(TRACE, 3, &card, sizeof(void *)); | ||
1309 | 1264 | ||
1310 | if (halt) | 1265 | if (halt) |
1311 | rc = qeth_halt_channels(card); | 1266 | rc = qeth_halt_channels(card); |
@@ -1318,7 +1273,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) | |||
1318 | { | 1273 | { |
1319 | int rc = 0; | 1274 | int rc = 0; |
1320 | 1275 | ||
1321 | QETH_DBF_TEXT(TRACE, 3, "qdioclr"); | 1276 | QETH_CARD_TEXT(card, 3, "qdioclr"); |
1322 | switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, | 1277 | switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, |
1323 | QETH_QDIO_CLEANING)) { | 1278 | QETH_QDIO_CLEANING)) { |
1324 | case QETH_QDIO_ESTABLISHED: | 1279 | case QETH_QDIO_ESTABLISHED: |
@@ -1329,7 +1284,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) | |||
1329 | rc = qdio_shutdown(CARD_DDEV(card), | 1284 | rc = qdio_shutdown(CARD_DDEV(card), |
1330 | QDIO_FLAG_CLEANUP_USING_CLEAR); | 1285 | QDIO_FLAG_CLEANUP_USING_CLEAR); |
1331 | if (rc) | 1286 | if (rc) |
1332 | QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc); | 1287 | QETH_CARD_TEXT_(card, 3, "1err%d", rc); |
1333 | qdio_free(CARD_DDEV(card)); | 1288 | qdio_free(CARD_DDEV(card)); |
1334 | atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); | 1289 | atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); |
1335 | break; | 1290 | break; |
@@ -1340,7 +1295,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) | |||
1340 | } | 1295 | } |
1341 | rc = qeth_clear_halt_card(card, use_halt); | 1296 | rc = qeth_clear_halt_card(card, use_halt); |
1342 | if (rc) | 1297 | if (rc) |
1343 | QETH_DBF_TEXT_(TRACE, 3, "2err%d", rc); | 1298 | QETH_CARD_TEXT_(card, 3, "2err%d", rc); |
1344 | card->state = CARD_STATE_DOWN; | 1299 | card->state = CARD_STATE_DOWN; |
1345 | return rc; | 1300 | return rc; |
1346 | } | 1301 | } |
@@ -1432,14 +1387,10 @@ static void qeth_init_func_level(struct qeth_card *card) | |||
1432 | { | 1387 | { |
1433 | switch (card->info.type) { | 1388 | switch (card->info.type) { |
1434 | case QETH_CARD_TYPE_IQD: | 1389 | case QETH_CARD_TYPE_IQD: |
1435 | if (card->ipato.enabled) | 1390 | card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; |
1436 | card->info.func_level = | ||
1437 | QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT; | ||
1438 | else | ||
1439 | card->info.func_level = | ||
1440 | QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT; | ||
1441 | break; | 1391 | break; |
1442 | case QETH_CARD_TYPE_OSD: | 1392 | case QETH_CARD_TYPE_OSD: |
1393 | case QETH_CARD_TYPE_OSN: | ||
1443 | card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD; | 1394 | card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD; |
1444 | break; | 1395 | break; |
1445 | default: | 1396 | default: |
@@ -1637,15 +1588,18 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, | |||
1637 | "host\n"); | 1588 | "host\n"); |
1638 | break; | 1589 | break; |
1639 | case QETH_IDX_ACT_ERR_AUTH: | 1590 | case QETH_IDX_ACT_ERR_AUTH: |
1591 | case QETH_IDX_ACT_ERR_AUTH_USER: | ||
1640 | dev_err(&card->read.ccwdev->dev, | 1592 | dev_err(&card->read.ccwdev->dev, |
1641 | "Setting the device online failed because of " | 1593 | "Setting the device online failed because of " |
1642 | "insufficient LPAR authorization\n"); | 1594 | "insufficient authorization\n"); |
1643 | break; | 1595 | break; |
1644 | default: | 1596 | default: |
1645 | QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:" | 1597 | QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:" |
1646 | " negative reply\n", | 1598 | " negative reply\n", |
1647 | dev_name(&card->read.ccwdev->dev)); | 1599 | dev_name(&card->read.ccwdev->dev)); |
1648 | } | 1600 | } |
1601 | QETH_CARD_TEXT_(card, 2, "idxread%c", | ||
1602 | QETH_IDX_ACT_CAUSE_CODE(iob->data)); | ||
1649 | goto out; | 1603 | goto out; |
1650 | } | 1604 | } |
1651 | 1605 | ||
@@ -1705,8 +1659,12 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
1705 | unsigned long timeout, event_timeout; | 1659 | unsigned long timeout, event_timeout; |
1706 | struct qeth_ipa_cmd *cmd; | 1660 | struct qeth_ipa_cmd *cmd; |
1707 | 1661 | ||
1708 | QETH_DBF_TEXT(TRACE, 2, "sendctl"); | 1662 | QETH_CARD_TEXT(card, 2, "sendctl"); |
1709 | 1663 | ||
1664 | if (card->read_or_write_problem) { | ||
1665 | qeth_release_buffer(iob->channel, iob); | ||
1666 | return -EIO; | ||
1667 | } | ||
1710 | reply = qeth_alloc_reply(card); | 1668 | reply = qeth_alloc_reply(card); |
1711 | if (!reply) { | 1669 | if (!reply) { |
1712 | return -ENOMEM; | 1670 | return -ENOMEM; |
@@ -1732,7 +1690,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
1732 | event_timeout = QETH_TIMEOUT; | 1690 | event_timeout = QETH_TIMEOUT; |
1733 | timeout = jiffies + event_timeout; | 1691 | timeout = jiffies + event_timeout; |
1734 | 1692 | ||
1735 | QETH_DBF_TEXT(TRACE, 6, "noirqpnd"); | 1693 | QETH_CARD_TEXT(card, 6, "noirqpnd"); |
1736 | spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); | 1694 | spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); |
1737 | rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, | 1695 | rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, |
1738 | (addr_t) iob, 0, 0); | 1696 | (addr_t) iob, 0, 0); |
@@ -1741,7 +1699,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
1741 | QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " | 1699 | QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " |
1742 | "ccw_device_start rc = %i\n", | 1700 | "ccw_device_start rc = %i\n", |
1743 | dev_name(&card->write.ccwdev->dev), rc); | 1701 | dev_name(&card->write.ccwdev->dev), rc); |
1744 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); | 1702 | QETH_CARD_TEXT_(card, 2, " err%d", rc); |
1745 | spin_lock_irqsave(&card->lock, flags); | 1703 | spin_lock_irqsave(&card->lock, flags); |
1746 | list_del_init(&reply->list); | 1704 | list_del_init(&reply->list); |
1747 | qeth_put_reply(reply); | 1705 | qeth_put_reply(reply); |
@@ -1778,6 +1736,9 @@ time_err: | |||
1778 | spin_unlock_irqrestore(&reply->card->lock, flags); | 1736 | spin_unlock_irqrestore(&reply->card->lock, flags); |
1779 | reply->rc = -ETIME; | 1737 | reply->rc = -ETIME; |
1780 | atomic_inc(&reply->received); | 1738 | atomic_inc(&reply->received); |
1739 | atomic_set(&card->write.irq_pending, 0); | ||
1740 | qeth_release_buffer(iob->channel, iob); | ||
1741 | card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO; | ||
1781 | wake_up(&reply->wait_q); | 1742 | wake_up(&reply->wait_q); |
1782 | rc = reply->rc; | 1743 | rc = reply->rc; |
1783 | qeth_put_reply(reply); | 1744 | qeth_put_reply(reply); |
@@ -1978,7 +1939,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, | |||
1978 | card->info.link_type = link_type; | 1939 | card->info.link_type = link_type; |
1979 | } else | 1940 | } else |
1980 | card->info.link_type = 0; | 1941 | card->info.link_type = 0; |
1981 | QETH_DBF_TEXT_(SETUP, 2, "link%d", link_type); | 1942 | QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type); |
1982 | QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); | 1943 | QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); |
1983 | return 0; | 1944 | return 0; |
1984 | } | 1945 | } |
@@ -2035,7 +1996,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, | |||
2035 | QETH_DBF_TEXT(SETUP, 2, "olmlimit"); | 1996 | QETH_DBF_TEXT(SETUP, 2, "olmlimit"); |
2036 | dev_err(&card->gdev->dev, "A connection could not be " | 1997 | dev_err(&card->gdev->dev, "A connection could not be " |
2037 | "established because of an OLM limit\n"); | 1998 | "established because of an OLM limit\n"); |
2038 | rc = -EMLINK; | 1999 | iob->rc = -EMLINK; |
2039 | } | 2000 | } |
2040 | QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); | 2001 | QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); |
2041 | return rc; | 2002 | return rc; |
@@ -2335,7 +2296,7 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card) | |||
2335 | { | 2296 | { |
2336 | struct qeth_buffer_pool_entry *entry; | 2297 | struct qeth_buffer_pool_entry *entry; |
2337 | 2298 | ||
2338 | QETH_DBF_TEXT(TRACE, 5, "inwrklst"); | 2299 | QETH_CARD_TEXT(card, 5, "inwrklst"); |
2339 | 2300 | ||
2340 | list_for_each_entry(entry, | 2301 | list_for_each_entry(entry, |
2341 | &card->qdio.init_pool.entry_list, init_list) { | 2302 | &card->qdio.init_pool.entry_list, init_list) { |
@@ -2522,7 +2483,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, | |||
2522 | int rc; | 2483 | int rc; |
2523 | char prot_type; | 2484 | char prot_type; |
2524 | 2485 | ||
2525 | QETH_DBF_TEXT(TRACE, 4, "sendipa"); | 2486 | QETH_CARD_TEXT(card, 4, "sendipa"); |
2526 | 2487 | ||
2527 | if (card->options.layer2) | 2488 | if (card->options.layer2) |
2528 | if (card->info.type == QETH_CARD_TYPE_OSN) | 2489 | if (card->info.type == QETH_CARD_TYPE_OSN) |
@@ -2534,6 +2495,10 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, | |||
2534 | qeth_prepare_ipa_cmd(card, iob, prot_type); | 2495 | qeth_prepare_ipa_cmd(card, iob, prot_type); |
2535 | rc = qeth_send_control_data(card, IPA_CMD_LENGTH, | 2496 | rc = qeth_send_control_data(card, IPA_CMD_LENGTH, |
2536 | iob, reply_cb, reply_param); | 2497 | iob, reply_cb, reply_param); |
2498 | if (rc == -ETIME) { | ||
2499 | qeth_clear_ipacmd_list(card); | ||
2500 | qeth_schedule_recovery(card); | ||
2501 | } | ||
2537 | return rc; | 2502 | return rc; |
2538 | } | 2503 | } |
2539 | EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); | 2504 | EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); |
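The -ETIME branch added above means an IPA command that times out now clears the pending reply list and schedules recovery instead of leaving the control channel wedged. A minimal caller sketch, with a hypothetical no-op reply callback used purely for illustration:

    /* Illustration only: the callback and the wrapper are not part of the patch. */
    static int example_cb(struct qeth_card *card, struct qeth_reply *reply,
                          unsigned long data)
    {
            return 0;       /* accept any reply */
    }

    static int example_startlan(struct qeth_card *card)
    {
            struct qeth_cmd_buffer *iob;

            iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, QETH_PROT_IPV4);
            /* On -ETIME the core has already flushed the IPA command list and
             * kicked the recovery thread; the caller just passes the error on. */
            return qeth_send_ipa_cmd(card, iob, example_cb, NULL);
    }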
@@ -2582,7 +2547,7 @@ int qeth_default_setadapterparms_cb(struct qeth_card *card, | |||
2582 | { | 2547 | { |
2583 | struct qeth_ipa_cmd *cmd; | 2548 | struct qeth_ipa_cmd *cmd; |
2584 | 2549 | ||
2585 | QETH_DBF_TEXT(TRACE, 4, "defadpcb"); | 2550 | QETH_CARD_TEXT(card, 4, "defadpcb"); |
2586 | 2551 | ||
2587 | cmd = (struct qeth_ipa_cmd *) data; | 2552 | cmd = (struct qeth_ipa_cmd *) data; |
2588 | if (cmd->hdr.return_code == 0) | 2553 | if (cmd->hdr.return_code == 0) |
@@ -2597,7 +2562,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card, | |||
2597 | { | 2562 | { |
2598 | struct qeth_ipa_cmd *cmd; | 2563 | struct qeth_ipa_cmd *cmd; |
2599 | 2564 | ||
2600 | QETH_DBF_TEXT(TRACE, 3, "quyadpcb"); | 2565 | QETH_CARD_TEXT(card, 3, "quyadpcb"); |
2601 | 2566 | ||
2602 | cmd = (struct qeth_ipa_cmd *) data; | 2567 | cmd = (struct qeth_ipa_cmd *) data; |
2603 | if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { | 2568 | if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { |
@@ -2633,7 +2598,7 @@ int qeth_query_setadapterparms(struct qeth_card *card) | |||
2633 | int rc; | 2598 | int rc; |
2634 | struct qeth_cmd_buffer *iob; | 2599 | struct qeth_cmd_buffer *iob; |
2635 | 2600 | ||
2636 | QETH_DBF_TEXT(TRACE, 3, "queryadp"); | 2601 | QETH_CARD_TEXT(card, 3, "queryadp"); |
2637 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, | 2602 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, |
2638 | sizeof(struct qeth_ipacmd_setadpparms)); | 2603 | sizeof(struct qeth_ipacmd_setadpparms)); |
2639 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); | 2604 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); |
@@ -2645,13 +2610,12 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf, | |||
2645 | unsigned int qdio_error, const char *dbftext) | 2610 | unsigned int qdio_error, const char *dbftext) |
2646 | { | 2611 | { |
2647 | if (qdio_error) { | 2612 | if (qdio_error) { |
2648 | QETH_DBF_TEXT(TRACE, 2, dbftext); | 2613 | QETH_CARD_TEXT(card, 2, dbftext); |
2649 | QETH_DBF_TEXT(QERR, 2, dbftext); | 2614 | QETH_CARD_TEXT_(card, 2, " F15=%02X", |
2650 | QETH_DBF_TEXT_(QERR, 2, " F15=%02X", | ||
2651 | buf->element[15].flags & 0xff); | 2615 | buf->element[15].flags & 0xff); |
2652 | QETH_DBF_TEXT_(QERR, 2, " F14=%02X", | 2616 | QETH_CARD_TEXT_(card, 2, " F14=%02X", |
2653 | buf->element[14].flags & 0xff); | 2617 | buf->element[14].flags & 0xff); |
2654 | QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error); | 2618 | QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); |
2655 | if ((buf->element[15].flags & 0xff) == 0x12) { | 2619 | if ((buf->element[15].flags & 0xff) == 0x12) { |
2656 | card->stats.rx_dropped++; | 2620 | card->stats.rx_dropped++; |
2657 | return 0; | 2621 | return 0; |
@@ -2717,8 +2681,7 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) | |||
2717 | if (rc) { | 2681 | if (rc) { |
2718 | dev_warn(&card->gdev->dev, | 2682 | dev_warn(&card->gdev->dev, |
2719 | "QDIO reported an error, rc=%i\n", rc); | 2683 | "QDIO reported an error, rc=%i\n", rc); |
2720 | QETH_DBF_TEXT(TRACE, 2, "qinberr"); | 2684 | QETH_CARD_TEXT(card, 2, "qinberr"); |
2721 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); | ||
2722 | } | 2685 | } |
2723 | queue->next_buf_to_init = (queue->next_buf_to_init + count) % | 2686 | queue->next_buf_to_init = (queue->next_buf_to_init + count) % |
2724 | QDIO_MAX_BUFFERS_PER_Q; | 2687 | QDIO_MAX_BUFFERS_PER_Q; |
@@ -2731,7 +2694,7 @@ static int qeth_handle_send_error(struct qeth_card *card, | |||
2731 | { | 2694 | { |
2732 | int sbalf15 = buffer->buffer->element[15].flags & 0xff; | 2695 | int sbalf15 = buffer->buffer->element[15].flags & 0xff; |
2733 | 2696 | ||
2734 | QETH_DBF_TEXT(TRACE, 6, "hdsnderr"); | 2697 | QETH_CARD_TEXT(card, 6, "hdsnderr"); |
2735 | if (card->info.type == QETH_CARD_TYPE_IQD) { | 2698 | if (card->info.type == QETH_CARD_TYPE_IQD) { |
2736 | if (sbalf15 == 0) { | 2699 | if (sbalf15 == 0) { |
2737 | qdio_err = 0; | 2700 | qdio_err = 0; |
@@ -2747,9 +2710,8 @@ static int qeth_handle_send_error(struct qeth_card *card, | |||
2747 | if ((sbalf15 >= 15) && (sbalf15 <= 31)) | 2710 | if ((sbalf15 >= 15) && (sbalf15 <= 31)) |
2748 | return QETH_SEND_ERROR_RETRY; | 2711 | return QETH_SEND_ERROR_RETRY; |
2749 | 2712 | ||
2750 | QETH_DBF_TEXT(TRACE, 1, "lnkfail"); | 2713 | QETH_CARD_TEXT(card, 1, "lnkfail"); |
2751 | QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); | 2714 | QETH_CARD_TEXT_(card, 1, "%04x %02x", |
2752 | QETH_DBF_TEXT_(TRACE, 1, "%04x %02x", | ||
2753 | (u16)qdio_err, (u8)sbalf15); | 2715 | (u16)qdio_err, (u8)sbalf15); |
2754 | return QETH_SEND_ERROR_LINK_FAILURE; | 2716 | return QETH_SEND_ERROR_LINK_FAILURE; |
2755 | } | 2717 | } |
@@ -2764,7 +2726,7 @@ static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) | |||
2764 | if (atomic_read(&queue->used_buffers) | 2726 | if (atomic_read(&queue->used_buffers) |
2765 | >= QETH_HIGH_WATERMARK_PACK){ | 2727 | >= QETH_HIGH_WATERMARK_PACK){ |
2766 | /* switch non-PACKING -> PACKING */ | 2728 | /* switch non-PACKING -> PACKING */ |
2767 | QETH_DBF_TEXT(TRACE, 6, "np->pack"); | 2729 | QETH_CARD_TEXT(queue->card, 6, "np->pack"); |
2768 | if (queue->card->options.performance_stats) | 2730 | if (queue->card->options.performance_stats) |
2769 | queue->card->perf_stats.sc_dp_p++; | 2731 | queue->card->perf_stats.sc_dp_p++; |
2770 | queue->do_pack = 1; | 2732 | queue->do_pack = 1; |
@@ -2787,7 +2749,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) | |||
2787 | if (atomic_read(&queue->used_buffers) | 2749 | if (atomic_read(&queue->used_buffers) |
2788 | <= QETH_LOW_WATERMARK_PACK) { | 2750 | <= QETH_LOW_WATERMARK_PACK) { |
2789 | /* switch PACKING -> non-PACKING */ | 2751 | /* switch PACKING -> non-PACKING */ |
2790 | QETH_DBF_TEXT(TRACE, 6, "pack->np"); | 2752 | QETH_CARD_TEXT(queue->card, 6, "pack->np"); |
2791 | if (queue->card->options.performance_stats) | 2753 | if (queue->card->options.performance_stats) |
2792 | queue->card->perf_stats.sc_p_dp++; | 2754 | queue->card->perf_stats.sc_p_dp++; |
2793 | queue->do_pack = 0; | 2755 | queue->do_pack = 0; |
@@ -2896,9 +2858,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, | |||
2896 | /* ignore temporary SIGA errors without busy condition */ | 2858 | /* ignore temporary SIGA errors without busy condition */ |
2897 | if (rc == QDIO_ERROR_SIGA_TARGET) | 2859 | if (rc == QDIO_ERROR_SIGA_TARGET) |
2898 | return; | 2860 | return; |
2899 | QETH_DBF_TEXT(TRACE, 2, "flushbuf"); | 2861 | QETH_CARD_TEXT(queue->card, 2, "flushbuf"); |
2900 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); | 2862 | QETH_CARD_TEXT_(queue->card, 2, " err%d", rc); |
2901 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card)); | ||
2902 | 2863 | ||
2903 | /* this must not happen under normal circumstances. if it | 2864 | /* this must not happen under normal circumstances. if it |
2904 | * happens something is really wrong -> recover */ | 2865 | * happens something is really wrong -> recover */ |
@@ -2960,10 +2921,9 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, | |||
2960 | int i; | 2921 | int i; |
2961 | unsigned qeth_send_err; | 2922 | unsigned qeth_send_err; |
2962 | 2923 | ||
2963 | QETH_DBF_TEXT(TRACE, 6, "qdouhdl"); | 2924 | QETH_CARD_TEXT(card, 6, "qdouhdl"); |
2964 | if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { | 2925 | if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { |
2965 | QETH_DBF_TEXT(TRACE, 2, "achkcond"); | 2926 | QETH_CARD_TEXT(card, 2, "achkcond"); |
2966 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); | ||
2967 | netif_stop_queue(card->dev); | 2927 | netif_stop_queue(card->dev); |
2968 | qeth_schedule_recovery(card); | 2928 | qeth_schedule_recovery(card); |
2969 | return; | 2929 | return; |
@@ -3033,13 +2993,11 @@ EXPORT_SYMBOL_GPL(qeth_get_priority_queue); | |||
3033 | int qeth_get_elements_no(struct qeth_card *card, void *hdr, | 2993 | int qeth_get_elements_no(struct qeth_card *card, void *hdr, |
3034 | struct sk_buff *skb, int elems) | 2994 | struct sk_buff *skb, int elems) |
3035 | { | 2995 | { |
3036 | int elements_needed = 0; | 2996 | int dlen = skb->len - skb->data_len; |
2997 | int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - | ||
2998 | PFN_DOWN((unsigned long)skb->data); | ||
3037 | 2999 | ||
3038 | if (skb_shinfo(skb)->nr_frags > 0) | 3000 | elements_needed += skb_shinfo(skb)->nr_frags; |
3039 | elements_needed = (skb_shinfo(skb)->nr_frags + 1); | ||
3040 | if (elements_needed == 0) | ||
3041 | elements_needed = 1 + (((((unsigned long) skb->data) % | ||
3042 | PAGE_SIZE) + skb->len) >> PAGE_SHIFT); | ||
3043 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { | 3001 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { |
3044 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " | 3002 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " |
3045 | "(Number=%d / Length=%d). Discarded.\n", | 3003 | "(Number=%d / Length=%d). Discarded.\n", |
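The rewritten element count works directly in page frames: PFN_UP() of the last linear byte minus PFN_DOWN() of the first gives the number of pages the linear data touches, and each page fragment adds exactly one more SBAL element. A worked example on 4 KiB pages:

    /* Linear part: 600 bytes starting at page offset 0xF00, so the last
     * 0x158 bytes spill into the following page.
     *
     *   PFN_DOWN(start)          = N        (frame holding the first byte)
     *   PFN_UP(start + 600 - 1)  = N + 2    (first frame past the last byte)
     *   linear elements          = 2
     *
     * plus skb_shinfo(skb)->nr_frags further elements, one per fragment.
     */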
@@ -3050,15 +3008,35 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr, | |||
3050 | } | 3008 | } |
3051 | EXPORT_SYMBOL_GPL(qeth_get_elements_no); | 3009 | EXPORT_SYMBOL_GPL(qeth_get_elements_no); |
3052 | 3010 | ||
3011 | int qeth_hdr_chk_and_bounce(struct sk_buff *skb, int len) | ||
3012 | { | ||
3013 | int hroom, inpage, rest; | ||
3014 | |||
3015 | if (((unsigned long)skb->data & PAGE_MASK) != | ||
3016 | (((unsigned long)skb->data + len - 1) & PAGE_MASK)) { | ||
3017 | hroom = skb_headroom(skb); | ||
3018 | inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE); | ||
3019 | rest = len - inpage; | ||
3020 | if (rest > hroom) | ||
3021 | return 1; | ||
3022 | memmove(skb->data - rest, skb->data, skb->len - skb->data_len); | ||
3023 | skb->data -= rest; | ||
3024 | QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest); | ||
3025 | } | ||
3026 | return 0; | ||
3027 | } | ||
3028 | EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); | ||
3029 | |||
3053 | static inline void __qeth_fill_buffer(struct sk_buff *skb, | 3030 | static inline void __qeth_fill_buffer(struct sk_buff *skb, |
3054 | struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, | 3031 | struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, |
3055 | int offset) | 3032 | int offset) |
3056 | { | 3033 | { |
3057 | int length = skb->len; | 3034 | int length = skb->len - skb->data_len; |
3058 | int length_here; | 3035 | int length_here; |
3059 | int element; | 3036 | int element; |
3060 | char *data; | 3037 | char *data; |
3061 | int first_lap ; | 3038 | int first_lap, cnt; |
3039 | struct skb_frag_struct *frag; | ||
3062 | 3040 | ||
3063 | element = *next_element_to_fill; | 3041 | element = *next_element_to_fill; |
3064 | data = skb->data; | 3042 | data = skb->data; |
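The new qeth_hdr_chk_and_bounce() helper in the hunk above protects the transmit paths: if the first len bytes at skb->data would straddle a page boundary, the linear data is shifted back into the skb's headroom so the hardware header fits within a single page (and thus a single SBAL element); if the headroom is too small it reports failure and the caller drops the frame. A hedged sketch of the intended call pattern, not copied from any particular caller:

    static int example_prep_hdr(struct sk_buff *skb)
    {
            struct qeth_hdr *hdr;

            hdr = (struct qeth_hdr *)skb_push(skb, sizeof(struct qeth_hdr));
            memset(hdr, 0, sizeof(*hdr));   /* a real caller fills the header */
            if (qeth_hdr_chk_and_bounce(skb, sizeof(struct qeth_hdr)))
                    return -EBUSY;          /* headroom too small to bounce */
            return 0;
    }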
@@ -3081,10 +3059,14 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, | |||
3081 | length -= length_here; | 3059 | length -= length_here; |
3082 | if (!length) { | 3060 | if (!length) { |
3083 | if (first_lap) | 3061 | if (first_lap) |
3084 | buffer->element[element].flags = 0; | 3062 | if (skb_shinfo(skb)->nr_frags) |
3063 | buffer->element[element].flags = | ||
3064 | SBAL_FLAGS_FIRST_FRAG; | ||
3065 | else | ||
3066 | buffer->element[element].flags = 0; | ||
3085 | else | 3067 | else |
3086 | buffer->element[element].flags = | 3068 | buffer->element[element].flags = |
3087 | SBAL_FLAGS_LAST_FRAG; | 3069 | SBAL_FLAGS_MIDDLE_FRAG; |
3088 | } else { | 3070 | } else { |
3089 | if (first_lap) | 3071 | if (first_lap) |
3090 | buffer->element[element].flags = | 3072 | buffer->element[element].flags = |
@@ -3097,6 +3079,18 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, | |||
3097 | element++; | 3079 | element++; |
3098 | first_lap = 0; | 3080 | first_lap = 0; |
3099 | } | 3081 | } |
3082 | |||
3083 | for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { | ||
3084 | frag = &skb_shinfo(skb)->frags[cnt]; | ||
3085 | buffer->element[element].addr = (char *)page_to_phys(frag->page) | ||
3086 | + frag->page_offset; | ||
3087 | buffer->element[element].length = frag->size; | ||
3088 | buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG; | ||
3089 | element++; | ||
3090 | } | ||
3091 | |||
3092 | if (buffer->element[element - 1].flags) | ||
3093 | buffer->element[element - 1].flags = SBAL_FLAGS_LAST_FRAG; | ||
3100 | *next_element_to_fill = element; | 3094 | *next_element_to_fill = element; |
3101 | } | 3095 | } |
3102 | 3096 | ||
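With the fragment loop added above, __qeth_fill_buffer() maps non-linear skbs itself, which is what lets the separate __qeth_fill_buffer_frag() call be dropped in the next hunk. The resulting SBAL flag layout, shown for a linear part spanning two pages plus two page fragments:

    /*
     *   element[n]     linear, page 1   SBAL_FLAGS_FIRST_FRAG
     *   element[n+1]   linear, page 2   SBAL_FLAGS_MIDDLE_FRAG
     *   element[n+2]   frag 0           SBAL_FLAGS_MIDDLE_FRAG
     *   element[n+3]   frag 1           SBAL_FLAGS_LAST_FRAG (rewritten at the end)
     *
     * A purely linear skb that fits in one page keeps flags == 0 on its
     * single element, as before.
     */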
@@ -3137,20 +3131,16 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, | |||
3137 | buf->next_element_to_fill++; | 3131 | buf->next_element_to_fill++; |
3138 | } | 3132 | } |
3139 | 3133 | ||
3140 | if (skb_shinfo(skb)->nr_frags == 0) | 3134 | __qeth_fill_buffer(skb, buffer, large_send, |
3141 | __qeth_fill_buffer(skb, buffer, large_send, | 3135 | (int *)&buf->next_element_to_fill, offset); |
3142 | (int *)&buf->next_element_to_fill, offset); | ||
3143 | else | ||
3144 | __qeth_fill_buffer_frag(skb, buffer, large_send, | ||
3145 | (int *)&buf->next_element_to_fill); | ||
3146 | 3136 | ||
3147 | if (!queue->do_pack) { | 3137 | if (!queue->do_pack) { |
3148 | QETH_DBF_TEXT(TRACE, 6, "fillbfnp"); | 3138 | QETH_CARD_TEXT(queue->card, 6, "fillbfnp"); |
3149 | /* set state to PRIMED -> will be flushed */ | 3139 | /* set state to PRIMED -> will be flushed */ |
3150 | atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); | 3140 | atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); |
3151 | flush_cnt = 1; | 3141 | flush_cnt = 1; |
3152 | } else { | 3142 | } else { |
3153 | QETH_DBF_TEXT(TRACE, 6, "fillbfpa"); | 3143 | QETH_CARD_TEXT(queue->card, 6, "fillbfpa"); |
3154 | if (queue->card->options.performance_stats) | 3144 | if (queue->card->options.performance_stats) |
3155 | queue->card->perf_stats.skbs_sent_pack++; | 3145 | queue->card->perf_stats.skbs_sent_pack++; |
3156 | if (buf->next_element_to_fill >= | 3146 | if (buf->next_element_to_fill >= |
@@ -3210,7 +3200,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card, | |||
3210 | rc = dev_queue_xmit(skb); | 3200 | rc = dev_queue_xmit(skb); |
3211 | } else { | 3201 | } else { |
3212 | dev_kfree_skb_any(skb); | 3202 | dev_kfree_skb_any(skb); |
3213 | QETH_DBF_TEXT(QERR, 2, "qrdrop"); | 3203 | QETH_CARD_TEXT(card, 2, "qrdrop"); |
3214 | } | 3204 | } |
3215 | } | 3205 | } |
3216 | return 0; | 3206 | return 0; |
@@ -3312,14 +3302,14 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, | |||
3312 | struct qeth_ipa_cmd *cmd; | 3302 | struct qeth_ipa_cmd *cmd; |
3313 | struct qeth_ipacmd_setadpparms *setparms; | 3303 | struct qeth_ipacmd_setadpparms *setparms; |
3314 | 3304 | ||
3315 | QETH_DBF_TEXT(TRACE, 4, "prmadpcb"); | 3305 | QETH_CARD_TEXT(card, 4, "prmadpcb"); |
3316 | 3306 | ||
3317 | cmd = (struct qeth_ipa_cmd *) data; | 3307 | cmd = (struct qeth_ipa_cmd *) data; |
3318 | setparms = &(cmd->data.setadapterparms); | 3308 | setparms = &(cmd->data.setadapterparms); |
3319 | 3309 | ||
3320 | qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); | 3310 | qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); |
3321 | if (cmd->hdr.return_code) { | 3311 | if (cmd->hdr.return_code) { |
3322 | QETH_DBF_TEXT_(TRACE, 4, "prmrc%2.2x", cmd->hdr.return_code); | 3312 | QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code); |
3323 | setparms->data.mode = SET_PROMISC_MODE_OFF; | 3313 | setparms->data.mode = SET_PROMISC_MODE_OFF; |
3324 | } | 3314 | } |
3325 | card->info.promisc_mode = setparms->data.mode; | 3315 | card->info.promisc_mode = setparms->data.mode; |
@@ -3333,7 +3323,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) | |||
3333 | struct qeth_cmd_buffer *iob; | 3323 | struct qeth_cmd_buffer *iob; |
3334 | struct qeth_ipa_cmd *cmd; | 3324 | struct qeth_ipa_cmd *cmd; |
3335 | 3325 | ||
3336 | QETH_DBF_TEXT(TRACE, 4, "setprom"); | 3326 | QETH_CARD_TEXT(card, 4, "setprom"); |
3337 | 3327 | ||
3338 | if (((dev->flags & IFF_PROMISC) && | 3328 | if (((dev->flags & IFF_PROMISC) && |
3339 | (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || | 3329 | (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || |
@@ -3343,7 +3333,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) | |||
3343 | mode = SET_PROMISC_MODE_OFF; | 3333 | mode = SET_PROMISC_MODE_OFF; |
3344 | if (dev->flags & IFF_PROMISC) | 3334 | if (dev->flags & IFF_PROMISC) |
3345 | mode = SET_PROMISC_MODE_ON; | 3335 | mode = SET_PROMISC_MODE_ON; |
3346 | QETH_DBF_TEXT_(TRACE, 4, "mode:%x", mode); | 3336 | QETH_CARD_TEXT_(card, 4, "mode:%x", mode); |
3347 | 3337 | ||
3348 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, | 3338 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, |
3349 | sizeof(struct qeth_ipacmd_setadpparms)); | 3339 | sizeof(struct qeth_ipacmd_setadpparms)); |
@@ -3360,9 +3350,9 @@ int qeth_change_mtu(struct net_device *dev, int new_mtu) | |||
3360 | 3350 | ||
3361 | card = dev->ml_priv; | 3351 | card = dev->ml_priv; |
3362 | 3352 | ||
3363 | QETH_DBF_TEXT(TRACE, 4, "chgmtu"); | 3353 | QETH_CARD_TEXT(card, 4, "chgmtu"); |
3364 | sprintf(dbf_text, "%8x", new_mtu); | 3354 | sprintf(dbf_text, "%8x", new_mtu); |
3365 | QETH_DBF_TEXT(TRACE, 4, dbf_text); | 3355 | QETH_CARD_TEXT(card, 4, dbf_text); |
3366 | 3356 | ||
3367 | if (new_mtu < 64) | 3357 | if (new_mtu < 64) |
3368 | return -EINVAL; | 3358 | return -EINVAL; |
@@ -3382,7 +3372,7 @@ struct net_device_stats *qeth_get_stats(struct net_device *dev) | |||
3382 | 3372 | ||
3383 | card = dev->ml_priv; | 3373 | card = dev->ml_priv; |
3384 | 3374 | ||
3385 | QETH_DBF_TEXT(TRACE, 5, "getstat"); | 3375 | QETH_CARD_TEXT(card, 5, "getstat"); |
3386 | 3376 | ||
3387 | return &card->stats; | 3377 | return &card->stats; |
3388 | } | 3378 | } |
@@ -3393,7 +3383,7 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, | |||
3393 | { | 3383 | { |
3394 | struct qeth_ipa_cmd *cmd; | 3384 | struct qeth_ipa_cmd *cmd; |
3395 | 3385 | ||
3396 | QETH_DBF_TEXT(TRACE, 4, "chgmaccb"); | 3386 | QETH_CARD_TEXT(card, 4, "chgmaccb"); |
3397 | 3387 | ||
3398 | cmd = (struct qeth_ipa_cmd *) data; | 3388 | cmd = (struct qeth_ipa_cmd *) data; |
3399 | if (!card->options.layer2 || | 3389 | if (!card->options.layer2 || |
@@ -3413,7 +3403,7 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card) | |||
3413 | struct qeth_cmd_buffer *iob; | 3403 | struct qeth_cmd_buffer *iob; |
3414 | struct qeth_ipa_cmd *cmd; | 3404 | struct qeth_ipa_cmd *cmd; |
3415 | 3405 | ||
3416 | QETH_DBF_TEXT(TRACE, 4, "chgmac"); | 3406 | QETH_CARD_TEXT(card, 4, "chgmac"); |
3417 | 3407 | ||
3418 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, | 3408 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, |
3419 | sizeof(struct qeth_ipacmd_setadpparms)); | 3409 | sizeof(struct qeth_ipacmd_setadpparms)); |
@@ -3433,9 +3423,8 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, | |||
3433 | { | 3423 | { |
3434 | struct qeth_ipa_cmd *cmd; | 3424 | struct qeth_ipa_cmd *cmd; |
3435 | struct qeth_set_access_ctrl *access_ctrl_req; | 3425 | struct qeth_set_access_ctrl *access_ctrl_req; |
3436 | int rc; | ||
3437 | 3426 | ||
3438 | QETH_DBF_TEXT(TRACE, 4, "setaccb"); | 3427 | QETH_CARD_TEXT(card, 4, "setaccb"); |
3439 | 3428 | ||
3440 | cmd = (struct qeth_ipa_cmd *) data; | 3429 | cmd = (struct qeth_ipa_cmd *) data; |
3441 | access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; | 3430 | access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; |
@@ -3460,7 +3449,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, | |||
3460 | card->gdev->dev.kobj.name, | 3449 | card->gdev->dev.kobj.name, |
3461 | access_ctrl_req->subcmd_code, | 3450 | access_ctrl_req->subcmd_code, |
3462 | cmd->data.setadapterparms.hdr.return_code); | 3451 | cmd->data.setadapterparms.hdr.return_code); |
3463 | rc = 0; | ||
3464 | break; | 3452 | break; |
3465 | } | 3453 | } |
3466 | case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: | 3454 | case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: |
@@ -3474,7 +3462,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, | |||
3474 | 3462 | ||
3475 | /* ensure isolation mode is "none" */ | 3463 | /* ensure isolation mode is "none" */ |
3476 | card->options.isolation = ISOLATION_MODE_NONE; | 3464 | card->options.isolation = ISOLATION_MODE_NONE; |
3477 | rc = -EOPNOTSUPP; | ||
3478 | break; | 3465 | break; |
3479 | } | 3466 | } |
3480 | case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: | 3467 | case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: |
@@ -3489,7 +3476,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, | |||
3489 | 3476 | ||
3490 | /* ensure isolation mode is "none" */ | 3477 | /* ensure isolation mode is "none" */ |
3491 | card->options.isolation = ISOLATION_MODE_NONE; | 3478 | card->options.isolation = ISOLATION_MODE_NONE; |
3492 | rc = -EOPNOTSUPP; | ||
3493 | break; | 3479 | break; |
3494 | } | 3480 | } |
3495 | case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: | 3481 | case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: |
@@ -3503,7 +3489,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, | |||
3503 | 3489 | ||
3504 | /* ensure isolation mode is "none" */ | 3490 | /* ensure isolation mode is "none" */ |
3505 | card->options.isolation = ISOLATION_MODE_NONE; | 3491 | card->options.isolation = ISOLATION_MODE_NONE; |
3506 | rc = -EPERM; | ||
3507 | break; | 3492 | break; |
3508 | } | 3493 | } |
3509 | default: | 3494 | default: |
@@ -3517,12 +3502,11 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, | |||
3517 | 3502 | ||
3518 | /* ensure isolation mode is "none" */ | 3503 | /* ensure isolation mode is "none" */ |
3519 | card->options.isolation = ISOLATION_MODE_NONE; | 3504 | card->options.isolation = ISOLATION_MODE_NONE; |
3520 | rc = 0; | ||
3521 | break; | 3505 | break; |
3522 | } | 3506 | } |
3523 | } | 3507 | } |
3524 | qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); | 3508 | qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); |
3525 | return rc; | 3509 | return 0; |
3526 | } | 3510 | } |
3527 | 3511 | ||
3528 | static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, | 3512 | static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, |
@@ -3533,7 +3517,7 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, | |||
3533 | struct qeth_ipa_cmd *cmd; | 3517 | struct qeth_ipa_cmd *cmd; |
3534 | struct qeth_set_access_ctrl *access_ctrl_req; | 3518 | struct qeth_set_access_ctrl *access_ctrl_req; |
3535 | 3519 | ||
3536 | QETH_DBF_TEXT(TRACE, 4, "setacctl"); | 3520 | QETH_CARD_TEXT(card, 4, "setacctl"); |
3537 | 3521 | ||
3538 | QETH_DBF_TEXT_(SETUP, 2, "setacctl"); | 3522 | QETH_DBF_TEXT_(SETUP, 2, "setacctl"); |
3539 | QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); | 3523 | QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); |
@@ -3555,7 +3539,7 @@ int qeth_set_access_ctrl_online(struct qeth_card *card) | |||
3555 | { | 3539 | { |
3556 | int rc = 0; | 3540 | int rc = 0; |
3557 | 3541 | ||
3558 | QETH_DBF_TEXT(TRACE, 4, "setactlo"); | 3542 | QETH_CARD_TEXT(card, 4, "setactlo"); |
3559 | 3543 | ||
3560 | if ((card->info.type == QETH_CARD_TYPE_OSD || | 3544 | if ((card->info.type == QETH_CARD_TYPE_OSD || |
3561 | card->info.type == QETH_CARD_TYPE_OSX) && | 3545 | card->info.type == QETH_CARD_TYPE_OSX) && |
@@ -3583,8 +3567,8 @@ void qeth_tx_timeout(struct net_device *dev) | |||
3583 | { | 3567 | { |
3584 | struct qeth_card *card; | 3568 | struct qeth_card *card; |
3585 | 3569 | ||
3586 | QETH_DBF_TEXT(TRACE, 4, "txtimeo"); | ||
3587 | card = dev->ml_priv; | 3570 | card = dev->ml_priv; |
3571 | QETH_CARD_TEXT(card, 4, "txtimeo"); | ||
3588 | card->stats.tx_errors++; | 3572 | card->stats.tx_errors++; |
3589 | qeth_schedule_recovery(card); | 3573 | qeth_schedule_recovery(card); |
3590 | } | 3574 | } |
@@ -3663,7 +3647,7 @@ static int qeth_send_ipa_snmp_cmd(struct qeth_card *card, | |||
3663 | { | 3647 | { |
3664 | u16 s1, s2; | 3648 | u16 s1, s2; |
3665 | 3649 | ||
3666 | QETH_DBF_TEXT(TRACE, 4, "sendsnmp"); | 3650 | QETH_CARD_TEXT(card, 4, "sendsnmp"); |
3667 | 3651 | ||
3668 | memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); | 3652 | memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); |
3669 | memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), | 3653 | memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), |
@@ -3688,7 +3672,7 @@ static int qeth_snmp_command_cb(struct qeth_card *card, | |||
3688 | unsigned char *data; | 3672 | unsigned char *data; |
3689 | __u16 data_len; | 3673 | __u16 data_len; |
3690 | 3674 | ||
3691 | QETH_DBF_TEXT(TRACE, 3, "snpcmdcb"); | 3675 | QETH_CARD_TEXT(card, 3, "snpcmdcb"); |
3692 | 3676 | ||
3693 | cmd = (struct qeth_ipa_cmd *) sdata; | 3677 | cmd = (struct qeth_ipa_cmd *) sdata; |
3694 | data = (unsigned char *)((char *)cmd - reply->offset); | 3678 | data = (unsigned char *)((char *)cmd - reply->offset); |
@@ -3696,13 +3680,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card, | |||
3696 | snmp = &cmd->data.setadapterparms.data.snmp; | 3680 | snmp = &cmd->data.setadapterparms.data.snmp; |
3697 | 3681 | ||
3698 | if (cmd->hdr.return_code) { | 3682 | if (cmd->hdr.return_code) { |
3699 | QETH_DBF_TEXT_(TRACE, 4, "scer1%i", cmd->hdr.return_code); | 3683 | QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code); |
3700 | return 0; | 3684 | return 0; |
3701 | } | 3685 | } |
3702 | if (cmd->data.setadapterparms.hdr.return_code) { | 3686 | if (cmd->data.setadapterparms.hdr.return_code) { |
3703 | cmd->hdr.return_code = | 3687 | cmd->hdr.return_code = |
3704 | cmd->data.setadapterparms.hdr.return_code; | 3688 | cmd->data.setadapterparms.hdr.return_code; |
3705 | QETH_DBF_TEXT_(TRACE, 4, "scer2%i", cmd->hdr.return_code); | 3689 | QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code); |
3706 | return 0; | 3690 | return 0; |
3707 | } | 3691 | } |
3708 | data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); | 3692 | data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); |
@@ -3713,13 +3697,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card, | |||
3713 | 3697 | ||
3714 | /* check if there is enough room in userspace */ | 3698 | /* check if there is enough room in userspace */ |
3715 | if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { | 3699 | if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { |
3716 | QETH_DBF_TEXT_(TRACE, 4, "scer3%i", -ENOMEM); | 3700 | QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM); |
3717 | cmd->hdr.return_code = -ENOMEM; | 3701 | cmd->hdr.return_code = -ENOMEM; |
3718 | return 0; | 3702 | return 0; |
3719 | } | 3703 | } |
3720 | QETH_DBF_TEXT_(TRACE, 4, "snore%i", | 3704 | QETH_CARD_TEXT_(card, 4, "snore%i", |
3721 | cmd->data.setadapterparms.hdr.used_total); | 3705 | cmd->data.setadapterparms.hdr.used_total); |
3722 | QETH_DBF_TEXT_(TRACE, 4, "sseqn%i", | 3706 | QETH_CARD_TEXT_(card, 4, "sseqn%i", |
3723 | cmd->data.setadapterparms.hdr.seq_no); | 3707 | cmd->data.setadapterparms.hdr.seq_no); |
3724 | /*copy entries to user buffer*/ | 3708 | /*copy entries to user buffer*/ |
3725 | if (cmd->data.setadapterparms.hdr.seq_no == 1) { | 3709 | if (cmd->data.setadapterparms.hdr.seq_no == 1) { |
@@ -3733,9 +3717,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card, | |||
3733 | } | 3717 | } |
3734 | qinfo->udata_offset += data_len; | 3718 | qinfo->udata_offset += data_len; |
3735 | /* check if all replies received ... */ | 3719 | /* check if all replies received ... */ |
3736 | QETH_DBF_TEXT_(TRACE, 4, "srtot%i", | 3720 | QETH_CARD_TEXT_(card, 4, "srtot%i", |
3737 | cmd->data.setadapterparms.hdr.used_total); | 3721 | cmd->data.setadapterparms.hdr.used_total); |
3738 | QETH_DBF_TEXT_(TRACE, 4, "srseq%i", | 3722 | QETH_CARD_TEXT_(card, 4, "srseq%i", |
3739 | cmd->data.setadapterparms.hdr.seq_no); | 3723 | cmd->data.setadapterparms.hdr.seq_no); |
3740 | if (cmd->data.setadapterparms.hdr.seq_no < | 3724 | if (cmd->data.setadapterparms.hdr.seq_no < |
3741 | cmd->data.setadapterparms.hdr.used_total) | 3725 | cmd->data.setadapterparms.hdr.used_total) |
@@ -3752,7 +3736,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
3752 | struct qeth_arp_query_info qinfo = {0, }; | 3736 | struct qeth_arp_query_info qinfo = {0, }; |
3753 | int rc = 0; | 3737 | int rc = 0; |
3754 | 3738 | ||
3755 | QETH_DBF_TEXT(TRACE, 3, "snmpcmd"); | 3739 | QETH_CARD_TEXT(card, 3, "snmpcmd"); |
3756 | 3740 | ||
3757 | if (card->info.guestlan) | 3741 | if (card->info.guestlan) |
3758 | return -EOPNOTSUPP; | 3742 | return -EOPNOTSUPP; |
@@ -3764,15 +3748,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
3764 | /* skip 4 bytes (data_len struct member) to get req_len */ | 3748 | /* skip 4 bytes (data_len struct member) to get req_len */ |
3765 | if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) | 3749 | if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) |
3766 | return -EFAULT; | 3750 | return -EFAULT; |
3767 | ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL); | 3751 | ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr)); |
3768 | if (!ureq) { | 3752 | if (IS_ERR(ureq)) { |
3769 | QETH_DBF_TEXT(TRACE, 2, "snmpnome"); | 3753 | QETH_CARD_TEXT(card, 2, "snmpnome"); |
3770 | return -ENOMEM; | 3754 | return PTR_ERR(ureq); |
3771 | } | ||
3772 | if (copy_from_user(ureq, udata, | ||
3773 | req_len + sizeof(struct qeth_snmp_ureq_hdr))) { | ||
3774 | kfree(ureq); | ||
3775 | return -EFAULT; | ||
3776 | } | 3755 | } |
3777 | qinfo.udata_len = ureq->hdr.data_len; | 3756 | qinfo.udata_len = ureq->hdr.data_len; |
3778 | qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); | 3757 | qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); |
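The memdup_user() conversion above folds the allocate-and-copy sequence into a single call that returns an ERR_PTR() on failure, so the separate -ENOMEM and -EFAULT branches go away. The general pattern, independent of qeth:

    static int example_copy_from_user(void __user *uptr, size_t len)
    {
            void *buf;

            buf = memdup_user(uptr, len);
            if (IS_ERR(buf))
                    return PTR_ERR(buf);    /* -ENOMEM or -EFAULT */
            /* ... use buf ... */
            kfree(buf);
            return 0;
    }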
@@ -3991,6 +3970,7 @@ retriable: | |||
3991 | else | 3970 | else |
3992 | goto retry; | 3971 | goto retry; |
3993 | } | 3972 | } |
3973 | card->read_or_write_problem = 0; | ||
3994 | rc = qeth_mpc_initialize(card); | 3974 | rc = qeth_mpc_initialize(card); |
3995 | if (rc) { | 3975 | if (rc) { |
3996 | QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); | 3976 | QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); |
@@ -4120,13 +4100,8 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, | |||
4120 | skb_len -= data_len; | 4100 | skb_len -= data_len; |
4121 | if (skb_len) { | 4101 | if (skb_len) { |
4122 | if (qeth_is_last_sbale(element)) { | 4102 | if (qeth_is_last_sbale(element)) { |
4123 | QETH_DBF_TEXT(TRACE, 4, "unexeob"); | 4103 | QETH_CARD_TEXT(card, 4, "unexeob"); |
4124 | QETH_DBF_TEXT_(TRACE, 4, "%s", | 4104 | QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); |
4125 | CARD_BUS_ID(card)); | ||
4126 | QETH_DBF_TEXT(QERR, 2, "unexeob"); | ||
4127 | QETH_DBF_TEXT_(QERR, 2, "%s", | ||
4128 | CARD_BUS_ID(card)); | ||
4129 | QETH_DBF_HEX(MISC, 4, buffer, sizeof(*buffer)); | ||
4130 | dev_kfree_skb_any(skb); | 4105 | dev_kfree_skb_any(skb); |
4131 | card->stats.rx_errors++; | 4106 | card->stats.rx_errors++; |
4132 | return NULL; | 4107 | return NULL; |
@@ -4147,8 +4122,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, | |||
4147 | return skb; | 4122 | return skb; |
4148 | no_mem: | 4123 | no_mem: |
4149 | if (net_ratelimit()) { | 4124 | if (net_ratelimit()) { |
4150 | QETH_DBF_TEXT(TRACE, 2, "noskbmem"); | 4125 | QETH_CARD_TEXT(card, 2, "noskbmem"); |
4151 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); | ||
4152 | } | 4126 | } |
4153 | card->stats.rx_dropped++; | 4127 | card->stats.rx_dropped++; |
4154 | return NULL; | 4128 | return NULL; |
@@ -4164,17 +4138,17 @@ static void qeth_unregister_dbf_views(void) | |||
4164 | } | 4138 | } |
4165 | } | 4139 | } |
4166 | 4140 | ||
4167 | void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *fmt, ...) | 4141 | void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) |
4168 | { | 4142 | { |
4169 | char dbf_txt_buf[32]; | 4143 | char dbf_txt_buf[32]; |
4170 | va_list args; | 4144 | va_list args; |
4171 | 4145 | ||
4172 | if (level > (qeth_dbf[dbf_nix].id)->level) | 4146 | if (level > id->level) |
4173 | return; | 4147 | return; |
4174 | va_start(args, fmt); | 4148 | va_start(args, fmt); |
4175 | vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); | 4149 | vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); |
4176 | va_end(args); | 4150 | va_end(args); |
4177 | debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf); | 4151 | debug_text_event(id, level, dbf_txt_buf); |
4178 | } | 4152 | } |
4179 | EXPORT_SYMBOL_GPL(qeth_dbf_longtext); | 4153 | EXPORT_SYMBOL_GPL(qeth_dbf_longtext); |
4180 | 4154 | ||
@@ -4282,6 +4256,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) | |||
4282 | struct device *dev; | 4256 | struct device *dev; |
4283 | int rc; | 4257 | int rc; |
4284 | unsigned long flags; | 4258 | unsigned long flags; |
4259 | char dbf_name[20]; | ||
4285 | 4260 | ||
4286 | QETH_DBF_TEXT(SETUP, 2, "probedev"); | 4261 | QETH_DBF_TEXT(SETUP, 2, "probedev"); |
4287 | 4262 | ||
@@ -4297,6 +4272,17 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) | |||
4297 | rc = -ENOMEM; | 4272 | rc = -ENOMEM; |
4298 | goto err_dev; | 4273 | goto err_dev; |
4299 | } | 4274 | } |
4275 | |||
4276 | snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", | ||
4277 | dev_name(&gdev->dev)); | ||
4278 | card->debug = debug_register(dbf_name, 2, 1, 8); | ||
4279 | if (!card->debug) { | ||
4280 | QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); | ||
4281 | rc = -ENOMEM; | ||
4282 | goto err_card; | ||
4283 | } | ||
4284 | debug_register_view(card->debug, &debug_hex_ascii_view); | ||
4285 | |||
4300 | card->read.ccwdev = gdev->cdev[0]; | 4286 | card->read.ccwdev = gdev->cdev[0]; |
4301 | card->write.ccwdev = gdev->cdev[1]; | 4287 | card->write.ccwdev = gdev->cdev[1]; |
4302 | card->data.ccwdev = gdev->cdev[2]; | 4288 | card->data.ccwdev = gdev->cdev[2]; |
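The per-card debug area registered above follows the usual s390 debug-feature lifecycle: register an area named after the ccwgroup device, attach a view so its entries can be read through the s390 debug feature, and unregister it on the error path and in the remove routine (both handled in later hunks). A condensed sketch with the sizes chosen here (2 pages per area, 1 area, 8-byte entries):

    char dbf_name[20];

    snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", dev_name(&gdev->dev));
    card->debug = debug_register(dbf_name, 2, 1, 8);
    if (!card->debug)
            return -ENOMEM;
    debug_register_view(card->debug, &debug_hex_ascii_view);
    /* ... and on error unwind or device removal ... */
    debug_unregister(card->debug);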
@@ -4309,12 +4295,12 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) | |||
4309 | rc = qeth_determine_card_type(card); | 4295 | rc = qeth_determine_card_type(card); |
4310 | if (rc) { | 4296 | if (rc) { |
4311 | QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); | 4297 | QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); |
4312 | goto err_card; | 4298 | goto err_dbf; |
4313 | } | 4299 | } |
4314 | rc = qeth_setup_card(card); | 4300 | rc = qeth_setup_card(card); |
4315 | if (rc) { | 4301 | if (rc) { |
4316 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 4302 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
4317 | goto err_card; | 4303 | goto err_dbf; |
4318 | } | 4304 | } |
4319 | 4305 | ||
4320 | if (card->info.type == QETH_CARD_TYPE_OSN) | 4306 | if (card->info.type == QETH_CARD_TYPE_OSN) |
@@ -4322,7 +4308,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) | |||
4322 | else | 4308 | else |
4323 | rc = qeth_core_create_device_attributes(dev); | 4309 | rc = qeth_core_create_device_attributes(dev); |
4324 | if (rc) | 4310 | if (rc) |
4325 | goto err_card; | 4311 | goto err_dbf; |
4326 | switch (card->info.type) { | 4312 | switch (card->info.type) { |
4327 | case QETH_CARD_TYPE_OSN: | 4313 | case QETH_CARD_TYPE_OSN: |
4328 | case QETH_CARD_TYPE_OSM: | 4314 | case QETH_CARD_TYPE_OSM: |
@@ -4352,6 +4338,8 @@ err_attr: | |||
4352 | qeth_core_remove_osn_attributes(dev); | 4338 | qeth_core_remove_osn_attributes(dev); |
4353 | else | 4339 | else |
4354 | qeth_core_remove_device_attributes(dev); | 4340 | qeth_core_remove_device_attributes(dev); |
4341 | err_dbf: | ||
4342 | debug_unregister(card->debug); | ||
4355 | err_card: | 4343 | err_card: |
4356 | qeth_core_free_card(card); | 4344 | qeth_core_free_card(card); |
4357 | err_dev: | 4345 | err_dev: |
@@ -4365,16 +4353,19 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev) | |||
4365 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | 4353 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); |
4366 | 4354 | ||
4367 | QETH_DBF_TEXT(SETUP, 2, "removedv"); | 4355 | QETH_DBF_TEXT(SETUP, 2, "removedv"); |
4368 | if (card->discipline.ccwgdriver) { | ||
4369 | card->discipline.ccwgdriver->remove(gdev); | ||
4370 | qeth_core_free_discipline(card); | ||
4371 | } | ||
4372 | 4356 | ||
4373 | if (card->info.type == QETH_CARD_TYPE_OSN) { | 4357 | if (card->info.type == QETH_CARD_TYPE_OSN) { |
4374 | qeth_core_remove_osn_attributes(&gdev->dev); | 4358 | qeth_core_remove_osn_attributes(&gdev->dev); |
4375 | } else { | 4359 | } else { |
4376 | qeth_core_remove_device_attributes(&gdev->dev); | 4360 | qeth_core_remove_device_attributes(&gdev->dev); |
4377 | } | 4361 | } |
4362 | |||
4363 | if (card->discipline.ccwgdriver) { | ||
4364 | card->discipline.ccwgdriver->remove(gdev); | ||
4365 | qeth_core_free_discipline(card); | ||
4366 | } | ||
4367 | |||
4368 | debug_unregister(card->debug); | ||
4378 | write_lock_irqsave(&qeth_core_card_list.rwlock, flags); | 4369 | write_lock_irqsave(&qeth_core_card_list.rwlock, flags); |
4379 | list_del(&card->list); | 4370 | list_del(&card->list); |
4380 | write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); | 4371 | write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); |
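Note on the qeth_core_main.c hunks above: qeth_dbf_longtext() now takes the debug_info_t pointer directly instead of an index into the global qeth_dbf[] array, and qeth_core_probe_device() registers a per-card debug area named "qeth_card_<bus id>", torn down again via the new err_dbf label and in qeth_core_remove_device(), where the discipline is now removed before the sysfs attributes. A minimal sketch of how the QETH_CARD_TEXT helpers used throughout the qeth_l2/l3 hunks below can sit on top of that per-card area, assuming struct qeth_card gained the debug member the probe hunk stores into (the actual header change is not part of the hunks shown here):

/* hedged sketch; the real macro definitions live in qeth_core.h */
#define QETH_CARD_TEXT(card, level, text) \
	debug_text_event((card)->debug, level, text)

#define QETH_CARD_HEX(card, level, addr, len) \
	debug_event((card)->debug, level, (void *)(addr), len)

#define QETH_CARD_TEXT_(card, level, text...) \
	qeth_dbf_longtext((card)->debug, level, text)

With the card identity encoded in the debug area name itself, the extra QETH_DBF_TEXT_(TRACE, ..., CARD_BUS_ID(card)) lines become redundant, which is why the l2/l3 hunks below simply drop them.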
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index f9ed24de7514..e37dd8c4bf4e 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h | |||
@@ -616,8 +616,9 @@ extern unsigned char IDX_ACTIVATE_WRITE[]; | |||
616 | #define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2) | 616 | #define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2) |
617 | #define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12) | 617 | #define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12) |
618 | #define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09] | 618 | #define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09] |
619 | #define QETH_IDX_ACT_ERR_EXCL 0x19 | 619 | #define QETH_IDX_ACT_ERR_EXCL 0x19 |
620 | #define QETH_IDX_ACT_ERR_AUTH 0x1E | 620 | #define QETH_IDX_ACT_ERR_AUTH 0x1E |
621 | #define QETH_IDX_ACT_ERR_AUTH_USER 0x20 | ||
621 | 622 | ||
622 | #define PDU_ENCAPSULATION(buffer) \ | 623 | #define PDU_ENCAPSULATION(buffer) \ |
623 | (buffer + *(buffer + (*(buffer + 0x0b)) + \ | 624 | (buffer + *(buffer + (*(buffer + 0x0b)) + \ |
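The qeth_core_mpc.h hunk adds a second authorization-related IDX ACTIVATE cause code, QETH_IDX_ACT_ERR_AUTH_USER (0x20), next to the existing 0x19/0x1E values. A hedged sketch of how such a cause code is typically evaluated from the reply buffer; the function name and messages are illustrative and not taken from this patch:

/* hypothetical helper; assumes qeth_core.h and qeth_core_mpc.h are included */
static void qeth_idx_report_cause(struct qeth_card *card, unsigned char *buffer)
{
	switch (QETH_IDX_ACT_CAUSE_CODE(buffer)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&card->gdev->dev,
			"The adapter is used exclusively by another host\n");
		break;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&card->gdev->dev,
			"The adapter rejected the activation for authorization reasons\n");
		break;
	default:
		dev_err(&card->gdev->dev,
			"IDX ACTIVATE failed with cause code 0x%02x\n",
			QETH_IDX_ACT_CAUSE_CODE(buffer));
	}
}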
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 2eb022ff2610..42fa783a70c8 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c | |||
@@ -411,7 +411,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, | |||
411 | if (!card) | 411 | if (!card) |
412 | return -EINVAL; | 412 | return -EINVAL; |
413 | 413 | ||
414 | mutex_lock(&card->conf_mutex); | 414 | mutex_lock(&card->discipline_mutex); |
415 | if (card->state != CARD_STATE_DOWN) { | 415 | if (card->state != CARD_STATE_DOWN) { |
416 | rc = -EPERM; | 416 | rc = -EPERM; |
417 | goto out; | 417 | goto out; |
@@ -433,6 +433,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, | |||
433 | if (card->options.layer2 == newdis) | 433 | if (card->options.layer2 == newdis) |
434 | goto out; | 434 | goto out; |
435 | else { | 435 | else { |
436 | card->info.mac_bits = 0; | ||
436 | if (card->discipline.ccwgdriver) { | 437 | if (card->discipline.ccwgdriver) { |
437 | card->discipline.ccwgdriver->remove(card->gdev); | 438 | card->discipline.ccwgdriver->remove(card->gdev); |
438 | qeth_core_free_discipline(card); | 439 | qeth_core_free_discipline(card); |
@@ -445,7 +446,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, | |||
445 | 446 | ||
446 | rc = card->discipline.ccwgdriver->probe(card->gdev); | 447 | rc = card->discipline.ccwgdriver->probe(card->gdev); |
447 | out: | 448 | out: |
448 | mutex_unlock(&card->conf_mutex); | 449 | mutex_unlock(&card->discipline_mutex); |
449 | return rc ? rc : count; | 450 | return rc ? rc : count; |
450 | } | 451 | } |
451 | 452 | ||
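The qeth_core_sys.c hunk switches qeth_dev_layer2_store() from conf_mutex to the new discipline_mutex and clears card->info.mac_bits before the old discipline is removed, so a stale "MAC registered" flag cannot survive a layer2/layer3 switch. Together with the __qeth_l2_set_online()/__qeth_l2_set_offline() hunks further down, this makes discipline_mutex the outer lock that serializes discipline changes against online/offline processing. A minimal sketch of the resulting nesting, assuming both mutexes live in struct qeth_card as these hunks indicate (the function itself is illustrative only):

/* hypothetical illustration of the lock order on the online path */
static int example_online_locked(struct qeth_card *card)
{
	int rc;

	mutex_lock(&card->discipline_mutex);	/* excludes layer2_store() */
	mutex_lock(&card->conf_mutex);		/* excludes other config writers */
	rc = 0;					/* ... actual online sequence ... */
	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return rc;
}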
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d43f57a4ac66..830d63524d61 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -79,7 +79,7 @@ static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
79 | rc = -EOPNOTSUPP; | 79 | rc = -EOPNOTSUPP; |
80 | } | 80 | } |
81 | if (rc) | 81 | if (rc) |
82 | QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc); | 82 | QETH_CARD_TEXT_(card, 2, "ioce%d", rc); |
83 | return rc; | 83 | return rc; |
84 | } | 84 | } |
85 | 85 | ||
@@ -130,7 +130,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, | |||
130 | struct qeth_ipa_cmd *cmd; | 130 | struct qeth_ipa_cmd *cmd; |
131 | __u8 *mac; | 131 | __u8 *mac; |
132 | 132 | ||
133 | QETH_DBF_TEXT(TRACE, 2, "L2Sgmacb"); | 133 | QETH_CARD_TEXT(card, 2, "L2Sgmacb"); |
134 | cmd = (struct qeth_ipa_cmd *) data; | 134 | cmd = (struct qeth_ipa_cmd *) data; |
135 | mac = &cmd->data.setdelmac.mac[0]; | 135 | mac = &cmd->data.setdelmac.mac[0]; |
136 | /* MAC already registered, needed in couple/uncouple case */ | 136 | /* MAC already registered, needed in couple/uncouple case */ |
@@ -147,7 +147,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, | |||
147 | 147 | ||
148 | static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) | 148 | static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) |
149 | { | 149 | { |
150 | QETH_DBF_TEXT(TRACE, 2, "L2Sgmac"); | 150 | QETH_CARD_TEXT(card, 2, "L2Sgmac"); |
151 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC, | 151 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC, |
152 | qeth_l2_send_setgroupmac_cb); | 152 | qeth_l2_send_setgroupmac_cb); |
153 | } | 153 | } |
@@ -159,7 +159,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, | |||
159 | struct qeth_ipa_cmd *cmd; | 159 | struct qeth_ipa_cmd *cmd; |
160 | __u8 *mac; | 160 | __u8 *mac; |
161 | 161 | ||
162 | QETH_DBF_TEXT(TRACE, 2, "L2Dgmacb"); | 162 | QETH_CARD_TEXT(card, 2, "L2Dgmacb"); |
163 | cmd = (struct qeth_ipa_cmd *) data; | 163 | cmd = (struct qeth_ipa_cmd *) data; |
164 | mac = &cmd->data.setdelmac.mac[0]; | 164 | mac = &cmd->data.setdelmac.mac[0]; |
165 | if (cmd->hdr.return_code) | 165 | if (cmd->hdr.return_code) |
@@ -170,7 +170,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, | |||
170 | 170 | ||
171 | static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) | 171 | static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) |
172 | { | 172 | { |
173 | QETH_DBF_TEXT(TRACE, 2, "L2Dgmac"); | 173 | QETH_CARD_TEXT(card, 2, "L2Dgmac"); |
174 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, | 174 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, |
175 | qeth_l2_send_delgroupmac_cb); | 175 | qeth_l2_send_delgroupmac_cb); |
176 | } | 176 | } |
@@ -262,15 +262,14 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card, | |||
262 | { | 262 | { |
263 | struct qeth_ipa_cmd *cmd; | 263 | struct qeth_ipa_cmd *cmd; |
264 | 264 | ||
265 | QETH_DBF_TEXT(TRACE, 2, "L2sdvcb"); | 265 | QETH_CARD_TEXT(card, 2, "L2sdvcb"); |
266 | cmd = (struct qeth_ipa_cmd *) data; | 266 | cmd = (struct qeth_ipa_cmd *) data; |
267 | if (cmd->hdr.return_code) { | 267 | if (cmd->hdr.return_code) { |
268 | QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. " | 268 | QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. " |
269 | "Continuing\n", cmd->data.setdelvlan.vlan_id, | 269 | "Continuing\n", cmd->data.setdelvlan.vlan_id, |
270 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); | 270 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); |
271 | QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command); | 271 | QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command); |
272 | QETH_DBF_TEXT_(TRACE, 2, "L2%s", CARD_BUS_ID(card)); | 272 | QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); |
273 | QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code); | ||
274 | } | 273 | } |
275 | return 0; | 274 | return 0; |
276 | } | 275 | } |
@@ -281,7 +280,7 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, | |||
281 | struct qeth_ipa_cmd *cmd; | 280 | struct qeth_ipa_cmd *cmd; |
282 | struct qeth_cmd_buffer *iob; | 281 | struct qeth_cmd_buffer *iob; |
283 | 282 | ||
284 | QETH_DBF_TEXT_(TRACE, 4, "L2sdv%x", ipacmd); | 283 | QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); |
285 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); | 284 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); |
286 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 285 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
287 | cmd->data.setdelvlan.vlan_id = i; | 286 | cmd->data.setdelvlan.vlan_id = i; |
@@ -292,7 +291,7 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, | |||
292 | static void qeth_l2_process_vlans(struct qeth_card *card, int clear) | 291 | static void qeth_l2_process_vlans(struct qeth_card *card, int clear) |
293 | { | 292 | { |
294 | struct qeth_vlan_vid *id; | 293 | struct qeth_vlan_vid *id; |
295 | QETH_DBF_TEXT(TRACE, 3, "L2prcvln"); | 294 | QETH_CARD_TEXT(card, 3, "L2prcvln"); |
296 | spin_lock_bh(&card->vlanlock); | 295 | spin_lock_bh(&card->vlanlock); |
297 | list_for_each_entry(id, &card->vid_list, list) { | 296 | list_for_each_entry(id, &card->vid_list, list) { |
298 | if (clear) | 297 | if (clear) |
@@ -310,13 +309,13 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |||
310 | struct qeth_card *card = dev->ml_priv; | 309 | struct qeth_card *card = dev->ml_priv; |
311 | struct qeth_vlan_vid *id; | 310 | struct qeth_vlan_vid *id; |
312 | 311 | ||
313 | QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid); | 312 | QETH_CARD_TEXT_(card, 4, "aid:%d", vid); |
314 | if (card->info.type == QETH_CARD_TYPE_OSM) { | 313 | if (card->info.type == QETH_CARD_TYPE_OSM) { |
315 | QETH_DBF_TEXT(TRACE, 3, "aidOSM"); | 314 | QETH_CARD_TEXT(card, 3, "aidOSM"); |
316 | return; | 315 | return; |
317 | } | 316 | } |
318 | if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { | 317 | if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { |
319 | QETH_DBF_TEXT(TRACE, 3, "aidREC"); | 318 | QETH_CARD_TEXT(card, 3, "aidREC"); |
320 | return; | 319 | return; |
321 | } | 320 | } |
322 | id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); | 321 | id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); |
@@ -334,13 +333,13 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
334 | struct qeth_vlan_vid *id, *tmpid = NULL; | 333 | struct qeth_vlan_vid *id, *tmpid = NULL; |
335 | struct qeth_card *card = dev->ml_priv; | 334 | struct qeth_card *card = dev->ml_priv; |
336 | 335 | ||
337 | QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid); | 336 | QETH_CARD_TEXT_(card, 4, "kid:%d", vid); |
338 | if (card->info.type == QETH_CARD_TYPE_OSM) { | 337 | if (card->info.type == QETH_CARD_TYPE_OSM) { |
339 | QETH_DBF_TEXT(TRACE, 3, "kidOSM"); | 338 | QETH_CARD_TEXT(card, 3, "kidOSM"); |
340 | return; | 339 | return; |
341 | } | 340 | } |
342 | if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { | 341 | if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { |
343 | QETH_DBF_TEXT(TRACE, 3, "kidREC"); | 342 | QETH_CARD_TEXT(card, 3, "kidREC"); |
344 | return; | 343 | return; |
345 | } | 344 | } |
346 | spin_lock_bh(&card->vlanlock); | 345 | spin_lock_bh(&card->vlanlock); |
@@ -456,7 +455,7 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card, | |||
456 | /* else unknown */ | 455 | /* else unknown */ |
457 | default: | 456 | default: |
458 | dev_kfree_skb_any(skb); | 457 | dev_kfree_skb_any(skb); |
459 | QETH_DBF_TEXT(TRACE, 3, "inbunkno"); | 458 | QETH_CARD_TEXT(card, 3, "inbunkno"); |
460 | QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); | 459 | QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); |
461 | continue; | 460 | continue; |
462 | } | 461 | } |
@@ -474,7 +473,7 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, | |||
474 | struct qeth_ipa_cmd *cmd; | 473 | struct qeth_ipa_cmd *cmd; |
475 | struct qeth_cmd_buffer *iob; | 474 | struct qeth_cmd_buffer *iob; |
476 | 475 | ||
477 | QETH_DBF_TEXT(TRACE, 2, "L2sdmac"); | 476 | QETH_CARD_TEXT(card, 2, "L2sdmac"); |
478 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); | 477 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); |
479 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 478 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
480 | cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; | 479 | cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; |
@@ -488,10 +487,10 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card, | |||
488 | { | 487 | { |
489 | struct qeth_ipa_cmd *cmd; | 488 | struct qeth_ipa_cmd *cmd; |
490 | 489 | ||
491 | QETH_DBF_TEXT(TRACE, 2, "L2Smaccb"); | 490 | QETH_CARD_TEXT(card, 2, "L2Smaccb"); |
492 | cmd = (struct qeth_ipa_cmd *) data; | 491 | cmd = (struct qeth_ipa_cmd *) data; |
493 | if (cmd->hdr.return_code) { | 492 | if (cmd->hdr.return_code) { |
494 | QETH_DBF_TEXT_(TRACE, 2, "L2er%x", cmd->hdr.return_code); | 493 | QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code); |
495 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; | 494 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; |
496 | switch (cmd->hdr.return_code) { | 495 | switch (cmd->hdr.return_code) { |
497 | case IPA_RC_L2_DUP_MAC: | 496 | case IPA_RC_L2_DUP_MAC: |
@@ -523,7 +522,7 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card, | |||
523 | 522 | ||
524 | static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) | 523 | static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) |
525 | { | 524 | { |
526 | QETH_DBF_TEXT(TRACE, 2, "L2Setmac"); | 525 | QETH_CARD_TEXT(card, 2, "L2Setmac"); |
527 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, | 526 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, |
528 | qeth_l2_send_setmac_cb); | 527 | qeth_l2_send_setmac_cb); |
529 | } | 528 | } |
@@ -534,10 +533,10 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card, | |||
534 | { | 533 | { |
535 | struct qeth_ipa_cmd *cmd; | 534 | struct qeth_ipa_cmd *cmd; |
536 | 535 | ||
537 | QETH_DBF_TEXT(TRACE, 2, "L2Dmaccb"); | 536 | QETH_CARD_TEXT(card, 2, "L2Dmaccb"); |
538 | cmd = (struct qeth_ipa_cmd *) data; | 537 | cmd = (struct qeth_ipa_cmd *) data; |
539 | if (cmd->hdr.return_code) { | 538 | if (cmd->hdr.return_code) { |
540 | QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code); | 539 | QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); |
541 | cmd->hdr.return_code = -EIO; | 540 | cmd->hdr.return_code = -EIO; |
542 | return 0; | 541 | return 0; |
543 | } | 542 | } |
@@ -548,7 +547,7 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card, | |||
548 | 547 | ||
549 | static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) | 548 | static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) |
550 | { | 549 | { |
551 | QETH_DBF_TEXT(TRACE, 2, "L2Delmac"); | 550 | QETH_CARD_TEXT(card, 2, "L2Delmac"); |
552 | if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) | 551 | if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) |
553 | return 0; | 552 | return 0; |
554 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC, | 553 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC, |
@@ -594,23 +593,22 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) | |||
594 | struct qeth_card *card = dev->ml_priv; | 593 | struct qeth_card *card = dev->ml_priv; |
595 | int rc = 0; | 594 | int rc = 0; |
596 | 595 | ||
597 | QETH_DBF_TEXT(TRACE, 3, "setmac"); | 596 | QETH_CARD_TEXT(card, 3, "setmac"); |
598 | 597 | ||
599 | if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) { | 598 | if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) { |
600 | QETH_DBF_TEXT(TRACE, 3, "setmcINV"); | 599 | QETH_CARD_TEXT(card, 3, "setmcINV"); |
601 | return -EOPNOTSUPP; | 600 | return -EOPNOTSUPP; |
602 | } | 601 | } |
603 | 602 | ||
604 | if (card->info.type == QETH_CARD_TYPE_OSN || | 603 | if (card->info.type == QETH_CARD_TYPE_OSN || |
605 | card->info.type == QETH_CARD_TYPE_OSM || | 604 | card->info.type == QETH_CARD_TYPE_OSM || |
606 | card->info.type == QETH_CARD_TYPE_OSX) { | 605 | card->info.type == QETH_CARD_TYPE_OSX) { |
607 | QETH_DBF_TEXT(TRACE, 3, "setmcTYP"); | 606 | QETH_CARD_TEXT(card, 3, "setmcTYP"); |
608 | return -EOPNOTSUPP; | 607 | return -EOPNOTSUPP; |
609 | } | 608 | } |
610 | QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card)); | 609 | QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN); |
611 | QETH_DBF_HEX(TRACE, 3, addr->sa_data, OSA_ADDR_LEN); | ||
612 | if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { | 610 | if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { |
613 | QETH_DBF_TEXT(TRACE, 3, "setmcREC"); | 611 | QETH_CARD_TEXT(card, 3, "setmcREC"); |
614 | return -ERESTARTSYS; | 612 | return -ERESTARTSYS; |
615 | } | 613 | } |
616 | rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); | 614 | rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); |
@@ -627,7 +625,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev) | |||
627 | if (card->info.type == QETH_CARD_TYPE_OSN) | 625 | if (card->info.type == QETH_CARD_TYPE_OSN) |
628 | return ; | 626 | return ; |
629 | 627 | ||
630 | QETH_DBF_TEXT(TRACE, 3, "setmulti"); | 628 | QETH_CARD_TEXT(card, 3, "setmulti"); |
631 | if (qeth_threads_running(card, QETH_RECOVER_THREAD) && | 629 | if (qeth_threads_running(card, QETH_RECOVER_THREAD) && |
632 | (card->state != CARD_STATE_UP)) | 630 | (card->state != CARD_STATE_UP)) |
633 | return; | 631 | return; |
@@ -714,10 +712,13 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
714 | goto tx_drop; | 712 | goto tx_drop; |
715 | } | 713 | } |
716 | 714 | ||
717 | if (card->info.type != QETH_CARD_TYPE_IQD) | 715 | if (card->info.type != QETH_CARD_TYPE_IQD) { |
716 | if (qeth_hdr_chk_and_bounce(new_skb, | ||
717 | sizeof(struct qeth_hdr_layer2))) | ||
718 | goto tx_drop; | ||
718 | rc = qeth_do_send_packet(card, queue, new_skb, hdr, | 719 | rc = qeth_do_send_packet(card, queue, new_skb, hdr, |
719 | elements); | 720 | elements); |
720 | else | 721 | } else |
721 | rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, | 722 | rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, |
722 | elements, data_offset, hd_len); | 723 | elements, data_offset, hd_len); |
723 | if (!rc) { | 724 | if (!rc) { |
@@ -771,11 +772,10 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, | |||
771 | card->perf_stats.inbound_start_time = qeth_get_micros(); | 772 | card->perf_stats.inbound_start_time = qeth_get_micros(); |
772 | } | 773 | } |
773 | if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { | 774 | if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { |
774 | QETH_DBF_TEXT(TRACE, 1, "qdinchk"); | 775 | QETH_CARD_TEXT(card, 1, "qdinchk"); |
775 | QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); | 776 | QETH_CARD_TEXT_(card, 1, "%04X%04X", first_element, |
776 | QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element, | ||
777 | count); | 777 | count); |
778 | QETH_DBF_TEXT_(TRACE, 1, "%04X", queue); | 778 | QETH_CARD_TEXT_(card, 1, "%04X", queue); |
779 | qeth_schedule_recovery(card); | 779 | qeth_schedule_recovery(card); |
780 | return; | 780 | return; |
781 | } | 781 | } |
@@ -799,13 +799,13 @@ static int qeth_l2_open(struct net_device *dev) | |||
799 | { | 799 | { |
800 | struct qeth_card *card = dev->ml_priv; | 800 | struct qeth_card *card = dev->ml_priv; |
801 | 801 | ||
802 | QETH_DBF_TEXT(TRACE, 4, "qethopen"); | 802 | QETH_CARD_TEXT(card, 4, "qethopen"); |
803 | if (card->state != CARD_STATE_SOFTSETUP) | 803 | if (card->state != CARD_STATE_SOFTSETUP) |
804 | return -ENODEV; | 804 | return -ENODEV; |
805 | 805 | ||
806 | if ((card->info.type != QETH_CARD_TYPE_OSN) && | 806 | if ((card->info.type != QETH_CARD_TYPE_OSN) && |
807 | (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) { | 807 | (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) { |
808 | QETH_DBF_TEXT(TRACE, 4, "nomacadr"); | 808 | QETH_CARD_TEXT(card, 4, "nomacadr"); |
809 | return -EPERM; | 809 | return -EPERM; |
810 | } | 810 | } |
811 | card->data.state = CH_STATE_UP; | 811 | card->data.state = CH_STATE_UP; |
@@ -822,7 +822,7 @@ static int qeth_l2_stop(struct net_device *dev) | |||
822 | { | 822 | { |
823 | struct qeth_card *card = dev->ml_priv; | 823 | struct qeth_card *card = dev->ml_priv; |
824 | 824 | ||
825 | QETH_DBF_TEXT(TRACE, 4, "qethstop"); | 825 | QETH_CARD_TEXT(card, 4, "qethstop"); |
826 | netif_tx_disable(dev); | 826 | netif_tx_disable(dev); |
827 | if (card->state == CARD_STATE_UP) | 827 | if (card->state == CARD_STATE_UP) |
828 | card->state = CARD_STATE_SOFTSETUP; | 828 | card->state = CARD_STATE_SOFTSETUP; |
@@ -860,8 +860,6 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) | |||
860 | unregister_netdev(card->dev); | 860 | unregister_netdev(card->dev); |
861 | card->dev = NULL; | 861 | card->dev = NULL; |
862 | } | 862 | } |
863 | |||
864 | qeth_l2_del_all_mc(card); | ||
865 | return; | 863 | return; |
866 | } | 864 | } |
867 | 865 | ||
@@ -935,6 +933,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
935 | enum qeth_card_states recover_flag; | 933 | enum qeth_card_states recover_flag; |
936 | 934 | ||
937 | BUG_ON(!card); | 935 | BUG_ON(!card); |
936 | mutex_lock(&card->discipline_mutex); | ||
938 | mutex_lock(&card->conf_mutex); | 937 | mutex_lock(&card->conf_mutex); |
939 | QETH_DBF_TEXT(SETUP, 2, "setonlin"); | 938 | QETH_DBF_TEXT(SETUP, 2, "setonlin"); |
940 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); | 939 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); |
@@ -1012,6 +1011,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
1012 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); | 1011 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); |
1013 | out: | 1012 | out: |
1014 | mutex_unlock(&card->conf_mutex); | 1013 | mutex_unlock(&card->conf_mutex); |
1014 | mutex_unlock(&card->discipline_mutex); | ||
1015 | return 0; | 1015 | return 0; |
1016 | 1016 | ||
1017 | out_remove: | 1017 | out_remove: |
@@ -1025,6 +1025,7 @@ out_remove: | |||
1025 | else | 1025 | else |
1026 | card->state = CARD_STATE_DOWN; | 1026 | card->state = CARD_STATE_DOWN; |
1027 | mutex_unlock(&card->conf_mutex); | 1027 | mutex_unlock(&card->conf_mutex); |
1028 | mutex_unlock(&card->discipline_mutex); | ||
1028 | return rc; | 1029 | return rc; |
1029 | } | 1030 | } |
1030 | 1031 | ||
@@ -1040,6 +1041,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, | |||
1040 | int rc = 0, rc2 = 0, rc3 = 0; | 1041 | int rc = 0, rc2 = 0, rc3 = 0; |
1041 | enum qeth_card_states recover_flag; | 1042 | enum qeth_card_states recover_flag; |
1042 | 1043 | ||
1044 | mutex_lock(&card->discipline_mutex); | ||
1043 | mutex_lock(&card->conf_mutex); | 1045 | mutex_lock(&card->conf_mutex); |
1044 | QETH_DBF_TEXT(SETUP, 3, "setoffl"); | 1046 | QETH_DBF_TEXT(SETUP, 3, "setoffl"); |
1045 | QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); | 1047 | QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); |
@@ -1060,6 +1062,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, | |||
1060 | /* let user_space know that device is offline */ | 1062 | /* let user_space know that device is offline */ |
1061 | kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); | 1063 | kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); |
1062 | mutex_unlock(&card->conf_mutex); | 1064 | mutex_unlock(&card->conf_mutex); |
1065 | mutex_unlock(&card->discipline_mutex); | ||
1063 | return 0; | 1066 | return 0; |
1064 | } | 1067 | } |
1065 | 1068 | ||
@@ -1074,11 +1077,10 @@ static int qeth_l2_recover(void *ptr) | |||
1074 | int rc = 0; | 1077 | int rc = 0; |
1075 | 1078 | ||
1076 | card = (struct qeth_card *) ptr; | 1079 | card = (struct qeth_card *) ptr; |
1077 | QETH_DBF_TEXT(TRACE, 2, "recover1"); | 1080 | QETH_CARD_TEXT(card, 2, "recover1"); |
1078 | QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); | ||
1079 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) | 1081 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) |
1080 | return 0; | 1082 | return 0; |
1081 | QETH_DBF_TEXT(TRACE, 2, "recover2"); | 1083 | QETH_CARD_TEXT(card, 2, "recover2"); |
1082 | dev_warn(&card->gdev->dev, | 1084 | dev_warn(&card->gdev->dev, |
1083 | "A recovery process has been started for the device\n"); | 1085 | "A recovery process has been started for the device\n"); |
1084 | card->use_hard_stop = 1; | 1086 | card->use_hard_stop = 1; |
@@ -1181,12 +1183,12 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, | |||
1181 | unsigned long flags; | 1183 | unsigned long flags; |
1182 | int rc = 0; | 1184 | int rc = 0; |
1183 | 1185 | ||
1184 | QETH_DBF_TEXT(TRACE, 5, "osndctrd"); | 1186 | QETH_CARD_TEXT(card, 5, "osndctrd"); |
1185 | 1187 | ||
1186 | wait_event(card->wait_q, | 1188 | wait_event(card->wait_q, |
1187 | atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); | 1189 | atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); |
1188 | qeth_prepare_control_data(card, len, iob); | 1190 | qeth_prepare_control_data(card, len, iob); |
1189 | QETH_DBF_TEXT(TRACE, 6, "osnoirqp"); | 1191 | QETH_CARD_TEXT(card, 6, "osnoirqp"); |
1190 | spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); | 1192 | spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); |
1191 | rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, | 1193 | rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, |
1192 | (addr_t) iob, 0, 0); | 1194 | (addr_t) iob, 0, 0); |
@@ -1194,7 +1196,7 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, | |||
1194 | if (rc) { | 1196 | if (rc) { |
1195 | QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " | 1197 | QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " |
1196 | "ccw_device_start rc = %i\n", rc); | 1198 | "ccw_device_start rc = %i\n", rc); |
1197 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); | 1199 | QETH_CARD_TEXT_(card, 2, " err%d", rc); |
1198 | qeth_release_buffer(iob->channel, iob); | 1200 | qeth_release_buffer(iob->channel, iob); |
1199 | atomic_set(&card->write.irq_pending, 0); | 1201 | atomic_set(&card->write.irq_pending, 0); |
1200 | wake_up(&card->wait_q); | 1202 | wake_up(&card->wait_q); |
@@ -1207,7 +1209,7 @@ static int qeth_osn_send_ipa_cmd(struct qeth_card *card, | |||
1207 | { | 1209 | { |
1208 | u16 s1, s2; | 1210 | u16 s1, s2; |
1209 | 1211 | ||
1210 | QETH_DBF_TEXT(TRACE, 4, "osndipa"); | 1212 | QETH_CARD_TEXT(card, 4, "osndipa"); |
1211 | 1213 | ||
1212 | qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2); | 1214 | qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2); |
1213 | s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len); | 1215 | s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len); |
@@ -1225,12 +1227,12 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len) | |||
1225 | struct qeth_card *card; | 1227 | struct qeth_card *card; |
1226 | int rc; | 1228 | int rc; |
1227 | 1229 | ||
1228 | QETH_DBF_TEXT(TRACE, 2, "osnsdmc"); | ||
1229 | if (!dev) | 1230 | if (!dev) |
1230 | return -ENODEV; | 1231 | return -ENODEV; |
1231 | card = dev->ml_priv; | 1232 | card = dev->ml_priv; |
1232 | if (!card) | 1233 | if (!card) |
1233 | return -ENODEV; | 1234 | return -ENODEV; |
1235 | QETH_CARD_TEXT(card, 2, "osnsdmc"); | ||
1234 | if ((card->state != CARD_STATE_UP) && | 1236 | if ((card->state != CARD_STATE_UP) && |
1235 | (card->state != CARD_STATE_SOFTSETUP)) | 1237 | (card->state != CARD_STATE_SOFTSETUP)) |
1236 | return -ENODEV; | 1238 | return -ENODEV; |
@@ -1247,13 +1249,13 @@ int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev, | |||
1247 | { | 1249 | { |
1248 | struct qeth_card *card; | 1250 | struct qeth_card *card; |
1249 | 1251 | ||
1250 | QETH_DBF_TEXT(TRACE, 2, "osnreg"); | ||
1251 | *dev = qeth_l2_netdev_by_devno(read_dev_no); | 1252 | *dev = qeth_l2_netdev_by_devno(read_dev_no); |
1252 | if (*dev == NULL) | 1253 | if (*dev == NULL) |
1253 | return -ENODEV; | 1254 | return -ENODEV; |
1254 | card = (*dev)->ml_priv; | 1255 | card = (*dev)->ml_priv; |
1255 | if (!card) | 1256 | if (!card) |
1256 | return -ENODEV; | 1257 | return -ENODEV; |
1258 | QETH_CARD_TEXT(card, 2, "osnreg"); | ||
1257 | if ((assist_cb == NULL) || (data_cb == NULL)) | 1259 | if ((assist_cb == NULL) || (data_cb == NULL)) |
1258 | return -EINVAL; | 1260 | return -EINVAL; |
1259 | card->osn_info.assist_cb = assist_cb; | 1261 | card->osn_info.assist_cb = assist_cb; |
@@ -1266,12 +1268,12 @@ void qeth_osn_deregister(struct net_device *dev) | |||
1266 | { | 1268 | { |
1267 | struct qeth_card *card; | 1269 | struct qeth_card *card; |
1268 | 1270 | ||
1269 | QETH_DBF_TEXT(TRACE, 2, "osndereg"); | ||
1270 | if (!dev) | 1271 | if (!dev) |
1271 | return; | 1272 | return; |
1272 | card = dev->ml_priv; | 1273 | card = dev->ml_priv; |
1273 | if (!card) | 1274 | if (!card) |
1274 | return; | 1275 | return; |
1276 | QETH_CARD_TEXT(card, 2, "osndereg"); | ||
1275 | card->osn_info.assist_cb = NULL; | 1277 | card->osn_info.assist_cb = NULL; |
1276 | card->osn_info.data_cb = NULL; | 1278 | card->osn_info.data_cb = NULL; |
1277 | return; | 1279 | return; |
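Most of the qeth_l2_main.c changes are the mechanical QETH_DBF_TEXT(TRACE, ...) to QETH_CARD_TEXT(card, ...) conversion; the OSN entry points additionally move their first trace call behind the card lookup so they no longer trace before the card pointer is known. The one functional change in the transmit path is the qeth_hdr_chk_and_bounce() call before qeth_do_send_packet() on non-IQD cards. The helper itself is added in qeth_core_main.c outside the hunks shown here; a hedged sketch of the condition it guards against, the qeth layer-2 header crossing a page boundary, could look like this (detection only; the real helper also tries to move the data back into the skb headroom before the caller drops the packet):

#include <linux/skbuff.h>
#include <linux/mm.h>	/* PAGE_MASK */

/* hypothetical helper: does the qeth header at skb->data span two pages? */
static bool qeth_l2_hdr_crosses_page(struct sk_buff *skb, int hdr_len)
{
	unsigned long start = (unsigned long)skb->data;

	return (start & PAGE_MASK) != ((start + hdr_len - 1) & PAGE_MASK);
}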
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index 8447d233d0b3..e705b27ec7dc 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h | |||
@@ -64,5 +64,6 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, | |||
64 | const u8 *); | 64 | const u8 *); |
65 | int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types); | 65 | int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types); |
66 | int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types); | 66 | int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types); |
67 | int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *); | ||
67 | 68 | ||
68 | #endif /* __QETH_L3_H__ */ | 69 | #endif /* __QETH_L3_H__ */ |
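qeth_l3_is_addr_covered_by_ipato() loses its static qualifier in qeth_l3_main.c (see the first hunk of the next file) and is declared here so that other layer-3 files can reuse the IP-address-takeover check. A hypothetical caller, assuming the usual card->ip_list of struct qeth_ipaddr entries protected by card->ip_lock, might re-evaluate the takeover flag after the ipato configuration changed:

/* illustrative only; names follow the structures visible in the hunks below */
static void example_refresh_takeover_flags(struct qeth_card *card)
{
	struct qeth_ipaddr *addr;
	unsigned long flags;

	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry(addr, &card->ip_list, entry) {
		if (addr->type != QETH_IP_TYPE_NORMAL)
			continue;
		if (qeth_l3_is_addr_covered_by_ipato(card, addr))
			addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
		else
			addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG;
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
}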
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 61adae21a464..e22ae248f613 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -195,7 +195,7 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) | |||
195 | } | 195 | } |
196 | } | 196 | } |
197 | 197 | ||
198 | static int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, | 198 | int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, |
199 | struct qeth_ipaddr *addr) | 199 | struct qeth_ipaddr *addr) |
200 | { | 200 | { |
201 | struct qeth_ipato_entry *ipatoe; | 201 | struct qeth_ipato_entry *ipatoe; |
@@ -287,7 +287,7 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card, | |||
287 | addr->users += add ? 1 : -1; | 287 | addr->users += add ? 1 : -1; |
288 | if (add && (addr->type == QETH_IP_TYPE_NORMAL) && | 288 | if (add && (addr->type == QETH_IP_TYPE_NORMAL) && |
289 | qeth_l3_is_addr_covered_by_ipato(card, addr)) { | 289 | qeth_l3_is_addr_covered_by_ipato(card, addr)) { |
290 | QETH_DBF_TEXT(TRACE, 2, "tkovaddr"); | 290 | QETH_CARD_TEXT(card, 2, "tkovaddr"); |
291 | addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; | 291 | addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; |
292 | } | 292 | } |
293 | list_add_tail(&addr->entry, card->ip_tbd_list); | 293 | list_add_tail(&addr->entry, card->ip_tbd_list); |
@@ -301,13 +301,13 @@ static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr) | |||
301 | unsigned long flags; | 301 | unsigned long flags; |
302 | int rc = 0; | 302 | int rc = 0; |
303 | 303 | ||
304 | QETH_DBF_TEXT(TRACE, 4, "delip"); | 304 | QETH_CARD_TEXT(card, 4, "delip"); |
305 | 305 | ||
306 | if (addr->proto == QETH_PROT_IPV4) | 306 | if (addr->proto == QETH_PROT_IPV4) |
307 | QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4); | 307 | QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4); |
308 | else { | 308 | else { |
309 | QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8); | 309 | QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8); |
310 | QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8); | 310 | QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8); |
311 | } | 311 | } |
312 | spin_lock_irqsave(&card->ip_lock, flags); | 312 | spin_lock_irqsave(&card->ip_lock, flags); |
313 | rc = __qeth_l3_insert_ip_todo(card, addr, 0); | 313 | rc = __qeth_l3_insert_ip_todo(card, addr, 0); |
@@ -320,12 +320,12 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr) | |||
320 | unsigned long flags; | 320 | unsigned long flags; |
321 | int rc = 0; | 321 | int rc = 0; |
322 | 322 | ||
323 | QETH_DBF_TEXT(TRACE, 4, "addip"); | 323 | QETH_CARD_TEXT(card, 4, "addip"); |
324 | if (addr->proto == QETH_PROT_IPV4) | 324 | if (addr->proto == QETH_PROT_IPV4) |
325 | QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4); | 325 | QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4); |
326 | else { | 326 | else { |
327 | QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8); | 327 | QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8); |
328 | QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8); | 328 | QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8); |
329 | } | 329 | } |
330 | spin_lock_irqsave(&card->ip_lock, flags); | 330 | spin_lock_irqsave(&card->ip_lock, flags); |
331 | rc = __qeth_l3_insert_ip_todo(card, addr, 1); | 331 | rc = __qeth_l3_insert_ip_todo(card, addr, 1); |
@@ -353,10 +353,10 @@ static void qeth_l3_delete_mc_addresses(struct qeth_card *card) | |||
353 | struct qeth_ipaddr *iptodo; | 353 | struct qeth_ipaddr *iptodo; |
354 | unsigned long flags; | 354 | unsigned long flags; |
355 | 355 | ||
356 | QETH_DBF_TEXT(TRACE, 4, "delmc"); | 356 | QETH_CARD_TEXT(card, 4, "delmc"); |
357 | iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); | 357 | iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); |
358 | if (!iptodo) { | 358 | if (!iptodo) { |
359 | QETH_DBF_TEXT(TRACE, 2, "dmcnomem"); | 359 | QETH_CARD_TEXT(card, 2, "dmcnomem"); |
360 | return; | 360 | return; |
361 | } | 361 | } |
362 | iptodo->type = QETH_IP_TYPE_DEL_ALL_MC; | 362 | iptodo->type = QETH_IP_TYPE_DEL_ALL_MC; |
@@ -457,8 +457,8 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card) | |||
457 | unsigned long flags; | 457 | unsigned long flags; |
458 | int rc; | 458 | int rc; |
459 | 459 | ||
460 | QETH_DBF_TEXT(TRACE, 2, "sdiplist"); | 460 | QETH_CARD_TEXT(card, 2, "sdiplist"); |
461 | QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); | 461 | QETH_CARD_HEX(card, 2, &card, sizeof(void *)); |
462 | 462 | ||
463 | if (card->options.sniffer) | 463 | if (card->options.sniffer) |
464 | return; | 464 | return; |
@@ -466,7 +466,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card) | |||
466 | tbd_list = card->ip_tbd_list; | 466 | tbd_list = card->ip_tbd_list; |
467 | card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC); | 467 | card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC); |
468 | if (!card->ip_tbd_list) { | 468 | if (!card->ip_tbd_list) { |
469 | QETH_DBF_TEXT(TRACE, 0, "silnomem"); | 469 | QETH_CARD_TEXT(card, 0, "silnomem"); |
470 | card->ip_tbd_list = tbd_list; | 470 | card->ip_tbd_list = tbd_list; |
471 | spin_unlock_irqrestore(&card->ip_lock, flags); | 471 | spin_unlock_irqrestore(&card->ip_lock, flags); |
472 | return; | 472 | return; |
@@ -517,7 +517,7 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean, | |||
517 | struct qeth_ipaddr *addr, *tmp; | 517 | struct qeth_ipaddr *addr, *tmp; |
518 | unsigned long flags; | 518 | unsigned long flags; |
519 | 519 | ||
520 | QETH_DBF_TEXT(TRACE, 4, "clearip"); | 520 | QETH_CARD_TEXT(card, 4, "clearip"); |
521 | if (recover && card->options.sniffer) | 521 | if (recover && card->options.sniffer) |
522 | return; | 522 | return; |
523 | spin_lock_irqsave(&card->ip_lock, flags); | 523 | spin_lock_irqsave(&card->ip_lock, flags); |
@@ -577,7 +577,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card, | |||
577 | struct qeth_cmd_buffer *iob; | 577 | struct qeth_cmd_buffer *iob; |
578 | struct qeth_ipa_cmd *cmd; | 578 | struct qeth_ipa_cmd *cmd; |
579 | 579 | ||
580 | QETH_DBF_TEXT(TRACE, 4, "setdelmc"); | 580 | QETH_CARD_TEXT(card, 4, "setdelmc"); |
581 | 581 | ||
582 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); | 582 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); |
583 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 583 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
@@ -615,8 +615,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card, | |||
615 | struct qeth_ipa_cmd *cmd; | 615 | struct qeth_ipa_cmd *cmd; |
616 | __u8 netmask[16]; | 616 | __u8 netmask[16]; |
617 | 617 | ||
618 | QETH_DBF_TEXT(TRACE, 4, "setdelip"); | 618 | QETH_CARD_TEXT(card, 4, "setdelip"); |
619 | QETH_DBF_TEXT_(TRACE, 4, "flags%02X", flags); | 619 | QETH_CARD_TEXT_(card, 4, "flags%02X", flags); |
620 | 620 | ||
621 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); | 621 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); |
622 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 622 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
@@ -645,7 +645,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, | |||
645 | struct qeth_ipa_cmd *cmd; | 645 | struct qeth_ipa_cmd *cmd; |
646 | struct qeth_cmd_buffer *iob; | 646 | struct qeth_cmd_buffer *iob; |
647 | 647 | ||
648 | QETH_DBF_TEXT(TRACE, 4, "setroutg"); | 648 | QETH_CARD_TEXT(card, 4, "setroutg"); |
649 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); | 649 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); |
650 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 650 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
651 | cmd->data.setrtg.type = (type); | 651 | cmd->data.setrtg.type = (type); |
@@ -689,7 +689,7 @@ int qeth_l3_setrouting_v4(struct qeth_card *card) | |||
689 | { | 689 | { |
690 | int rc; | 690 | int rc; |
691 | 691 | ||
692 | QETH_DBF_TEXT(TRACE, 3, "setrtg4"); | 692 | QETH_CARD_TEXT(card, 3, "setrtg4"); |
693 | 693 | ||
694 | qeth_l3_correct_routing_type(card, &card->options.route4.type, | 694 | qeth_l3_correct_routing_type(card, &card->options.route4.type, |
695 | QETH_PROT_IPV4); | 695 | QETH_PROT_IPV4); |
@@ -709,7 +709,7 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) | |||
709 | { | 709 | { |
710 | int rc = 0; | 710 | int rc = 0; |
711 | 711 | ||
712 | QETH_DBF_TEXT(TRACE, 3, "setrtg6"); | 712 | QETH_CARD_TEXT(card, 3, "setrtg6"); |
713 | #ifdef CONFIG_QETH_IPV6 | 713 | #ifdef CONFIG_QETH_IPV6 |
714 | 714 | ||
715 | if (!qeth_is_supported(card, IPA_IPV6)) | 715 | if (!qeth_is_supported(card, IPA_IPV6)) |
@@ -753,7 +753,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, | |||
753 | unsigned long flags; | 753 | unsigned long flags; |
754 | int rc = 0; | 754 | int rc = 0; |
755 | 755 | ||
756 | QETH_DBF_TEXT(TRACE, 2, "addipato"); | 756 | QETH_CARD_TEXT(card, 2, "addipato"); |
757 | spin_lock_irqsave(&card->ip_lock, flags); | 757 | spin_lock_irqsave(&card->ip_lock, flags); |
758 | list_for_each_entry(ipatoe, &card->ipato.entries, entry) { | 758 | list_for_each_entry(ipatoe, &card->ipato.entries, entry) { |
759 | if (ipatoe->proto != new->proto) | 759 | if (ipatoe->proto != new->proto) |
@@ -778,7 +778,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card, | |||
778 | struct qeth_ipato_entry *ipatoe, *tmp; | 778 | struct qeth_ipato_entry *ipatoe, *tmp; |
779 | unsigned long flags; | 779 | unsigned long flags; |
780 | 780 | ||
781 | QETH_DBF_TEXT(TRACE, 2, "delipato"); | 781 | QETH_CARD_TEXT(card, 2, "delipato"); |
782 | spin_lock_irqsave(&card->ip_lock, flags); | 782 | spin_lock_irqsave(&card->ip_lock, flags); |
783 | list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { | 783 | list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { |
784 | if (ipatoe->proto != proto) | 784 | if (ipatoe->proto != proto) |
@@ -806,11 +806,11 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, | |||
806 | ipaddr = qeth_l3_get_addr_buffer(proto); | 806 | ipaddr = qeth_l3_get_addr_buffer(proto); |
807 | if (ipaddr) { | 807 | if (ipaddr) { |
808 | if (proto == QETH_PROT_IPV4) { | 808 | if (proto == QETH_PROT_IPV4) { |
809 | QETH_DBF_TEXT(TRACE, 2, "addvipa4"); | 809 | QETH_CARD_TEXT(card, 2, "addvipa4"); |
810 | memcpy(&ipaddr->u.a4.addr, addr, 4); | 810 | memcpy(&ipaddr->u.a4.addr, addr, 4); |
811 | ipaddr->u.a4.mask = 0; | 811 | ipaddr->u.a4.mask = 0; |
812 | } else if (proto == QETH_PROT_IPV6) { | 812 | } else if (proto == QETH_PROT_IPV6) { |
813 | QETH_DBF_TEXT(TRACE, 2, "addvipa6"); | 813 | QETH_CARD_TEXT(card, 2, "addvipa6"); |
814 | memcpy(&ipaddr->u.a6.addr, addr, 16); | 814 | memcpy(&ipaddr->u.a6.addr, addr, 16); |
815 | ipaddr->u.a6.pfxlen = 0; | 815 | ipaddr->u.a6.pfxlen = 0; |
816 | } | 816 | } |
@@ -841,11 +841,11 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto, | |||
841 | ipaddr = qeth_l3_get_addr_buffer(proto); | 841 | ipaddr = qeth_l3_get_addr_buffer(proto); |
842 | if (ipaddr) { | 842 | if (ipaddr) { |
843 | if (proto == QETH_PROT_IPV4) { | 843 | if (proto == QETH_PROT_IPV4) { |
844 | QETH_DBF_TEXT(TRACE, 2, "delvipa4"); | 844 | QETH_CARD_TEXT(card, 2, "delvipa4"); |
845 | memcpy(&ipaddr->u.a4.addr, addr, 4); | 845 | memcpy(&ipaddr->u.a4.addr, addr, 4); |
846 | ipaddr->u.a4.mask = 0; | 846 | ipaddr->u.a4.mask = 0; |
847 | } else if (proto == QETH_PROT_IPV6) { | 847 | } else if (proto == QETH_PROT_IPV6) { |
848 | QETH_DBF_TEXT(TRACE, 2, "delvipa6"); | 848 | QETH_CARD_TEXT(card, 2, "delvipa6"); |
849 | memcpy(&ipaddr->u.a6.addr, addr, 16); | 849 | memcpy(&ipaddr->u.a6.addr, addr, 16); |
850 | ipaddr->u.a6.pfxlen = 0; | 850 | ipaddr->u.a6.pfxlen = 0; |
851 | } | 851 | } |
@@ -870,11 +870,11 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, | |||
870 | ipaddr = qeth_l3_get_addr_buffer(proto); | 870 | ipaddr = qeth_l3_get_addr_buffer(proto); |
871 | if (ipaddr) { | 871 | if (ipaddr) { |
872 | if (proto == QETH_PROT_IPV4) { | 872 | if (proto == QETH_PROT_IPV4) { |
873 | QETH_DBF_TEXT(TRACE, 2, "addrxip4"); | 873 | QETH_CARD_TEXT(card, 2, "addrxip4"); |
874 | memcpy(&ipaddr->u.a4.addr, addr, 4); | 874 | memcpy(&ipaddr->u.a4.addr, addr, 4); |
875 | ipaddr->u.a4.mask = 0; | 875 | ipaddr->u.a4.mask = 0; |
876 | } else if (proto == QETH_PROT_IPV6) { | 876 | } else if (proto == QETH_PROT_IPV6) { |
877 | QETH_DBF_TEXT(TRACE, 2, "addrxip6"); | 877 | QETH_CARD_TEXT(card, 2, "addrxip6"); |
878 | memcpy(&ipaddr->u.a6.addr, addr, 16); | 878 | memcpy(&ipaddr->u.a6.addr, addr, 16); |
879 | ipaddr->u.a6.pfxlen = 0; | 879 | ipaddr->u.a6.pfxlen = 0; |
880 | } | 880 | } |
@@ -905,11 +905,11 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, | |||
905 | ipaddr = qeth_l3_get_addr_buffer(proto); | 905 | ipaddr = qeth_l3_get_addr_buffer(proto); |
906 | if (ipaddr) { | 906 | if (ipaddr) { |
907 | if (proto == QETH_PROT_IPV4) { | 907 | if (proto == QETH_PROT_IPV4) { |
908 | QETH_DBF_TEXT(TRACE, 2, "addrxip4"); | 908 | QETH_CARD_TEXT(card, 2, "addrxip4"); |
909 | memcpy(&ipaddr->u.a4.addr, addr, 4); | 909 | memcpy(&ipaddr->u.a4.addr, addr, 4); |
910 | ipaddr->u.a4.mask = 0; | 910 | ipaddr->u.a4.mask = 0; |
911 | } else if (proto == QETH_PROT_IPV6) { | 911 | } else if (proto == QETH_PROT_IPV6) { |
912 | QETH_DBF_TEXT(TRACE, 2, "addrxip6"); | 912 | QETH_CARD_TEXT(card, 2, "addrxip6"); |
913 | memcpy(&ipaddr->u.a6.addr, addr, 16); | 913 | memcpy(&ipaddr->u.a6.addr, addr, 16); |
914 | ipaddr->u.a6.pfxlen = 0; | 914 | ipaddr->u.a6.pfxlen = 0; |
915 | } | 915 | } |
@@ -929,15 +929,15 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, | |||
929 | int cnt = 3; | 929 | int cnt = 3; |
930 | 930 | ||
931 | if (addr->proto == QETH_PROT_IPV4) { | 931 | if (addr->proto == QETH_PROT_IPV4) { |
932 | QETH_DBF_TEXT(TRACE, 2, "setaddr4"); | 932 | QETH_CARD_TEXT(card, 2, "setaddr4"); |
933 | QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int)); | 933 | QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); |
934 | } else if (addr->proto == QETH_PROT_IPV6) { | 934 | } else if (addr->proto == QETH_PROT_IPV6) { |
935 | QETH_DBF_TEXT(TRACE, 2, "setaddr6"); | 935 | QETH_CARD_TEXT(card, 2, "setaddr6"); |
936 | QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8); | 936 | QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); |
937 | QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8); | 937 | QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); |
938 | } else { | 938 | } else { |
939 | QETH_DBF_TEXT(TRACE, 2, "setaddr?"); | 939 | QETH_CARD_TEXT(card, 2, "setaddr?"); |
940 | QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr)); | 940 | QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); |
941 | } | 941 | } |
942 | do { | 942 | do { |
943 | if (addr->is_multicast) | 943 | if (addr->is_multicast) |
@@ -946,10 +946,10 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, | |||
946 | rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP, | 946 | rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP, |
947 | addr->set_flags); | 947 | addr->set_flags); |
948 | if (rc) | 948 | if (rc) |
949 | QETH_DBF_TEXT(TRACE, 2, "failed"); | 949 | QETH_CARD_TEXT(card, 2, "failed"); |
950 | } while ((--cnt > 0) && rc); | 950 | } while ((--cnt > 0) && rc); |
951 | if (rc) { | 951 | if (rc) { |
952 | QETH_DBF_TEXT(TRACE, 2, "FAILED"); | 952 | QETH_CARD_TEXT(card, 2, "FAILED"); |
953 | qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); | 953 | qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); |
954 | dev_warn(&card->gdev->dev, | 954 | dev_warn(&card->gdev->dev, |
955 | "Registering IP address %s failed\n", buf); | 955 | "Registering IP address %s failed\n", buf); |
@@ -963,15 +963,15 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, | |||
963 | int rc = 0; | 963 | int rc = 0; |
964 | 964 | ||
965 | if (addr->proto == QETH_PROT_IPV4) { | 965 | if (addr->proto == QETH_PROT_IPV4) { |
966 | QETH_DBF_TEXT(TRACE, 2, "deladdr4"); | 966 | QETH_CARD_TEXT(card, 2, "deladdr4"); |
967 | QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int)); | 967 | QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); |
968 | } else if (addr->proto == QETH_PROT_IPV6) { | 968 | } else if (addr->proto == QETH_PROT_IPV6) { |
969 | QETH_DBF_TEXT(TRACE, 2, "deladdr6"); | 969 | QETH_CARD_TEXT(card, 2, "deladdr6"); |
970 | QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8); | 970 | QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); |
971 | QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8); | 971 | QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); |
972 | } else { | 972 | } else { |
973 | QETH_DBF_TEXT(TRACE, 2, "deladdr?"); | 973 | QETH_CARD_TEXT(card, 2, "deladdr?"); |
974 | QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr)); | 974 | QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); |
975 | } | 975 | } |
976 | if (addr->is_multicast) | 976 | if (addr->is_multicast) |
977 | rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM); | 977 | rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM); |
@@ -979,7 +979,7 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, | |||
979 | rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, | 979 | rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, |
980 | addr->del_flags); | 980 | addr->del_flags); |
981 | if (rc) | 981 | if (rc) |
982 | QETH_DBF_TEXT(TRACE, 2, "failed"); | 982 | QETH_CARD_TEXT(card, 2, "failed"); |
983 | 983 | ||
984 | return rc; | 984 | return rc; |
985 | } | 985 | } |
@@ -1012,7 +1012,7 @@ static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command, | |||
1012 | struct qeth_cmd_buffer *iob; | 1012 | struct qeth_cmd_buffer *iob; |
1013 | struct qeth_ipa_cmd *cmd; | 1013 | struct qeth_ipa_cmd *cmd; |
1014 | 1014 | ||
1015 | QETH_DBF_TEXT(TRACE, 4, "adpmode"); | 1015 | QETH_CARD_TEXT(card, 4, "adpmode"); |
1016 | 1016 | ||
1017 | iob = qeth_get_adapter_cmd(card, command, | 1017 | iob = qeth_get_adapter_cmd(card, command, |
1018 | sizeof(struct qeth_ipacmd_setadpparms)); | 1018 | sizeof(struct qeth_ipacmd_setadpparms)); |
@@ -1027,7 +1027,7 @@ static int qeth_l3_setadapter_hstr(struct qeth_card *card) | |||
1027 | { | 1027 | { |
1028 | int rc; | 1028 | int rc; |
1029 | 1029 | ||
1030 | QETH_DBF_TEXT(TRACE, 4, "adphstr"); | 1030 | QETH_CARD_TEXT(card, 4, "adphstr"); |
1031 | 1031 | ||
1032 | if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) { | 1032 | if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) { |
1033 | rc = qeth_l3_send_setadp_mode(card, | 1033 | rc = qeth_l3_send_setadp_mode(card, |
@@ -1093,7 +1093,7 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card, | |||
1093 | { | 1093 | { |
1094 | struct qeth_ipa_cmd *cmd; | 1094 | struct qeth_ipa_cmd *cmd; |
1095 | 1095 | ||
1096 | QETH_DBF_TEXT(TRACE, 4, "defadpcb"); | 1096 | QETH_CARD_TEXT(card, 4, "defadpcb"); |
1097 | 1097 | ||
1098 | cmd = (struct qeth_ipa_cmd *) data; | 1098 | cmd = (struct qeth_ipa_cmd *) data; |
1099 | if (cmd->hdr.return_code == 0) { | 1099 | if (cmd->hdr.return_code == 0) { |
@@ -1106,13 +1106,13 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card, | |||
1106 | if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM && | 1106 | if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM && |
1107 | cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { | 1107 | cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { |
1108 | card->info.csum_mask = cmd->data.setassparms.data.flags_32bit; | 1108 | card->info.csum_mask = cmd->data.setassparms.data.flags_32bit; |
1109 | QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask); | 1109 | QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask); |
1110 | } | 1110 | } |
1111 | if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM && | 1111 | if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM && |
1112 | cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { | 1112 | cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { |
1113 | card->info.tx_csum_mask = | 1113 | card->info.tx_csum_mask = |
1114 | cmd->data.setassparms.data.flags_32bit; | 1114 | cmd->data.setassparms.data.flags_32bit; |
1115 | QETH_DBF_TEXT_(TRACE, 3, "tcsu:%d", card->info.tx_csum_mask); | 1115 | QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask); |
1116 | } | 1116 | } |
1117 | 1117 | ||
1118 | return 0; | 1118 | return 0; |
@@ -1125,7 +1125,7 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd( | |||
1125 | struct qeth_cmd_buffer *iob; | 1125 | struct qeth_cmd_buffer *iob; |
1126 | struct qeth_ipa_cmd *cmd; | 1126 | struct qeth_ipa_cmd *cmd; |
1127 | 1127 | ||
1128 | QETH_DBF_TEXT(TRACE, 4, "getasscm"); | 1128 | QETH_CARD_TEXT(card, 4, "getasscm"); |
1129 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); | 1129 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); |
1130 | 1130 | ||
1131 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1131 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
@@ -1147,7 +1147,7 @@ static int qeth_l3_send_setassparms(struct qeth_card *card, | |||
1147 | int rc; | 1147 | int rc; |
1148 | struct qeth_ipa_cmd *cmd; | 1148 | struct qeth_ipa_cmd *cmd; |
1149 | 1149 | ||
1150 | QETH_DBF_TEXT(TRACE, 4, "sendassp"); | 1150 | QETH_CARD_TEXT(card, 4, "sendassp"); |
1151 | 1151 | ||
1152 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1152 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1153 | if (len <= sizeof(__u32)) | 1153 | if (len <= sizeof(__u32)) |
@@ -1166,7 +1166,7 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, | |||
1166 | int rc; | 1166 | int rc; |
1167 | struct qeth_cmd_buffer *iob; | 1167 | struct qeth_cmd_buffer *iob; |
1168 | 1168 | ||
1169 | QETH_DBF_TEXT(TRACE, 4, "simassp6"); | 1169 | QETH_CARD_TEXT(card, 4, "simassp6"); |
1170 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, | 1170 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, |
1171 | 0, QETH_PROT_IPV6); | 1171 | 0, QETH_PROT_IPV6); |
1172 | rc = qeth_l3_send_setassparms(card, iob, 0, 0, | 1172 | rc = qeth_l3_send_setassparms(card, iob, 0, 0, |
@@ -1182,7 +1182,7 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card, | |||
1182 | int length = 0; | 1182 | int length = 0; |
1183 | struct qeth_cmd_buffer *iob; | 1183 | struct qeth_cmd_buffer *iob; |
1184 | 1184 | ||
1185 | QETH_DBF_TEXT(TRACE, 4, "simassp4"); | 1185 | QETH_CARD_TEXT(card, 4, "simassp4"); |
1186 | if (data) | 1186 | if (data) |
1187 | length = sizeof(__u32); | 1187 | length = sizeof(__u32); |
1188 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, | 1188 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, |
@@ -1196,7 +1196,7 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) | |||
1196 | { | 1196 | { |
1197 | int rc; | 1197 | int rc; |
1198 | 1198 | ||
1199 | QETH_DBF_TEXT(TRACE, 3, "ipaarp"); | 1199 | QETH_CARD_TEXT(card, 3, "ipaarp"); |
1200 | 1200 | ||
1201 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { | 1201 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { |
1202 | dev_info(&card->gdev->dev, | 1202 | dev_info(&card->gdev->dev, |
@@ -1218,7 +1218,7 @@ static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card) | |||
1218 | { | 1218 | { |
1219 | int rc; | 1219 | int rc; |
1220 | 1220 | ||
1221 | QETH_DBF_TEXT(TRACE, 3, "ipaipfrg"); | 1221 | QETH_CARD_TEXT(card, 3, "ipaipfrg"); |
1222 | 1222 | ||
1223 | if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { | 1223 | if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { |
1224 | dev_info(&card->gdev->dev, | 1224 | dev_info(&card->gdev->dev, |
@@ -1243,7 +1243,7 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) | |||
1243 | { | 1243 | { |
1244 | int rc; | 1244 | int rc; |
1245 | 1245 | ||
1246 | QETH_DBF_TEXT(TRACE, 3, "stsrcmac"); | 1246 | QETH_CARD_TEXT(card, 3, "stsrcmac"); |
1247 | 1247 | ||
1248 | if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { | 1248 | if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { |
1249 | dev_info(&card->gdev->dev, | 1249 | dev_info(&card->gdev->dev, |
@@ -1265,7 +1265,7 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card) | |||
1265 | { | 1265 | { |
1266 | int rc = 0; | 1266 | int rc = 0; |
1267 | 1267 | ||
1268 | QETH_DBF_TEXT(TRACE, 3, "strtvlan"); | 1268 | QETH_CARD_TEXT(card, 3, "strtvlan"); |
1269 | 1269 | ||
1270 | if (!qeth_is_supported(card, IPA_FULL_VLAN)) { | 1270 | if (!qeth_is_supported(card, IPA_FULL_VLAN)) { |
1271 | dev_info(&card->gdev->dev, | 1271 | dev_info(&card->gdev->dev, |
@@ -1289,7 +1289,7 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card) | |||
1289 | { | 1289 | { |
1290 | int rc; | 1290 | int rc; |
1291 | 1291 | ||
1292 | QETH_DBF_TEXT(TRACE, 3, "stmcast"); | 1292 | QETH_CARD_TEXT(card, 3, "stmcast"); |
1293 | 1293 | ||
1294 | if (!qeth_is_supported(card, IPA_MULTICASTING)) { | 1294 | if (!qeth_is_supported(card, IPA_MULTICASTING)) { |
1295 | dev_info(&card->gdev->dev, | 1295 | dev_info(&card->gdev->dev, |
@@ -1349,7 +1349,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card) | |||
1349 | { | 1349 | { |
1350 | int rc; | 1350 | int rc; |
1351 | 1351 | ||
1352 | QETH_DBF_TEXT(TRACE, 3, "softipv6"); | 1352 | QETH_CARD_TEXT(card, 3, "softipv6"); |
1353 | 1353 | ||
1354 | if (card->info.type == QETH_CARD_TYPE_IQD) | 1354 | if (card->info.type == QETH_CARD_TYPE_IQD) |
1355 | goto out; | 1355 | goto out; |
@@ -1395,7 +1395,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) | |||
1395 | { | 1395 | { |
1396 | int rc = 0; | 1396 | int rc = 0; |
1397 | 1397 | ||
1398 | QETH_DBF_TEXT(TRACE, 3, "strtipv6"); | 1398 | QETH_CARD_TEXT(card, 3, "strtipv6"); |
1399 | 1399 | ||
1400 | if (!qeth_is_supported(card, IPA_IPV6)) { | 1400 | if (!qeth_is_supported(card, IPA_IPV6)) { |
1401 | dev_info(&card->gdev->dev, | 1401 | dev_info(&card->gdev->dev, |
@@ -1412,7 +1412,7 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) | |||
1412 | { | 1412 | { |
1413 | int rc; | 1413 | int rc; |
1414 | 1414 | ||
1415 | QETH_DBF_TEXT(TRACE, 3, "stbrdcst"); | 1415 | QETH_CARD_TEXT(card, 3, "stbrdcst"); |
1416 | card->info.broadcast_capable = 0; | 1416 | card->info.broadcast_capable = 0; |
1417 | if (!qeth_is_supported(card, IPA_FILTERING)) { | 1417 | if (!qeth_is_supported(card, IPA_FILTERING)) { |
1418 | dev_info(&card->gdev->dev, | 1418 | dev_info(&card->gdev->dev, |
@@ -1512,7 +1512,7 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card) | |||
1512 | { | 1512 | { |
1513 | int rc = 0; | 1513 | int rc = 0; |
1514 | 1514 | ||
1515 | QETH_DBF_TEXT(TRACE, 3, "strtcsum"); | 1515 | QETH_CARD_TEXT(card, 3, "strtcsum"); |
1516 | 1516 | ||
1517 | if (card->options.checksum_type == NO_CHECKSUMMING) { | 1517 | if (card->options.checksum_type == NO_CHECKSUMMING) { |
1518 | dev_info(&card->gdev->dev, | 1518 | dev_info(&card->gdev->dev, |
@@ -1569,7 +1569,7 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card) | |||
1569 | { | 1569 | { |
1570 | int rc; | 1570 | int rc; |
1571 | 1571 | ||
1572 | QETH_DBF_TEXT(TRACE, 3, "sttso"); | 1572 | QETH_CARD_TEXT(card, 3, "sttso"); |
1573 | 1573 | ||
1574 | if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { | 1574 | if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { |
1575 | dev_info(&card->gdev->dev, | 1575 | dev_info(&card->gdev->dev, |
@@ -1596,7 +1596,7 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card) | |||
1596 | 1596 | ||
1597 | static int qeth_l3_start_ipassists(struct qeth_card *card) | 1597 | static int qeth_l3_start_ipassists(struct qeth_card *card) |
1598 | { | 1598 | { |
1599 | QETH_DBF_TEXT(TRACE, 3, "strtipas"); | 1599 | QETH_CARD_TEXT(card, 3, "strtipas"); |
1600 | 1600 | ||
1601 | qeth_set_access_ctrl_online(card); /* go on*/ | 1601 | qeth_set_access_ctrl_online(card); /* go on*/ |
1602 | qeth_l3_start_ipa_arp_processing(card); /* go on*/ | 1602 | qeth_l3_start_ipa_arp_processing(card); /* go on*/ |
@@ -1619,7 +1619,7 @@ static int qeth_l3_put_unique_id(struct qeth_card *card) | |||
1619 | struct qeth_cmd_buffer *iob; | 1619 | struct qeth_cmd_buffer *iob; |
1620 | struct qeth_ipa_cmd *cmd; | 1620 | struct qeth_ipa_cmd *cmd; |
1621 | 1621 | ||
1622 | QETH_DBF_TEXT(TRACE, 2, "puniqeid"); | 1622 | QETH_CARD_TEXT(card, 2, "puniqeid"); |
1623 | 1623 | ||
1624 | if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) == | 1624 | if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) == |
1625 | UNIQUE_ID_NOT_BY_CARD) | 1625 | UNIQUE_ID_NOT_BY_CARD) |
@@ -1723,7 +1723,7 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply, | |||
1723 | cmd = (struct qeth_ipa_cmd *)data; | 1723 | cmd = (struct qeth_ipa_cmd *)data; |
1724 | rc = cmd->hdr.return_code; | 1724 | rc = cmd->hdr.return_code; |
1725 | if (rc) | 1725 | if (rc) |
1726 | QETH_DBF_TEXT_(TRACE, 2, "dxter%x", rc); | 1726 | QETH_CARD_TEXT_(card, 2, "dxter%x", rc); |
1727 | switch (cmd->data.diagass.action) { | 1727 | switch (cmd->data.diagass.action) { |
1728 | case QETH_DIAGS_CMD_TRACE_QUERY: | 1728 | case QETH_DIAGS_CMD_TRACE_QUERY: |
1729 | break; | 1729 | break; |
@@ -1800,7 +1800,7 @@ static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev) | |||
1800 | struct ip_mc_list *im4; | 1800 | struct ip_mc_list *im4; |
1801 | char buf[MAX_ADDR_LEN]; | 1801 | char buf[MAX_ADDR_LEN]; |
1802 | 1802 | ||
1803 | QETH_DBF_TEXT(TRACE, 4, "addmc"); | 1803 | QETH_CARD_TEXT(card, 4, "addmc"); |
1804 | for (im4 = in4_dev->mc_list; im4; im4 = im4->next) { | 1804 | for (im4 = in4_dev->mc_list; im4; im4 = im4->next) { |
1805 | qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev); | 1805 | qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev); |
1806 | ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); | 1806 | ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); |
@@ -1820,7 +1820,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card) | |||
1820 | struct vlan_group *vg; | 1820 | struct vlan_group *vg; |
1821 | int i; | 1821 | int i; |
1822 | 1822 | ||
1823 | QETH_DBF_TEXT(TRACE, 4, "addmcvl"); | 1823 | QETH_CARD_TEXT(card, 4, "addmcvl"); |
1824 | if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) | 1824 | if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) |
1825 | return; | 1825 | return; |
1826 | 1826 | ||
@@ -1844,7 +1844,7 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card) | |||
1844 | { | 1844 | { |
1845 | struct in_device *in4_dev; | 1845 | struct in_device *in4_dev; |
1846 | 1846 | ||
1847 | QETH_DBF_TEXT(TRACE, 4, "chkmcv4"); | 1847 | QETH_CARD_TEXT(card, 4, "chkmcv4"); |
1848 | in4_dev = in_dev_get(card->dev); | 1848 | in4_dev = in_dev_get(card->dev); |
1849 | if (in4_dev == NULL) | 1849 | if (in4_dev == NULL) |
1850 | return; | 1850 | return; |
@@ -1862,7 +1862,7 @@ static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) | |||
1862 | struct ifmcaddr6 *im6; | 1862 | struct ifmcaddr6 *im6; |
1863 | char buf[MAX_ADDR_LEN]; | 1863 | char buf[MAX_ADDR_LEN]; |
1864 | 1864 | ||
1865 | QETH_DBF_TEXT(TRACE, 4, "addmc6"); | 1865 | QETH_CARD_TEXT(card, 4, "addmc6"); |
1866 | for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { | 1866 | for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { |
1867 | ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0); | 1867 | ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0); |
1868 | ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); | 1868 | ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); |
@@ -1883,7 +1883,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card) | |||
1883 | struct vlan_group *vg; | 1883 | struct vlan_group *vg; |
1884 | int i; | 1884 | int i; |
1885 | 1885 | ||
1886 | QETH_DBF_TEXT(TRACE, 4, "admc6vl"); | 1886 | QETH_CARD_TEXT(card, 4, "admc6vl"); |
1887 | if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) | 1887 | if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) |
1888 | return; | 1888 | return; |
1889 | 1889 | ||
@@ -1907,7 +1907,7 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card) | |||
1907 | { | 1907 | { |
1908 | struct inet6_dev *in6_dev; | 1908 | struct inet6_dev *in6_dev; |
1909 | 1909 | ||
1910 | QETH_DBF_TEXT(TRACE, 4, "chkmcv6"); | 1910 | QETH_CARD_TEXT(card, 4, "chkmcv6"); |
1911 | if (!qeth_is_supported(card, IPA_IPV6)) | 1911 | if (!qeth_is_supported(card, IPA_IPV6)) |
1912 | return ; | 1912 | return ; |
1913 | in6_dev = in6_dev_get(card->dev); | 1913 | in6_dev = in6_dev_get(card->dev); |
@@ -1928,7 +1928,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, | |||
1928 | struct in_ifaddr *ifa; | 1928 | struct in_ifaddr *ifa; |
1929 | struct qeth_ipaddr *addr; | 1929 | struct qeth_ipaddr *addr; |
1930 | 1930 | ||
1931 | QETH_DBF_TEXT(TRACE, 4, "frvaddr4"); | 1931 | QETH_CARD_TEXT(card, 4, "frvaddr4"); |
1932 | 1932 | ||
1933 | in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid)); | 1933 | in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid)); |
1934 | if (!in_dev) | 1934 | if (!in_dev) |
@@ -1954,7 +1954,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, | |||
1954 | struct inet6_ifaddr *ifa; | 1954 | struct inet6_ifaddr *ifa; |
1955 | struct qeth_ipaddr *addr; | 1955 | struct qeth_ipaddr *addr; |
1956 | 1956 | ||
1957 | QETH_DBF_TEXT(TRACE, 4, "frvaddr6"); | 1957 | QETH_CARD_TEXT(card, 4, "frvaddr6"); |
1958 | 1958 | ||
1959 | in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid)); | 1959 | in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid)); |
1960 | if (!in6_dev) | 1960 | if (!in6_dev) |
@@ -1989,7 +1989,7 @@ static void qeth_l3_vlan_rx_register(struct net_device *dev, | |||
1989 | struct qeth_card *card = dev->ml_priv; | 1989 | struct qeth_card *card = dev->ml_priv; |
1990 | unsigned long flags; | 1990 | unsigned long flags; |
1991 | 1991 | ||
1992 | QETH_DBF_TEXT(TRACE, 4, "vlanreg"); | 1992 | QETH_CARD_TEXT(card, 4, "vlanreg"); |
1993 | spin_lock_irqsave(&card->vlanlock, flags); | 1993 | spin_lock_irqsave(&card->vlanlock, flags); |
1994 | card->vlangrp = grp; | 1994 | card->vlangrp = grp; |
1995 | spin_unlock_irqrestore(&card->vlanlock, flags); | 1995 | spin_unlock_irqrestore(&card->vlanlock, flags); |
@@ -2005,9 +2005,9 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
2005 | struct qeth_card *card = dev->ml_priv; | 2005 | struct qeth_card *card = dev->ml_priv; |
2006 | unsigned long flags; | 2006 | unsigned long flags; |
2007 | 2007 | ||
2008 | QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid); | 2008 | QETH_CARD_TEXT_(card, 4, "kid:%d", vid); |
2009 | if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { | 2009 | if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { |
2010 | QETH_DBF_TEXT(TRACE, 3, "kidREC"); | 2010 | QETH_CARD_TEXT(card, 3, "kidREC"); |
2011 | return; | 2011 | return; |
2012 | } | 2012 | } |
2013 | spin_lock_irqsave(&card->vlanlock, flags); | 2013 | spin_lock_irqsave(&card->vlanlock, flags); |
@@ -2162,7 +2162,7 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card, | |||
2162 | break; | 2162 | break; |
2163 | default: | 2163 | default: |
2164 | dev_kfree_skb_any(skb); | 2164 | dev_kfree_skb_any(skb); |
2165 | QETH_DBF_TEXT(TRACE, 3, "inbunkno"); | 2165 | QETH_CARD_TEXT(card, 3, "inbunkno"); |
2166 | QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); | 2166 | QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); |
2167 | continue; | 2167 | continue; |
2168 | } | 2168 | } |
@@ -2229,7 +2229,8 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev) | |||
2229 | card = vlan_dev_real_dev(dev)->ml_priv; | 2229 | card = vlan_dev_real_dev(dev)->ml_priv; |
2230 | if (card && card->options.layer2) | 2230 | if (card && card->options.layer2) |
2231 | card = NULL; | 2231 | card = NULL; |
2232 | QETH_DBF_TEXT_(TRACE, 4, "%d", rc); | 2232 | if (card) |
2233 | QETH_CARD_TEXT_(card, 4, "%d", rc); | ||
2233 | return card ; | 2234 | return card ; |
2234 | } | 2235 | } |
2235 | 2236 | ||
@@ -2307,10 +2308,10 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card) | |||
2307 | } else if (card->options.sniffer && /* HiperSockets trace */ | 2308 | } else if (card->options.sniffer && /* HiperSockets trace */ |
2308 | qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { | 2309 | qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { |
2309 | if (dev->flags & IFF_PROMISC) { | 2310 | if (dev->flags & IFF_PROMISC) { |
2310 | QETH_DBF_TEXT(TRACE, 3, "+promisc"); | 2311 | QETH_CARD_TEXT(card, 3, "+promisc"); |
2311 | qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE); | 2312 | qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE); |
2312 | } else { | 2313 | } else { |
2313 | QETH_DBF_TEXT(TRACE, 3, "-promisc"); | 2314 | QETH_CARD_TEXT(card, 3, "-promisc"); |
2314 | qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); | 2315 | qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); |
2315 | } | 2316 | } |
2316 | } | 2317 | } |
@@ -2320,7 +2321,7 @@ static void qeth_l3_set_multicast_list(struct net_device *dev) | |||
2320 | { | 2321 | { |
2321 | struct qeth_card *card = dev->ml_priv; | 2322 | struct qeth_card *card = dev->ml_priv; |
2322 | 2323 | ||
2323 | QETH_DBF_TEXT(TRACE, 3, "setmulti"); | 2324 | QETH_CARD_TEXT(card, 3, "setmulti"); |
2324 | if (qeth_threads_running(card, QETH_RECOVER_THREAD) && | 2325 | if (qeth_threads_running(card, QETH_RECOVER_THREAD) && |
2325 | (card->state != CARD_STATE_UP)) | 2326 | (card->state != CARD_STATE_UP)) |
2326 | return; | 2327 | return; |
@@ -2365,7 +2366,7 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) | |||
2365 | int tmp; | 2366 | int tmp; |
2366 | int rc; | 2367 | int rc; |
2367 | 2368 | ||
2368 | QETH_DBF_TEXT(TRACE, 3, "arpstnoe"); | 2369 | QETH_CARD_TEXT(card, 3, "arpstnoe"); |
2369 | 2370 | ||
2370 | /* | 2371 | /* |
2371 | * currently GuestLAN only supports the ARP assist function | 2372 | * currently GuestLAN only supports the ARP assist function |
@@ -2417,17 +2418,17 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, | |||
2417 | int uentry_size; | 2418 | int uentry_size; |
2418 | int i; | 2419 | int i; |
2419 | 2420 | ||
2420 | QETH_DBF_TEXT(TRACE, 4, "arpquecb"); | 2421 | QETH_CARD_TEXT(card, 4, "arpquecb"); |
2421 | 2422 | ||
2422 | qinfo = (struct qeth_arp_query_info *) reply->param; | 2423 | qinfo = (struct qeth_arp_query_info *) reply->param; |
2423 | cmd = (struct qeth_ipa_cmd *) data; | 2424 | cmd = (struct qeth_ipa_cmd *) data; |
2424 | if (cmd->hdr.return_code) { | 2425 | if (cmd->hdr.return_code) { |
2425 | QETH_DBF_TEXT_(TRACE, 4, "qaer1%i", cmd->hdr.return_code); | 2426 | QETH_CARD_TEXT_(card, 4, "qaer1%i", cmd->hdr.return_code); |
2426 | return 0; | 2427 | return 0; |
2427 | } | 2428 | } |
2428 | if (cmd->data.setassparms.hdr.return_code) { | 2429 | if (cmd->data.setassparms.hdr.return_code) { |
2429 | cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; | 2430 | cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; |
2430 | QETH_DBF_TEXT_(TRACE, 4, "qaer2%i", cmd->hdr.return_code); | 2431 | QETH_CARD_TEXT_(card, 4, "qaer2%i", cmd->hdr.return_code); |
2431 | return 0; | 2432 | return 0; |
2432 | } | 2433 | } |
2433 | qdata = &cmd->data.setassparms.data.query_arp; | 2434 | qdata = &cmd->data.setassparms.data.query_arp; |
@@ -2449,14 +2450,14 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, | |||
2449 | /* check if there is enough room in userspace */ | 2450 | /* check if there is enough room in userspace */ |
2450 | if ((qinfo->udata_len - qinfo->udata_offset) < | 2451 | if ((qinfo->udata_len - qinfo->udata_offset) < |
2451 | qdata->no_entries * uentry_size){ | 2452 | qdata->no_entries * uentry_size){ |
2452 | QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM); | 2453 | QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM); |
2453 | cmd->hdr.return_code = -ENOMEM; | 2454 | cmd->hdr.return_code = -ENOMEM; |
2454 | goto out_error; | 2455 | goto out_error; |
2455 | } | 2456 | } |
2456 | QETH_DBF_TEXT_(TRACE, 4, "anore%i", | 2457 | QETH_CARD_TEXT_(card, 4, "anore%i", |
2457 | cmd->data.setassparms.hdr.number_of_replies); | 2458 | cmd->data.setassparms.hdr.number_of_replies); |
2458 | QETH_DBF_TEXT_(TRACE, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no); | 2459 | QETH_CARD_TEXT_(card, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no); |
2459 | QETH_DBF_TEXT_(TRACE, 4, "anoen%i", qdata->no_entries); | 2460 | QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries); |
2460 | 2461 | ||
2461 | if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) { | 2462 | if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) { |
2462 | /* strip off "media specific information" */ | 2463 | /* strip off "media specific information" */ |
@@ -2492,7 +2493,7 @@ static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card, | |||
2492 | unsigned long), | 2493 | unsigned long), |
2493 | void *reply_param) | 2494 | void *reply_param) |
2494 | { | 2495 | { |
2495 | QETH_DBF_TEXT(TRACE, 4, "sendarp"); | 2496 | QETH_CARD_TEXT(card, 4, "sendarp"); |
2496 | 2497 | ||
2497 | memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); | 2498 | memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); |
2498 | memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), | 2499 | memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), |
@@ -2508,7 +2509,7 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) | |||
2508 | int tmp; | 2509 | int tmp; |
2509 | int rc; | 2510 | int rc; |
2510 | 2511 | ||
2511 | QETH_DBF_TEXT(TRACE, 3, "arpquery"); | 2512 | QETH_CARD_TEXT(card, 3, "arpquery"); |
2512 | 2513 | ||
2513 | if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ | 2514 | if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ |
2514 | IPA_ARP_PROCESSING)) { | 2515 | IPA_ARP_PROCESSING)) { |
@@ -2551,7 +2552,7 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, | |||
2551 | int tmp; | 2552 | int tmp; |
2552 | int rc; | 2553 | int rc; |
2553 | 2554 | ||
2554 | QETH_DBF_TEXT(TRACE, 3, "arpadent"); | 2555 | QETH_CARD_TEXT(card, 3, "arpadent"); |
2555 | 2556 | ||
2556 | /* | 2557 | /* |
2557 | * currently GuestLAN only supports the ARP assist function | 2558 | * currently GuestLAN only supports the ARP assist function |
@@ -2590,7 +2591,7 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, | |||
2590 | int tmp; | 2591 | int tmp; |
2591 | int rc; | 2592 | int rc; |
2592 | 2593 | ||
2593 | QETH_DBF_TEXT(TRACE, 3, "arprment"); | 2594 | QETH_CARD_TEXT(card, 3, "arprment"); |
2594 | 2595 | ||
2595 | /* | 2596 | /* |
2596 | * currently GuestLAN only supports the ARP assist function | 2597 | * currently GuestLAN only supports the ARP assist function |
@@ -2626,7 +2627,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card) | |||
2626 | int rc; | 2627 | int rc; |
2627 | int tmp; | 2628 | int tmp; |
2628 | 2629 | ||
2629 | QETH_DBF_TEXT(TRACE, 3, "arpflush"); | 2630 | QETH_CARD_TEXT(card, 3, "arpflush"); |
2630 | 2631 | ||
2631 | /* | 2632 | /* |
2632 | * currently GuestLAN only supports the ARP assist function | 2633 | * currently GuestLAN only supports the ARP assist function |
@@ -2734,7 +2735,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
2734 | rc = -EOPNOTSUPP; | 2735 | rc = -EOPNOTSUPP; |
2735 | } | 2736 | } |
2736 | if (rc) | 2737 | if (rc) |
2737 | QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc); | 2738 | QETH_CARD_TEXT_(card, 2, "ioce%d", rc); |
2738 | return rc; | 2739 | return rc; |
2739 | } | 2740 | } |
2740 | 2741 | ||
@@ -2903,19 +2904,11 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb) | |||
2903 | unsigned long tcpd = (unsigned long)tcp_hdr(skb) + | 2904 | unsigned long tcpd = (unsigned long)tcp_hdr(skb) + |
2904 | tcp_hdr(skb)->doff * 4; | 2905 | tcp_hdr(skb)->doff * 4; |
2905 | int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); | 2906 | int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); |
2906 | int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd); | 2907 | int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); |
2907 | elements += skb_shinfo(skb)->nr_frags; | 2908 | elements += skb_shinfo(skb)->nr_frags; |
2908 | return elements; | 2909 | return elements; |
2909 | } | 2910 | } |
2910 | 2911 | ||
2911 | static inline int qeth_l3_tso_check(struct sk_buff *skb) | ||
2912 | { | ||
2913 | int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) - | ||
2914 | (unsigned long)skb->data; | ||
2915 | return (((unsigned long)skb->data & PAGE_MASK) != | ||
2916 | (((unsigned long)skb->data + len) & PAGE_MASK)); | ||
2917 | } | ||
2918 | |||
2919 | static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | 2912 | static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) |
2920 | { | 2913 | { |
2921 | int rc; | 2914 | int rc; |
@@ -3015,8 +3008,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3015 | (cast_type == RTN_UNSPEC)) { | 3008 | (cast_type == RTN_UNSPEC)) { |
3016 | hdr = (struct qeth_hdr *)skb_push(new_skb, | 3009 | hdr = (struct qeth_hdr *)skb_push(new_skb, |
3017 | sizeof(struct qeth_hdr_tso)); | 3010 | sizeof(struct qeth_hdr_tso)); |
3018 | if (qeth_l3_tso_check(new_skb)) | ||
3019 | QETH_DBF_MESSAGE(2, "tso skb misaligned\n"); | ||
3020 | memset(hdr, 0, sizeof(struct qeth_hdr_tso)); | 3011 | memset(hdr, 0, sizeof(struct qeth_hdr_tso)); |
3021 | qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); | 3012 | qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); |
3022 | qeth_tso_fill_header(card, hdr, new_skb); | 3013 | qeth_tso_fill_header(card, hdr, new_skb); |
@@ -3047,10 +3038,20 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3047 | elements_needed += elems; | 3038 | elements_needed += elems; |
3048 | nr_frags = skb_shinfo(new_skb)->nr_frags; | 3039 | nr_frags = skb_shinfo(new_skb)->nr_frags; |
3049 | 3040 | ||
3050 | if (card->info.type != QETH_CARD_TYPE_IQD) | 3041 | if (card->info.type != QETH_CARD_TYPE_IQD) { |
3042 | int len; | ||
3043 | if (large_send == QETH_LARGE_SEND_TSO) | ||
3044 | len = ((unsigned long)tcp_hdr(new_skb) + | ||
3045 | tcp_hdr(new_skb)->doff * 4) - | ||
3046 | (unsigned long)new_skb->data; | ||
3047 | else | ||
3048 | len = sizeof(struct qeth_hdr_layer3); | ||
3049 | |||
3050 | if (qeth_hdr_chk_and_bounce(new_skb, len)) | ||
3051 | goto tx_drop; | ||
3051 | rc = qeth_do_send_packet(card, queue, new_skb, hdr, | 3052 | rc = qeth_do_send_packet(card, queue, new_skb, hdr, |
3052 | elements_needed); | 3053 | elements_needed); |
3053 | else | 3054 | } else |
3054 | rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, | 3055 | rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, |
3055 | elements_needed, data_offset, 0); | 3056 | elements_needed, data_offset, 0); |
3056 | 3057 | ||
@@ -3103,7 +3104,7 @@ static int qeth_l3_open(struct net_device *dev) | |||
3103 | { | 3104 | { |
3104 | struct qeth_card *card = dev->ml_priv; | 3105 | struct qeth_card *card = dev->ml_priv; |
3105 | 3106 | ||
3106 | QETH_DBF_TEXT(TRACE, 4, "qethopen"); | 3107 | QETH_CARD_TEXT(card, 4, "qethopen"); |
3107 | if (card->state != CARD_STATE_SOFTSETUP) | 3108 | if (card->state != CARD_STATE_SOFTSETUP) |
3108 | return -ENODEV; | 3109 | return -ENODEV; |
3109 | card->data.state = CH_STATE_UP; | 3110 | card->data.state = CH_STATE_UP; |
@@ -3119,7 +3120,7 @@ static int qeth_l3_stop(struct net_device *dev) | |||
3119 | { | 3120 | { |
3120 | struct qeth_card *card = dev->ml_priv; | 3121 | struct qeth_card *card = dev->ml_priv; |
3121 | 3122 | ||
3122 | QETH_DBF_TEXT(TRACE, 4, "qethstop"); | 3123 | QETH_CARD_TEXT(card, 4, "qethstop"); |
3123 | netif_tx_disable(dev); | 3124 | netif_tx_disable(dev); |
3124 | if (card->state == CARD_STATE_UP) | 3125 | if (card->state == CARD_STATE_UP) |
3125 | card->state = CARD_STATE_SOFTSETUP; | 3126 | card->state = CARD_STATE_SOFTSETUP; |
@@ -3312,11 +3313,10 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, | |||
3312 | card->perf_stats.inbound_start_time = qeth_get_micros(); | 3313 | card->perf_stats.inbound_start_time = qeth_get_micros(); |
3313 | } | 3314 | } |
3314 | if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { | 3315 | if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { |
3315 | QETH_DBF_TEXT(TRACE, 1, "qdinchk"); | 3316 | QETH_CARD_TEXT(card, 1, "qdinchk"); |
3316 | QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); | 3317 | QETH_CARD_TEXT_(card, 1, "%04X%04X", |
3317 | QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", | ||
3318 | first_element, count); | 3318 | first_element, count); |
3319 | QETH_DBF_TEXT_(TRACE, 1, "%04X", queue); | 3319 | QETH_CARD_TEXT_(card, 1, "%04X", queue); |
3320 | qeth_schedule_recovery(card); | 3320 | qeth_schedule_recovery(card); |
3321 | return; | 3321 | return; |
3322 | } | 3322 | } |
@@ -3354,6 +3354,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) | |||
3354 | { | 3354 | { |
3355 | struct qeth_card *card = dev_get_drvdata(&cgdev->dev); | 3355 | struct qeth_card *card = dev_get_drvdata(&cgdev->dev); |
3356 | 3356 | ||
3357 | qeth_l3_remove_device_attributes(&cgdev->dev); | ||
3358 | |||
3357 | qeth_set_allowed_threads(card, 0, 1); | 3359 | qeth_set_allowed_threads(card, 0, 1); |
3358 | wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); | 3360 | wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); |
3359 | 3361 | ||
@@ -3367,7 +3369,6 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) | |||
3367 | card->dev = NULL; | 3369 | card->dev = NULL; |
3368 | } | 3370 | } |
3369 | 3371 | ||
3370 | qeth_l3_remove_device_attributes(&cgdev->dev); | ||
3371 | qeth_l3_clear_ip_list(card, 0, 0); | 3372 | qeth_l3_clear_ip_list(card, 0, 0); |
3372 | qeth_l3_clear_ipato_list(card); | 3373 | qeth_l3_clear_ipato_list(card); |
3373 | return; | 3374 | return; |
@@ -3380,6 +3381,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3380 | enum qeth_card_states recover_flag; | 3381 | enum qeth_card_states recover_flag; |
3381 | 3382 | ||
3382 | BUG_ON(!card); | 3383 | BUG_ON(!card); |
3384 | mutex_lock(&card->discipline_mutex); | ||
3383 | mutex_lock(&card->conf_mutex); | 3385 | mutex_lock(&card->conf_mutex); |
3384 | QETH_DBF_TEXT(SETUP, 2, "setonlin"); | 3386 | QETH_DBF_TEXT(SETUP, 2, "setonlin"); |
3385 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); | 3387 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); |
@@ -3461,6 +3463,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3461 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); | 3463 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); |
3462 | out: | 3464 | out: |
3463 | mutex_unlock(&card->conf_mutex); | 3465 | mutex_unlock(&card->conf_mutex); |
3466 | mutex_unlock(&card->discipline_mutex); | ||
3464 | return 0; | 3467 | return 0; |
3465 | out_remove: | 3468 | out_remove: |
3466 | card->use_hard_stop = 1; | 3469 | card->use_hard_stop = 1; |
@@ -3473,6 +3476,7 @@ out_remove: | |||
3473 | else | 3476 | else |
3474 | card->state = CARD_STATE_DOWN; | 3477 | card->state = CARD_STATE_DOWN; |
3475 | mutex_unlock(&card->conf_mutex); | 3478 | mutex_unlock(&card->conf_mutex); |
3479 | mutex_unlock(&card->discipline_mutex); | ||
3476 | return rc; | 3480 | return rc; |
3477 | } | 3481 | } |
3478 | 3482 | ||
@@ -3488,6 +3492,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, | |||
3488 | int rc = 0, rc2 = 0, rc3 = 0; | 3492 | int rc = 0, rc2 = 0, rc3 = 0; |
3489 | enum qeth_card_states recover_flag; | 3493 | enum qeth_card_states recover_flag; |
3490 | 3494 | ||
3495 | mutex_lock(&card->discipline_mutex); | ||
3491 | mutex_lock(&card->conf_mutex); | 3496 | mutex_lock(&card->conf_mutex); |
3492 | QETH_DBF_TEXT(SETUP, 3, "setoffl"); | 3497 | QETH_DBF_TEXT(SETUP, 3, "setoffl"); |
3493 | QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); | 3498 | QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); |
@@ -3508,6 +3513,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, | |||
3508 | /* let user_space know that device is offline */ | 3513 | /* let user_space know that device is offline */ |
3509 | kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); | 3514 | kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); |
3510 | mutex_unlock(&card->conf_mutex); | 3515 | mutex_unlock(&card->conf_mutex); |
3516 | mutex_unlock(&card->discipline_mutex); | ||
3511 | return 0; | 3517 | return 0; |
3512 | } | 3518 | } |
3513 | 3519 | ||
@@ -3522,11 +3528,11 @@ static int qeth_l3_recover(void *ptr) | |||
3522 | int rc = 0; | 3528 | int rc = 0; |
3523 | 3529 | ||
3524 | card = (struct qeth_card *) ptr; | 3530 | card = (struct qeth_card *) ptr; |
3525 | QETH_DBF_TEXT(TRACE, 2, "recover1"); | 3531 | QETH_CARD_TEXT(card, 2, "recover1"); |
3526 | QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); | 3532 | QETH_CARD_HEX(card, 2, &card, sizeof(void *)); |
3527 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) | 3533 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) |
3528 | return 0; | 3534 | return 0; |
3529 | QETH_DBF_TEXT(TRACE, 2, "recover2"); | 3535 | QETH_CARD_TEXT(card, 2, "recover2"); |
3530 | dev_warn(&card->gdev->dev, | 3536 | dev_warn(&card->gdev->dev, |
3531 | "A recovery process has been started for the device\n"); | 3537 | "A recovery process has been started for the device\n"); |
3532 | card->use_hard_stop = 1; | 3538 | card->use_hard_stop = 1; |
@@ -3624,8 +3630,8 @@ static int qeth_l3_ip_event(struct notifier_block *this, | |||
3624 | if (dev_net(dev) != &init_net) | 3630 | if (dev_net(dev) != &init_net) |
3625 | return NOTIFY_DONE; | 3631 | return NOTIFY_DONE; |
3626 | 3632 | ||
3627 | QETH_DBF_TEXT(TRACE, 3, "ipevent"); | ||
3628 | card = qeth_l3_get_card_from_dev(dev); | 3633 | card = qeth_l3_get_card_from_dev(dev); |
3634 | QETH_CARD_TEXT(card, 3, "ipevent"); | ||
3629 | if (!card) | 3635 | if (!card) |
3630 | return NOTIFY_DONE; | 3636 | return NOTIFY_DONE; |
3631 | 3637 | ||
@@ -3671,11 +3677,11 @@ static int qeth_l3_ip6_event(struct notifier_block *this, | |||
3671 | struct qeth_ipaddr *addr; | 3677 | struct qeth_ipaddr *addr; |
3672 | struct qeth_card *card; | 3678 | struct qeth_card *card; |
3673 | 3679 | ||
3674 | QETH_DBF_TEXT(TRACE, 3, "ip6event"); | ||
3675 | 3680 | ||
3676 | card = qeth_l3_get_card_from_dev(dev); | 3681 | card = qeth_l3_get_card_from_dev(dev); |
3677 | if (!card) | 3682 | if (!card) |
3678 | return NOTIFY_DONE; | 3683 | return NOTIFY_DONE; |
3684 | QETH_CARD_TEXT(card, 3, "ip6event"); | ||
3679 | if (!qeth_is_supported(card, IPA_IPV6)) | 3685 | if (!qeth_is_supported(card, IPA_IPV6)) |
3680 | return NOTIFY_DONE; | 3686 | return NOTIFY_DONE; |
3681 | 3687 | ||
@@ -3714,7 +3720,7 @@ static int qeth_l3_register_notifiers(void) | |||
3714 | { | 3720 | { |
3715 | int rc; | 3721 | int rc; |
3716 | 3722 | ||
3717 | QETH_DBF_TEXT(TRACE, 5, "regnotif"); | 3723 | QETH_DBF_TEXT(SETUP, 5, "regnotif"); |
3718 | rc = register_inetaddr_notifier(&qeth_l3_ip_notifier); | 3724 | rc = register_inetaddr_notifier(&qeth_l3_ip_notifier); |
3719 | if (rc) | 3725 | if (rc) |
3720 | return rc; | 3726 | return rc; |
@@ -3733,7 +3739,7 @@ static int qeth_l3_register_notifiers(void) | |||
3733 | static void qeth_l3_unregister_notifiers(void) | 3739 | static void qeth_l3_unregister_notifiers(void) |
3734 | { | 3740 | { |
3735 | 3741 | ||
3736 | QETH_DBF_TEXT(TRACE, 5, "unregnot"); | 3742 | QETH_DBF_TEXT(SETUP, 5, "unregnot"); |
3737 | BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); | 3743 | BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); |
3738 | #ifdef CONFIG_QETH_IPV6 | 3744 | #ifdef CONFIG_QETH_IPV6 |
3739 | BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); | 3745 | BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); |
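The qeth_l3_main.c hunks above replace the driver-global QETH_DBF_TEXT(TRACE, ...) trace calls with per-card QETH_CARD_TEXT(card, ...) variants, so that debug entries are recorded in the debug area of the card that produced them rather than in one shared TRACE area. A minimal sketch of that idea on top of the s390 debug feature; struct my_card, my_card_debug_setup and MY_CARD_TEXT are illustrative names, not the actual qeth definitions:

#include <asm/debug.h>
#include <linux/errno.h>

struct my_card {
        debug_info_t *debug;            /* per-card debug area */
};

/* assumption: called once while the card is brought up */
static int my_card_debug_setup(struct my_card *card, const char *name)
{
        /* 8 pages per area, 1 area, text entries of up to 22 bytes */
        card->debug = debug_register(name, 8, 1, 22);
        if (!card->debug)
                return -ENOMEM;
        debug_register_view(card->debug, &debug_hex_ascii_view);
        debug_set_level(card->debug, 2);
        return 0;
}

/* card-scoped trace entry, analogous to QETH_CARD_TEXT(card, level, text) */
#define MY_CARD_TEXT(card, level, text) \
        debug_text_event((card)->debug, level, text)

Tearing the card down would mirror this with debug_unregister(card->debug).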
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index fb5318b30e99..67cfa68dcf1b 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c | |||
@@ -479,6 +479,7 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, | |||
479 | struct device_attribute *attr, const char *buf, size_t count) | 479 | struct device_attribute *attr, const char *buf, size_t count) |
480 | { | 480 | { |
481 | struct qeth_card *card = dev_get_drvdata(dev); | 481 | struct qeth_card *card = dev_get_drvdata(dev); |
482 | struct qeth_ipaddr *tmpipa, *t; | ||
482 | char *tmp; | 483 | char *tmp; |
483 | int rc = 0; | 484 | int rc = 0; |
484 | 485 | ||
@@ -497,8 +498,21 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, | |||
497 | card->ipato.enabled = (card->ipato.enabled)? 0 : 1; | 498 | card->ipato.enabled = (card->ipato.enabled)? 0 : 1; |
498 | } else if (!strcmp(tmp, "1")) { | 499 | } else if (!strcmp(tmp, "1")) { |
499 | card->ipato.enabled = 1; | 500 | card->ipato.enabled = 1; |
501 | list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) { | ||
502 | if ((tmpipa->type == QETH_IP_TYPE_NORMAL) && | ||
503 | qeth_l3_is_addr_covered_by_ipato(card, tmpipa)) | ||
504 | tmpipa->set_flags |= | ||
505 | QETH_IPA_SETIP_TAKEOVER_FLAG; | ||
506 | } | ||
507 | |||
500 | } else if (!strcmp(tmp, "0")) { | 508 | } else if (!strcmp(tmp, "0")) { |
501 | card->ipato.enabled = 0; | 509 | card->ipato.enabled = 0; |
510 | list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) { | ||
511 | if (tmpipa->set_flags & | ||
512 | QETH_IPA_SETIP_TAKEOVER_FLAG) | ||
513 | tmpipa->set_flags &= | ||
514 | ~QETH_IPA_SETIP_TAKEOVER_FLAG; | ||
515 | } | ||
502 | } else | 516 | } else |
503 | rc = -EINVAL; | 517 | rc = -EINVAL; |
504 | out: | 518 | out: |
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 70491274da16..65e1cf104943 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c | |||
@@ -47,6 +47,7 @@ static struct device *smsg_dev; | |||
47 | 47 | ||
48 | static DEFINE_SPINLOCK(smsg_list_lock); | 48 | static DEFINE_SPINLOCK(smsg_list_lock); |
49 | static LIST_HEAD(smsg_list); | 49 | static LIST_HEAD(smsg_list); |
50 | static int iucv_path_connected; | ||
50 | 51 | ||
51 | static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); | 52 | static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); |
52 | static void smsg_message_pending(struct iucv_path *, struct iucv_message *); | 53 | static void smsg_message_pending(struct iucv_path *, struct iucv_message *); |
@@ -142,8 +143,10 @@ static int smsg_pm_freeze(struct device *dev) | |||
142 | #ifdef CONFIG_PM_DEBUG | 143 | #ifdef CONFIG_PM_DEBUG |
143 | printk(KERN_WARNING "smsg_pm_freeze\n"); | 144 | printk(KERN_WARNING "smsg_pm_freeze\n"); |
144 | #endif | 145 | #endif |
145 | if (smsg_path) | 146 | if (smsg_path && iucv_path_connected) { |
146 | iucv_path_sever(smsg_path, NULL); | 147 | iucv_path_sever(smsg_path, NULL); |
148 | iucv_path_connected = 0; | ||
149 | } | ||
147 | return 0; | 150 | return 0; |
148 | } | 151 | } |
149 | 152 | ||
@@ -154,7 +157,7 @@ static int smsg_pm_restore_thaw(struct device *dev) | |||
154 | #ifdef CONFIG_PM_DEBUG | 157 | #ifdef CONFIG_PM_DEBUG |
155 | printk(KERN_WARNING "smsg_pm_restore_thaw\n"); | 158 | printk(KERN_WARNING "smsg_pm_restore_thaw\n"); |
156 | #endif | 159 | #endif |
157 | if (smsg_path) { | 160 | if (smsg_path && iucv_path_connected) { |
158 | memset(smsg_path, 0, sizeof(*smsg_path)); | 161 | memset(smsg_path, 0, sizeof(*smsg_path)); |
159 | smsg_path->msglim = 255; | 162 | smsg_path->msglim = 255; |
160 | smsg_path->flags = 0; | 163 | smsg_path->flags = 0; |
@@ -165,6 +168,8 @@ static int smsg_pm_restore_thaw(struct device *dev) | |||
165 | printk(KERN_ERR | 168 | printk(KERN_ERR |
166 | "iucv_path_connect returned with rc %i\n", rc); | 169 | "iucv_path_connect returned with rc %i\n", rc); |
167 | #endif | 170 | #endif |
171 | if (!rc) | ||
172 | iucv_path_connected = 1; | ||
168 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); | 173 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); |
169 | } | 174 | } |
170 | return 0; | 175 | return 0; |
@@ -214,6 +219,8 @@ static int __init smsg_init(void) | |||
214 | NULL, NULL, NULL); | 219 | NULL, NULL, NULL); |
215 | if (rc) | 220 | if (rc) |
216 | goto out_free_path; | 221 | goto out_free_path; |
222 | else | ||
223 | iucv_path_connected = 1; | ||
217 | smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL); | 224 | smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL); |
218 | if (!smsg_dev) { | 225 | if (!smsg_dev) { |
219 | rc = -ENOMEM; | 226 | rc = -ENOMEM; |
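The smsgiucv.c hunks track the state of the *MSG IUCV path in iucv_path_connected, so the power-management freeze handler only severs a path that was actually established, and the flag is armed again once a connect succeeds. A hedged sketch of that guard, with my_path, my_handler, my_connect and my_freeze as stand-in names; iucv_path_connect() and iucv_path_sever() are the real IUCV API:

#include <linux/device.h>
#include <net/iucv/iucv.h>

static struct iucv_handler my_handler;  /* message callbacks omitted here */
static struct iucv_path *my_path;       /* assumption: allocated at module init */
static int my_path_connected;           /* 1 while the path is established */

static int my_connect(void)
{
        int rc;

        rc = iucv_path_connect(my_path, &my_handler, "*MSG    ",
                               NULL, NULL, NULL);
        if (!rc)
                my_path_connected = 1; /* remember that a sever is needed later */
        return rc;
}

static int my_freeze(struct device *dev)
{
        /* sever only a path that really came up; otherwise do nothing */
        if (my_path && my_path_connected) {
                iucv_path_sever(my_path, NULL);
                my_path_connected = 0;
        }
        return 0;
}

The thaw/restore side keys off the same flag before it rebuilds the path and reissues SET SMSG IUCV.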
diff --git a/drivers/s390/net/smsgiucv_app.c b/drivers/s390/net/smsgiucv_app.c index 137688790207..4d2ea4000422 100644 --- a/drivers/s390/net/smsgiucv_app.c +++ b/drivers/s390/net/smsgiucv_app.c | |||
@@ -180,6 +180,13 @@ static int __init smsgiucv_app_init(void) | |||
180 | goto fail_put_driver; | 180 | goto fail_put_driver; |
181 | } | 181 | } |
182 | 182 | ||
183 | /* convert sender to uppercase characters */ | ||
184 | if (sender) { | ||
185 | int len = strlen(sender); | ||
186 | while (len--) | ||
187 | sender[len] = toupper(sender[len]); | ||
188 | } | ||
189 | |||
183 | /* register with the smsgiucv device driver */ | 190 | /* register with the smsgiucv device driver */ |
184 | rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback); | 191 | rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback); |
185 | if (rc) { | 192 | if (rc) { |
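The smsgiucv_app hunk folds the sender module parameter to upper case before registering the callback, presumably because z/VM user IDs are stored and compared in upper case. The loop is self-contained; the same transformation as a small user-space C program:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* uppercase a string in place, as the module does with "sender" */
static void upcase(char *s)
{
        size_t len = strlen(s);

        while (len--)
                s[len] = toupper((unsigned char)s[len]);
}

int main(void)
{
        char sender[] = "lnxuser1";

        upcase(sender);
        printf("%s\n", sender);         /* prints LNXUSER1 */
        return 0;
}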
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index e331df2122f7..96fa1f536394 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c | |||
@@ -98,13 +98,11 @@ static void __init zfcp_init_device_setup(char *devstr) | |||
98 | u64 wwpn, lun; | 98 | u64 wwpn, lun; |
99 | 99 | ||
100 | /* duplicate devstr and keep the original for sysfs presentation*/ | 100 | /* duplicate devstr and keep the original for sysfs presentation*/ |
101 | str_saved = kmalloc(strlen(devstr) + 1, GFP_KERNEL); | 101 | str_saved = kstrdup(devstr, GFP_KERNEL); |
102 | str = str_saved; | 102 | str = str_saved; |
103 | if (!str) | 103 | if (!str) |
104 | return; | 104 | return; |
105 | 105 | ||
106 | strcpy(str, devstr); | ||
107 | |||
108 | token = strsep(&str, ","); | 106 | token = strsep(&str, ","); |
109 | if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE) | 107 | if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE) |
110 | goto err_out; | 108 | goto err_out; |
@@ -314,7 +312,7 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) | |||
314 | } | 312 | } |
315 | retval = -EINVAL; | 313 | retval = -EINVAL; |
316 | 314 | ||
317 | INIT_WORK(&unit->scsi_work, zfcp_scsi_scan); | 315 | INIT_WORK(&unit->scsi_work, zfcp_scsi_scan_work); |
318 | 316 | ||
319 | spin_lock_init(&unit->latencies.lock); | 317 | spin_lock_init(&unit->latencies.lock); |
320 | unit->latencies.write.channel.min = 0xFFFFFFFF; | 318 | unit->latencies.write.channel.min = 0xFFFFFFFF; |
@@ -526,6 +524,10 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
526 | rwlock_init(&adapter->port_list_lock); | 524 | rwlock_init(&adapter->port_list_lock); |
527 | INIT_LIST_HEAD(&adapter->port_list); | 525 | INIT_LIST_HEAD(&adapter->port_list); |
528 | 526 | ||
527 | INIT_LIST_HEAD(&adapter->events.list); | ||
528 | INIT_WORK(&adapter->events.work, zfcp_fc_post_event); | ||
529 | spin_lock_init(&adapter->events.list_lock); | ||
530 | |||
529 | init_waitqueue_head(&adapter->erp_ready_wq); | 531 | init_waitqueue_head(&adapter->erp_ready_wq); |
530 | init_waitqueue_head(&adapter->erp_done_wqh); | 532 | init_waitqueue_head(&adapter->erp_done_wqh); |
531 | 533 | ||
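In zfcp_aux.c the open-coded kmalloc(strlen() + 1)/strcpy() pair becomes a single kstrdup(), which allocates and copies in one step and cannot get the length and the copy out of sync. A minimal sketch of the resulting shape; parse_devstr and its body are made up, kstrdup(), strsep() and kfree() are the real kernel API:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

static int parse_devstr(const char *devstr)
{
        char *str_saved, *str;

        /* duplicate so that strsep() can chop up the copy, not the original */
        str_saved = kstrdup(devstr, GFP_KERNEL);
        str = str_saved;
        if (!str)
                return -ENOMEM;

        /* ... strsep(&str, ",")-based parsing as in the driver ... */

        kfree(str_saved);
        return 0;
}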
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index 1a2db0a35737..fcbd2b756da4 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c | |||
@@ -189,18 +189,12 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, | |||
189 | if (!fsf_cfdc) | 189 | if (!fsf_cfdc) |
190 | return -ENOMEM; | 190 | return -ENOMEM; |
191 | 191 | ||
192 | data = kmalloc(sizeof(struct zfcp_cfdc_data), GFP_KERNEL); | 192 | data = memdup_user(data_user, sizeof(*data_user)); |
193 | if (!data) { | 193 | if (IS_ERR(data)) { |
194 | retval = -ENOMEM; | 194 | retval = PTR_ERR(data); |
195 | goto no_mem_sense; | 195 | goto no_mem_sense; |
196 | } | 196 | } |
197 | 197 | ||
198 | retval = copy_from_user(data, data_user, sizeof(*data)); | ||
199 | if (retval) { | ||
200 | retval = -EFAULT; | ||
201 | goto free_buffer; | ||
202 | } | ||
203 | |||
204 | if (data->signature != 0xCFDCACDF) { | 198 | if (data->signature != 0xCFDCACDF) { |
205 | retval = -EINVAL; | 199 | retval = -EINVAL; |
206 | goto free_buffer; | 200 | goto free_buffer; |
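The zfcp_cfdc ioctl path is shortened with memdup_user(), which combines the kmalloc() and copy_from_user() steps and reports failure as an ERR_PTR, so the separate -ENOMEM and -EFAULT branches collapse into a single IS_ERR() check. A sketch of the pattern outside the zfcp context; struct my_ioctl_data and my_ioctl are hypothetical, memdup_user()/IS_ERR()/PTR_ERR() are the real API:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct my_ioctl_data {
        u32 signature;
        u32 devno;
};

static long my_ioctl(void __user *arg)
{
        struct my_ioctl_data *data;
        long retval = 0;

        data = memdup_user(arg, sizeof(*data));
        if (IS_ERR(data))
                return PTR_ERR(data);   /* -ENOMEM or -EFAULT */

        if (data->signature != 0xCFDCACDF)
                retval = -EINVAL;

        kfree(data);
        return retval;
}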
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 075852f6968c..a86117b0d6e1 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c | |||
@@ -155,6 +155,8 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level, | |||
155 | if (scsi_cmnd) { | 155 | if (scsi_cmnd) { |
156 | response->u.fcp.cmnd = (unsigned long)scsi_cmnd; | 156 | response->u.fcp.cmnd = (unsigned long)scsi_cmnd; |
157 | response->u.fcp.serial = scsi_cmnd->serial_number; | 157 | response->u.fcp.serial = scsi_cmnd->serial_number; |
158 | response->u.fcp.data_dir = | ||
159 | qtcb->bottom.io.data_direction; | ||
158 | } | 160 | } |
159 | break; | 161 | break; |
160 | 162 | ||
@@ -326,6 +328,7 @@ static void zfcp_dbf_hba_view_response(char **p, | |||
326 | case FSF_QTCB_FCP_CMND: | 328 | case FSF_QTCB_FCP_CMND: |
327 | if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) | 329 | if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) |
328 | break; | 330 | break; |
331 | zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir); | ||
329 | zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); | 332 | zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); |
330 | zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); | 333 | zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); |
331 | *p += sprintf(*p, "\n"); | 334 | *p += sprintf(*p, "\n"); |
@@ -1005,7 +1008,7 @@ int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter) | |||
1005 | char dbf_name[DEBUG_MAX_NAME_LEN]; | 1008 | char dbf_name[DEBUG_MAX_NAME_LEN]; |
1006 | struct zfcp_dbf *dbf; | 1009 | struct zfcp_dbf *dbf; |
1007 | 1010 | ||
1008 | dbf = kmalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); | 1011 | dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); |
1009 | if (!dbf) | 1012 | if (!dbf) |
1010 | return -ENOMEM; | 1013 | return -ENOMEM; |
1011 | 1014 | ||
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 457e046f2d28..2bcc3403126a 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h | |||
@@ -111,6 +111,7 @@ struct zfcp_dbf_hba_record_response { | |||
111 | struct { | 111 | struct { |
112 | u64 cmnd; | 112 | u64 cmnd; |
113 | u64 serial; | 113 | u64 serial; |
114 | u32 data_dir; | ||
114 | } fcp; | 115 | } fcp; |
115 | struct { | 116 | struct { |
116 | u64 wwpn; | 117 | u64 wwpn; |
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 9fa1b064893e..e1c6b6e05a75 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <asm/ebcdic.h> | 37 | #include <asm/ebcdic.h> |
38 | #include <asm/sysinfo.h> | 38 | #include <asm/sysinfo.h> |
39 | #include "zfcp_fsf.h" | 39 | #include "zfcp_fsf.h" |
40 | #include "zfcp_fc.h" | ||
40 | #include "zfcp_qdio.h" | 41 | #include "zfcp_qdio.h" |
41 | 42 | ||
42 | struct zfcp_reqlist; | 43 | struct zfcp_reqlist; |
@@ -72,10 +73,12 @@ struct zfcp_reqlist; | |||
72 | 73 | ||
73 | /* adapter status */ | 74 | /* adapter status */ |
74 | #define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 | 75 | #define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 |
76 | #define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004 | ||
75 | #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 | 77 | #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 |
76 | #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 | 78 | #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 |
77 | #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 | 79 | #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 |
78 | #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 | 80 | #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 |
81 | #define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400 | ||
79 | 82 | ||
80 | /* remote port status */ | 83 | /* remote port status */ |
81 | #define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 | 84 | #define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 |
@@ -190,6 +193,7 @@ struct zfcp_adapter { | |||
190 | struct service_level service_level; | 193 | struct service_level service_level; |
191 | struct workqueue_struct *work_queue; | 194 | struct workqueue_struct *work_queue; |
192 | struct device_dma_parameters dma_parms; | 195 | struct device_dma_parameters dma_parms; |
196 | struct zfcp_fc_events events; | ||
193 | }; | 197 | }; |
194 | 198 | ||
195 | struct zfcp_port { | 199 | struct zfcp_port { |
@@ -212,6 +216,7 @@ struct zfcp_port { | |||
212 | struct work_struct test_link_work; | 216 | struct work_struct test_link_work; |
213 | struct work_struct rport_work; | 217 | struct work_struct rport_work; |
214 | enum { RPORT_NONE, RPORT_ADD, RPORT_DEL } rport_task; | 218 | enum { RPORT_NONE, RPORT_ADD, RPORT_DEL } rport_task; |
219 | unsigned int starget_id; | ||
215 | }; | 220 | }; |
216 | 221 | ||
217 | struct zfcp_unit { | 222 | struct zfcp_unit { |
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index fd068bc1bd0a..160b432c907f 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -141,9 +141,13 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, | |||
141 | if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED)) | 141 | if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED)) |
142 | need = ZFCP_ERP_ACTION_REOPEN_PORT; | 142 | need = ZFCP_ERP_ACTION_REOPEN_PORT; |
143 | /* fall through */ | 143 | /* fall through */ |
144 | case ZFCP_ERP_ACTION_REOPEN_PORT: | ||
145 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: | 144 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: |
146 | p_status = atomic_read(&port->status); | 145 | p_status = atomic_read(&port->status); |
146 | if (!(p_status & ZFCP_STATUS_COMMON_OPEN)) | ||
147 | need = ZFCP_ERP_ACTION_REOPEN_PORT; | ||
148 | /* fall through */ | ||
149 | case ZFCP_ERP_ACTION_REOPEN_PORT: | ||
150 | p_status = atomic_read(&port->status); | ||
147 | if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE) | 151 | if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE) |
148 | return 0; | 152 | return 0; |
149 | a_status = atomic_read(&adapter->status); | 153 | a_status = atomic_read(&adapter->status); |
@@ -893,8 +897,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) | |||
893 | } | 897 | } |
894 | if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) { | 898 | if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) { |
895 | port->d_id = 0; | 899 | port->d_id = 0; |
896 | _zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL); | 900 | return ZFCP_ERP_FAILED; |
897 | return ZFCP_ERP_EXIT; | ||
898 | } | 901 | } |
899 | /* fall through otherwise */ | 902 | /* fall through otherwise */ |
900 | } | 903 | } |
@@ -1188,19 +1191,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) | |||
1188 | 1191 | ||
1189 | switch (act->action) { | 1192 | switch (act->action) { |
1190 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 1193 | case ZFCP_ERP_ACTION_REOPEN_UNIT: |
1191 | if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) { | ||
1192 | get_device(&unit->dev); | ||
1193 | if (scsi_queue_work(unit->port->adapter->scsi_host, | ||
1194 | &unit->scsi_work) <= 0) | ||
1195 | put_device(&unit->dev); | ||
1196 | } | ||
1197 | put_device(&unit->dev); | 1194 | put_device(&unit->dev); |
1198 | break; | 1195 | break; |
1199 | 1196 | ||
1200 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: | ||
1201 | case ZFCP_ERP_ACTION_REOPEN_PORT: | 1197 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
1202 | if (result == ZFCP_ERP_SUCCEEDED) | 1198 | if (result == ZFCP_ERP_SUCCEEDED) |
1203 | zfcp_scsi_schedule_rport_register(port); | 1199 | zfcp_scsi_schedule_rport_register(port); |
1200 | /* fall through */ | ||
1201 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: | ||
1204 | put_device(&port->dev); | 1202 | put_device(&port->dev); |
1205 | break; | 1203 | break; |
1206 | 1204 | ||
@@ -1247,6 +1245,11 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) | |||
1247 | goto unlock; | 1245 | goto unlock; |
1248 | } | 1246 | } |
1249 | 1247 | ||
1248 | if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { | ||
1249 | retval = ZFCP_ERP_FAILED; | ||
1250 | goto check_target; | ||
1251 | } | ||
1252 | |||
1250 | zfcp_erp_action_to_running(erp_action); | 1253 | zfcp_erp_action_to_running(erp_action); |
1251 | 1254 | ||
1252 | /* no lock to allow for blocking operations */ | 1255 | /* no lock to allow for blocking operations */ |
@@ -1279,6 +1282,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) | |||
1279 | goto unlock; | 1282 | goto unlock; |
1280 | } | 1283 | } |
1281 | 1284 | ||
1285 | check_target: | ||
1282 | retval = zfcp_erp_strategy_check_target(erp_action, retval); | 1286 | retval = zfcp_erp_strategy_check_target(erp_action, retval); |
1283 | zfcp_erp_action_dequeue(erp_action); | 1287 | zfcp_erp_action_dequeue(erp_action); |
1284 | retval = zfcp_erp_strategy_statechange(erp_action, retval); | 1288 | retval = zfcp_erp_strategy_statechange(erp_action, retval); |
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 48a8f93b72f5..3b93239c6f69 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h | |||
@@ -96,6 +96,9 @@ extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *, | |||
96 | extern void zfcp_erp_timeout_handler(unsigned long); | 96 | extern void zfcp_erp_timeout_handler(unsigned long); |
97 | 97 | ||
98 | /* zfcp_fc.c */ | 98 | /* zfcp_fc.c */ |
99 | extern void zfcp_fc_enqueue_event(struct zfcp_adapter *, | ||
100 | enum fc_host_event_code event_code, u32); | ||
101 | extern void zfcp_fc_post_event(struct work_struct *); | ||
99 | extern void zfcp_fc_scan_ports(struct work_struct *); | 102 | extern void zfcp_fc_scan_ports(struct work_struct *); |
100 | extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); | 103 | extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); |
101 | extern void zfcp_fc_port_did_lookup(struct work_struct *); | 104 | extern void zfcp_fc_port_did_lookup(struct work_struct *); |
@@ -146,9 +149,10 @@ extern void zfcp_qdio_destroy(struct zfcp_qdio *); | |||
146 | extern int zfcp_qdio_sbal_get(struct zfcp_qdio *); | 149 | extern int zfcp_qdio_sbal_get(struct zfcp_qdio *); |
147 | extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *); | 150 | extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *); |
148 | extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *, | 151 | extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *, |
149 | struct scatterlist *, int); | 152 | struct scatterlist *); |
150 | extern int zfcp_qdio_open(struct zfcp_qdio *); | 153 | extern int zfcp_qdio_open(struct zfcp_qdio *); |
151 | extern void zfcp_qdio_close(struct zfcp_qdio *); | 154 | extern void zfcp_qdio_close(struct zfcp_qdio *); |
155 | extern void zfcp_qdio_siosl(struct zfcp_adapter *); | ||
152 | 156 | ||
153 | /* zfcp_scsi.c */ | 157 | /* zfcp_scsi.c */ |
154 | extern struct zfcp_data zfcp_data; | 158 | extern struct zfcp_data zfcp_data; |
@@ -159,7 +163,10 @@ extern void zfcp_scsi_rport_work(struct work_struct *); | |||
159 | extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); | 163 | extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); |
160 | extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *); | 164 | extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *); |
161 | extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *); | 165 | extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *); |
162 | extern void zfcp_scsi_scan(struct work_struct *); | 166 | extern void zfcp_scsi_scan(struct zfcp_unit *); |
167 | extern void zfcp_scsi_scan_work(struct work_struct *); | ||
168 | extern void zfcp_scsi_set_prot(struct zfcp_adapter *); | ||
169 | extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int); | ||
163 | 170 | ||
164 | /* zfcp_sysfs.c */ | 171 | /* zfcp_sysfs.c */ |
165 | extern struct attribute_group zfcp_sysfs_unit_attrs; | 172 | extern struct attribute_group zfcp_sysfs_unit_attrs; |
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 6f8ab43a4856..6f3ed2b9a349 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c | |||
@@ -23,6 +23,58 @@ static u32 zfcp_fc_rscn_range_mask[] = { | |||
23 | [ELS_ADDR_FMT_FAB] = 0x000000, | 23 | [ELS_ADDR_FMT_FAB] = 0x000000, |
24 | }; | 24 | }; |
25 | 25 | ||
26 | /** | ||
27 | * zfcp_fc_post_event - post event to userspace via fc_transport | ||
28 | * @work: work struct with enqueued events | ||
29 | */ | ||
30 | void zfcp_fc_post_event(struct work_struct *work) | ||
31 | { | ||
32 | struct zfcp_fc_event *event = NULL, *tmp = NULL; | ||
33 | LIST_HEAD(tmp_lh); | ||
34 | struct zfcp_fc_events *events = container_of(work, | ||
35 | struct zfcp_fc_events, work); | ||
36 | struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter, | ||
37 | events); | ||
38 | |||
39 | spin_lock_bh(&events->list_lock); | ||
40 | list_splice_init(&events->list, &tmp_lh); | ||
41 | spin_unlock_bh(&events->list_lock); | ||
42 | |||
43 | list_for_each_entry_safe(event, tmp, &tmp_lh, list) { | ||
44 | fc_host_post_event(adapter->scsi_host, fc_get_event_number(), | ||
45 | event->code, event->data); | ||
46 | list_del(&event->list); | ||
47 | kfree(event); | ||
48 | } | ||
49 | |||
50 | } | ||
51 | |||
52 | /** | ||
53 | * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context | ||
54 | * @adapter: The adapter where to enqueue the event | ||
55 | * @event_code: The event code (as defined in fc_host_event_code in | ||
56 | * scsi_transport_fc.h) | ||
57 | * @event_data: The event data (e.g. n_port page in case of els) | ||
58 | */ | ||
59 | void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter, | ||
60 | enum fc_host_event_code event_code, u32 event_data) | ||
61 | { | ||
62 | struct zfcp_fc_event *event; | ||
63 | |||
64 | event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC); | ||
65 | if (!event) | ||
66 | return; | ||
67 | |||
68 | event->code = event_code; | ||
69 | event->data = event_data; | ||
70 | |||
71 | spin_lock(&adapter->events.list_lock); | ||
72 | list_add_tail(&event->list, &adapter->events.list); | ||
73 | spin_unlock(&adapter->events.list_lock); | ||
74 | |||
75 | queue_work(adapter->work_queue, &adapter->events.work); | ||
76 | } | ||
77 | |||
26 | static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port) | 78 | static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port) |
27 | { | 79 | { |
28 | if (mutex_lock_interruptible(&wka_port->mutex)) | 80 | if (mutex_lock_interruptible(&wka_port->mutex)) |
@@ -148,6 +200,8 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) | |||
148 | afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK; | 200 | afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK; |
149 | _zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt], | 201 | _zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt], |
150 | page); | 202 | page); |
203 | zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN, | ||
204 | *(u32 *)page); | ||
151 | } | 205 | } |
152 | queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work); | 206 | queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work); |
153 | } | 207 | } |
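zfcp_fc_enqueue_event() and zfcp_fc_post_event() above implement the usual two-stage event delivery: the enqueue side runs from the adapter's interrupt handling path, so it uses GFP_ATOMIC and a plain spin_lock, while the work item splices the whole list off under the lock and only then calls the sleeping fc_host_post_event(). Stripped of the zfcp specifics, the pattern looks roughly like this; my_event, my_queue, my_enqueue and my_post are illustrative names:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_event {
        struct list_head list;
        u32 data;
};

struct my_queue {
        struct list_head list;
        spinlock_t lock;
        struct work_struct work;
};

/* enqueue side: softirq context here, so GFP_ATOMIC and no sleeping;
 * a hard-irq caller would need spin_lock_irqsave() on both sides */
static void my_enqueue(struct my_queue *q, u32 data)
{
        struct my_event *ev = kmalloc(sizeof(*ev), GFP_ATOMIC);

        if (!ev)
                return;                 /* event is silently dropped */
        ev->data = data;

        spin_lock(&q->lock);
        list_add_tail(&ev->list, &q->list);
        spin_unlock(&q->lock);

        schedule_work(&q->work);
}

/* work item: drain the list in one splice, then process without the lock */
static void my_post(struct work_struct *work)
{
        struct my_queue *q = container_of(work, struct my_queue, work);
        struct my_event *ev, *tmp;
        LIST_HEAD(tmp_lh);

        spin_lock_bh(&q->lock);
        list_splice_init(&q->list, &tmp_lh);
        spin_unlock_bh(&q->lock);

        list_for_each_entry_safe(ev, tmp, &tmp_lh, list) {
                /* deliver the event; sleeping calls are fine here */
                list_del(&ev->list);
                kfree(ev);
        }
}

static void my_queue_init(struct my_queue *q)
{
        INIT_LIST_HEAD(&q->list);
        spin_lock_init(&q->lock);
        INIT_WORK(&q->work, my_post);
}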
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index 0747b087390d..938d50360166 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h | |||
@@ -30,6 +30,30 @@ | |||
30 | #define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000) | 30 | #define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000) |
31 | 31 | ||
32 | /** | 32 | /** |
33 | * struct zfcp_fc_event - FC HBAAPI event for internal queueing from irq context | ||
34 | * @code: Event code | ||
35 | * @data: Event data | ||
36 | * @list: list_head for zfcp_fc_events list | ||
37 | */ | ||
38 | struct zfcp_fc_event { | ||
39 | enum fc_host_event_code code; | ||
40 | u32 data; | ||
41 | struct list_head list; | ||
42 | }; | ||
43 | |||
44 | /** | ||
45 | * struct zfcp_fc_events - Infrastructure for posting FC events from irq context | ||
46 | * @list: List for queueing of events from irq context to workqueue | ||
47 | * @list_lock: Lock for event list | ||
48 | * @work: work_struct for forwarding events in workqueue | ||
49 | */ | ||
50 | struct zfcp_fc_events { | ||
51 | struct list_head list; | ||
52 | spinlock_t list_lock; | ||
53 | struct work_struct work; | ||
54 | }; | ||
55 | |||
56 | /** | ||
33 | * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request | 57 | * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request |
34 | * @ct_hdr: FC GS common transport header | 58 | * @ct_hdr: FC GS common transport header |
35 | * @gid_pn: GID_PN request | 59 | * @gid_pn: GID_PN request |
@@ -196,6 +220,9 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi) | |||
196 | memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len); | 220 | memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len); |
197 | 221 | ||
198 | fcp->fc_dl = scsi_bufflen(scsi); | 222 | fcp->fc_dl = scsi_bufflen(scsi); |
223 | |||
224 | if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1) | ||
225 | fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8; | ||
199 | } | 226 | } |
200 | 227 | ||
201 | /** | 228 | /** |
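The zfcp_fc_scsi_to_fcp() hunk grows the FCP data length when DIF type 1 protection is active: each logical block carries an additional 8 bytes of protection information on the wire, so fc_dl is increased by 8 bytes per sector. For a 32 KiB request on a 512-byte-sector device that is 64 sectors and therefore 512 extra bytes. A tiny stand-alone check of the arithmetic (fcp_dl_with_dif is just an illustration, not a driver function):

#include <stdio.h>

/* bytes on the wire for a DIF type 1 protected transfer */
static unsigned int fcp_dl_with_dif(unsigned int data_len,
                                    unsigned int sector_size)
{
        return data_len + data_len / sector_size * 8;
}

int main(void)
{
        /* 32 KiB over 512-byte sectors: 64 sectors -> 512 extra bytes */
        printf("%u\n", fcp_dl_with_dif(32768, 512));    /* prints 33280 */
        return 0;
}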
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 71663fb77310..9d1d7d1842ce 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -21,6 +21,7 @@ | |||
21 | static void zfcp_fsf_request_timeout_handler(unsigned long data) | 21 | static void zfcp_fsf_request_timeout_handler(unsigned long data) |
22 | { | 22 | { |
23 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; | 23 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; |
24 | zfcp_qdio_siosl(adapter); | ||
24 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, | 25 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, |
25 | "fsrth_1", NULL); | 26 | "fsrth_1", NULL); |
26 | } | 27 | } |
@@ -274,6 +275,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) | |||
274 | break; | 275 | break; |
275 | case FSF_STATUS_READ_LINK_DOWN: | 276 | case FSF_STATUS_READ_LINK_DOWN: |
276 | zfcp_fsf_status_read_link_down(req); | 277 | zfcp_fsf_status_read_link_down(req); |
278 | zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0); | ||
277 | break; | 279 | break; |
278 | case FSF_STATUS_READ_LINK_UP: | 280 | case FSF_STATUS_READ_LINK_UP: |
279 | dev_info(&adapter->ccw_device->dev, | 281 | dev_info(&adapter->ccw_device->dev, |
@@ -286,6 +288,8 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) | |||
286 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | | 288 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | |
287 | ZFCP_STATUS_COMMON_ERP_FAILED, | 289 | ZFCP_STATUS_COMMON_ERP_FAILED, |
288 | "fssrh_2", req); | 290 | "fssrh_2", req); |
291 | zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0); | ||
292 | |||
289 | break; | 293 | break; |
290 | case FSF_STATUS_READ_NOTIFICATION_LOST: | 294 | case FSF_STATUS_READ_NOTIFICATION_LOST: |
291 | if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) | 295 | if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) |
@@ -323,6 +327,7 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) | |||
323 | dev_err(&req->adapter->ccw_device->dev, | 327 | dev_err(&req->adapter->ccw_device->dev, |
324 | "The FCP adapter reported a problem " | 328 | "The FCP adapter reported a problem " |
325 | "that cannot be recovered\n"); | 329 | "that cannot be recovered\n"); |
330 | zfcp_qdio_siosl(req->adapter); | ||
326 | zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req); | 331 | zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req); |
327 | break; | 332 | break; |
328 | } | 333 | } |
@@ -413,6 +418,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) | |||
413 | dev_err(&adapter->ccw_device->dev, | 418 | dev_err(&adapter->ccw_device->dev, |
414 | "0x%x is not a valid transfer protocol status\n", | 419 | "0x%x is not a valid transfer protocol status\n", |
415 | qtcb->prefix.prot_status); | 420 | qtcb->prefix.prot_status); |
421 | zfcp_qdio_siosl(adapter); | ||
416 | zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req); | 422 | zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req); |
417 | } | 423 | } |
418 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 424 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
@@ -495,7 +501,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) | |||
495 | fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; | 501 | fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; |
496 | 502 | ||
497 | adapter->hydra_version = bottom->adapter_type; | 503 | adapter->hydra_version = bottom->adapter_type; |
498 | adapter->timer_ticks = bottom->timer_interval; | 504 | adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK; |
499 | adapter->stat_read_buf_num = max(bottom->status_read_buf_num, | 505 | adapter->stat_read_buf_num = max(bottom->status_read_buf_num, |
500 | (u16)FSF_STATUS_READS_RECOM); | 506 | (u16)FSF_STATUS_READS_RECOM); |
501 | 507 | ||
@@ -523,6 +529,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) | |||
523 | return -EIO; | 529 | return -EIO; |
524 | } | 530 | } |
525 | 531 | ||
532 | zfcp_scsi_set_prot(adapter); | ||
533 | |||
526 | return 0; | 534 | return 0; |
527 | } | 535 | } |
528 | 536 | ||
@@ -732,7 +740,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) | |||
732 | 740 | ||
733 | zfcp_reqlist_add(adapter->req_list, req); | 741 | zfcp_reqlist_add(adapter->req_list, req); |
734 | 742 | ||
735 | req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count); | 743 | req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); |
736 | req->issued = get_clock(); | 744 | req->issued = get_clock(); |
737 | if (zfcp_qdio_send(qdio, &req->qdio_req)) { | 745 | if (zfcp_qdio_send(qdio, &req->qdio_req)) { |
738 | del_timer(&req->timer); | 746 | del_timer(&req->timer); |
@@ -959,8 +967,7 @@ static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio, | |||
959 | 967 | ||
960 | static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, | 968 | static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, |
961 | struct scatterlist *sg_req, | 969 | struct scatterlist *sg_req, |
962 | struct scatterlist *sg_resp, | 970 | struct scatterlist *sg_resp) |
963 | int max_sbals) | ||
964 | { | 971 | { |
965 | struct zfcp_adapter *adapter = req->adapter; | 972 | struct zfcp_adapter *adapter = req->adapter; |
966 | u32 feat = adapter->adapter_features; | 973 | u32 feat = adapter->adapter_features; |
@@ -983,18 +990,19 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, | |||
983 | return 0; | 990 | return 0; |
984 | } | 991 | } |
985 | 992 | ||
986 | bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, | 993 | bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, sg_req); |
987 | sg_req, max_sbals); | ||
988 | if (bytes <= 0) | 994 | if (bytes <= 0) |
989 | return -EIO; | 995 | return -EIO; |
996 | zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); | ||
990 | req->qtcb->bottom.support.req_buf_length = bytes; | 997 | req->qtcb->bottom.support.req_buf_length = bytes; |
991 | zfcp_qdio_skip_to_last_sbale(&req->qdio_req); | 998 | zfcp_qdio_skip_to_last_sbale(&req->qdio_req); |
992 | 999 | ||
993 | bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, | 1000 | bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, |
994 | sg_resp, max_sbals); | 1001 | sg_resp); |
995 | req->qtcb->bottom.support.resp_buf_length = bytes; | 1002 | req->qtcb->bottom.support.resp_buf_length = bytes; |
996 | if (bytes <= 0) | 1003 | if (bytes <= 0) |
997 | return -EIO; | 1004 | return -EIO; |
1005 | zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); | ||
998 | 1006 | ||
999 | return 0; | 1007 | return 0; |
1000 | } | 1008 | } |
@@ -1002,11 +1010,11 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, | |||
1002 | static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, | 1010 | static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, |
1003 | struct scatterlist *sg_req, | 1011 | struct scatterlist *sg_req, |
1004 | struct scatterlist *sg_resp, | 1012 | struct scatterlist *sg_resp, |
1005 | int max_sbals, unsigned int timeout) | 1013 | unsigned int timeout) |
1006 | { | 1014 | { |
1007 | int ret; | 1015 | int ret; |
1008 | 1016 | ||
1009 | ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals); | 1017 | ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp); |
1010 | if (ret) | 1018 | if (ret) |
1011 | return ret; | 1019 | return ret; |
1012 | 1020 | ||
@@ -1046,8 +1054,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, | |||
1046 | } | 1054 | } |
1047 | 1055 | ||
1048 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1056 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1049 | ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, | 1057 | ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout); |
1050 | ZFCP_FSF_MAX_SBALS_PER_REQ, timeout); | ||
1051 | if (ret) | 1058 | if (ret) |
1052 | goto failed_send; | 1059 | goto failed_send; |
1053 | 1060 | ||
@@ -1143,7 +1150,10 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, | |||
1143 | } | 1150 | } |
1144 | 1151 | ||
1145 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1152 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1146 | ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout); | 1153 | |
1154 | zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2); | ||
1155 | |||
1156 | ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout); | ||
1147 | 1157 | ||
1148 | if (ret) | 1158 | if (ret) |
1149 | goto failed_send; | 1159 | goto failed_send; |
@@ -2025,7 +2035,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) | |||
2025 | blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC; | 2035 | blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC; |
2026 | if (req->status & ZFCP_STATUS_FSFREQ_ERROR) | 2036 | if (req->status & ZFCP_STATUS_FSFREQ_ERROR) |
2027 | blktrc.flags |= ZFCP_BLK_REQ_ERROR; | 2037 | blktrc.flags |= ZFCP_BLK_REQ_ERROR; |
2028 | blktrc.inb_usage = req->qdio_req.qdio_inb_usage; | 2038 | blktrc.inb_usage = 0; |
2029 | blktrc.outb_usage = req->qdio_req.qdio_outb_usage; | 2039 | blktrc.outb_usage = req->qdio_req.qdio_outb_usage; |
2030 | 2040 | ||
2031 | if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && | 2041 | if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && |
@@ -2035,9 +2045,13 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) | |||
2035 | blktrc.fabric_lat = lat_in->fabric_lat * ticks; | 2045 | blktrc.fabric_lat = lat_in->fabric_lat * ticks; |
2036 | 2046 | ||
2037 | switch (req->qtcb->bottom.io.data_direction) { | 2047 | switch (req->qtcb->bottom.io.data_direction) { |
2048 | case FSF_DATADIR_DIF_READ_STRIP: | ||
2049 | case FSF_DATADIR_DIF_READ_CONVERT: | ||
2038 | case FSF_DATADIR_READ: | 2050 | case FSF_DATADIR_READ: |
2039 | lat = &unit->latencies.read; | 2051 | lat = &unit->latencies.read; |
2040 | break; | 2052 | break; |
2053 | case FSF_DATADIR_DIF_WRITE_INSERT: | ||
2054 | case FSF_DATADIR_DIF_WRITE_CONVERT: | ||
2041 | case FSF_DATADIR_WRITE: | 2055 | case FSF_DATADIR_WRITE: |
2042 | lat = &unit->latencies.write; | 2056 | lat = &unit->latencies.write; |
2043 | break; | 2057 | break; |
@@ -2078,6 +2092,21 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) | |||
2078 | goto skip_fsfstatus; | 2092 | goto skip_fsfstatus; |
2079 | } | 2093 | } |
2080 | 2094 | ||
2095 | switch (req->qtcb->header.fsf_status) { | ||
2096 | case FSF_INCONSISTENT_PROT_DATA: | ||
2097 | case FSF_INVALID_PROT_PARM: | ||
2098 | set_host_byte(scpnt, DID_ERROR); | ||
2099 | goto skip_fsfstatus; | ||
2100 | case FSF_BLOCK_GUARD_CHECK_FAILURE: | ||
2101 | zfcp_scsi_dif_sense_error(scpnt, 0x1); | ||
2102 | goto skip_fsfstatus; | ||
2103 | case FSF_APP_TAG_CHECK_FAILURE: | ||
2104 | zfcp_scsi_dif_sense_error(scpnt, 0x2); | ||
2105 | goto skip_fsfstatus; | ||
2106 | case FSF_REF_TAG_CHECK_FAILURE: | ||
2107 | zfcp_scsi_dif_sense_error(scpnt, 0x3); | ||
2108 | goto skip_fsfstatus; | ||
2109 | } | ||
2081 | fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; | 2110 | fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; |
2082 | zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); | 2111 | zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); |
2083 | 2112 | ||
@@ -2187,6 +2216,44 @@ skip_fsfstatus: | |||
2187 | } | 2216 | } |
2188 | } | 2217 | } |
2189 | 2218 | ||
2219 | static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir) | ||
2220 | { | ||
2221 | switch (scsi_get_prot_op(scsi_cmnd)) { | ||
2222 | case SCSI_PROT_NORMAL: | ||
2223 | switch (scsi_cmnd->sc_data_direction) { | ||
2224 | case DMA_NONE: | ||
2225 | *data_dir = FSF_DATADIR_CMND; | ||
2226 | break; | ||
2227 | case DMA_FROM_DEVICE: | ||
2228 | *data_dir = FSF_DATADIR_READ; | ||
2229 | break; | ||
2230 | case DMA_TO_DEVICE: | ||
2231 | *data_dir = FSF_DATADIR_WRITE; | ||
2232 | break; | ||
2233 | case DMA_BIDIRECTIONAL: | ||
2234 | return -EINVAL; | ||
2235 | } | ||
2236 | break; | ||
2237 | |||
2238 | case SCSI_PROT_READ_STRIP: | ||
2239 | *data_dir = FSF_DATADIR_DIF_READ_STRIP; | ||
2240 | break; | ||
2241 | case SCSI_PROT_WRITE_INSERT: | ||
2242 | *data_dir = FSF_DATADIR_DIF_WRITE_INSERT; | ||
2243 | break; | ||
2244 | case SCSI_PROT_READ_PASS: | ||
2245 | *data_dir = FSF_DATADIR_DIF_READ_CONVERT; | ||
2246 | break; | ||
2247 | case SCSI_PROT_WRITE_PASS: | ||
2248 | *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT; | ||
2249 | break; | ||
2250 | default: | ||
2251 | return -EINVAL; | ||
2252 | } | ||
2253 | |||
2254 | return 0; | ||
2255 | } | ||
2256 | |||
2190 | /** | 2257 | /** |
2191 | * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) | 2258 | * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) |
2192 | * @unit: unit where command is sent to | 2259 | * @unit: unit where command is sent to |
@@ -2198,16 +2265,17 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, | |||
2198 | struct zfcp_fsf_req *req; | 2265 | struct zfcp_fsf_req *req; |
2199 | struct fcp_cmnd *fcp_cmnd; | 2266 | struct fcp_cmnd *fcp_cmnd; |
2200 | unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; | 2267 | unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; |
2201 | int real_bytes, retval = -EIO; | 2268 | int real_bytes, retval = -EIO, dix_bytes = 0; |
2202 | struct zfcp_adapter *adapter = unit->port->adapter; | 2269 | struct zfcp_adapter *adapter = unit->port->adapter; |
2203 | struct zfcp_qdio *qdio = adapter->qdio; | 2270 | struct zfcp_qdio *qdio = adapter->qdio; |
2271 | struct fsf_qtcb_bottom_io *io; | ||
2204 | 2272 | ||
2205 | if (unlikely(!(atomic_read(&unit->status) & | 2273 | if (unlikely(!(atomic_read(&unit->status) & |
2206 | ZFCP_STATUS_COMMON_UNBLOCKED))) | 2274 | ZFCP_STATUS_COMMON_UNBLOCKED))) |
2207 | return -EBUSY; | 2275 | return -EBUSY; |
2208 | 2276 | ||
2209 | spin_lock(&qdio->req_q_lock); | 2277 | spin_lock(&qdio->req_q_lock); |
2210 | if (atomic_read(&qdio->req_q.count) <= 0) { | 2278 | if (atomic_read(&qdio->req_q_free) <= 0) { |
2211 | atomic_inc(&qdio->req_q_full); | 2279 | atomic_inc(&qdio->req_q_full); |
2212 | goto out; | 2280 | goto out; |
2213 | } | 2281 | } |
@@ -2223,56 +2291,45 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, | |||
2223 | goto out; | 2291 | goto out; |
2224 | } | 2292 | } |
2225 | 2293 | ||
2294 | scsi_cmnd->host_scribble = (unsigned char *) req->req_id; | ||
2295 | |||
2296 | io = &req->qtcb->bottom.io; | ||
2226 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 2297 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
2227 | req->unit = unit; | 2298 | req->unit = unit; |
2228 | req->data = scsi_cmnd; | 2299 | req->data = scsi_cmnd; |
2229 | req->handler = zfcp_fsf_send_fcp_command_handler; | 2300 | req->handler = zfcp_fsf_send_fcp_command_handler; |
2230 | req->qtcb->header.lun_handle = unit->handle; | 2301 | req->qtcb->header.lun_handle = unit->handle; |
2231 | req->qtcb->header.port_handle = unit->port->handle; | 2302 | req->qtcb->header.port_handle = unit->port->handle; |
2232 | req->qtcb->bottom.io.service_class = FSF_CLASS_3; | 2303 | io->service_class = FSF_CLASS_3; |
2233 | req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; | 2304 | io->fcp_cmnd_length = FCP_CMND_LEN; |
2234 | 2305 | ||
2235 | scsi_cmnd->host_scribble = (unsigned char *) req->req_id; | 2306 | if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) { |
2236 | 2307 | io->data_block_length = scsi_cmnd->device->sector_size; | |
2237 | /* | 2308 | io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF; |
2238 | * set depending on data direction: | ||
2239 | * data direction bits in SBALE (SB Type) | ||
2240 | * data direction bits in QTCB | ||
2241 | */ | ||
2242 | switch (scsi_cmnd->sc_data_direction) { | ||
2243 | case DMA_NONE: | ||
2244 | req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; | ||
2245 | break; | ||
2246 | case DMA_FROM_DEVICE: | ||
2247 | req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; | ||
2248 | break; | ||
2249 | case DMA_TO_DEVICE: | ||
2250 | req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE; | ||
2251 | break; | ||
2252 | case DMA_BIDIRECTIONAL: | ||
2253 | goto failed_scsi_cmnd; | ||
2254 | } | 2309 | } |
2255 | 2310 | ||
2311 | zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction); | ||
2312 | |||
2256 | get_device(&unit->dev); | 2313 | get_device(&unit->dev); |
2257 | 2314 | ||
2258 | fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; | 2315 | fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; |
2259 | zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); | 2316 | zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); |
2260 | 2317 | ||
2318 | if (scsi_prot_sg_count(scsi_cmnd)) { | ||
2319 | zfcp_qdio_set_data_div(qdio, &req->qdio_req, | ||
2320 | scsi_prot_sg_count(scsi_cmnd)); | ||
2321 | dix_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, | ||
2322 | scsi_prot_sglist(scsi_cmnd)); | ||
2323 | io->prot_data_length = dix_bytes; | ||
2324 | } | ||
2325 | |||
2261 | real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, | 2326 | real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, |
2262 | scsi_sglist(scsi_cmnd), | 2327 | scsi_sglist(scsi_cmnd)); |
2263 | ZFCP_FSF_MAX_SBALS_PER_REQ); | 2328 | |
2264 | if (unlikely(real_bytes < 0)) { | 2329 | if (unlikely(real_bytes < 0) || unlikely(dix_bytes < 0)) |
2265 | if (req->qdio_req.sbal_number >= ZFCP_FSF_MAX_SBALS_PER_REQ) { | ||
2266 | dev_err(&adapter->ccw_device->dev, | ||
2267 | "Oversize data package, unit 0x%016Lx " | ||
2268 | "on port 0x%016Lx closed\n", | ||
2269 | (unsigned long long)unit->fcp_lun, | ||
2270 | (unsigned long long)unit->port->wwpn); | ||
2271 | zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req); | ||
2272 | retval = -EINVAL; | ||
2273 | } | ||
2274 | goto failed_scsi_cmnd; | 2330 | goto failed_scsi_cmnd; |
2275 | } | 2331 | |
2332 | zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); | ||
2276 | 2333 | ||
2277 | retval = zfcp_fsf_req_send(req); | 2334 | retval = zfcp_fsf_req_send(req); |
2278 | if (unlikely(retval)) | 2335 | if (unlikely(retval)) |
@@ -2391,13 +2448,13 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
2391 | bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; | 2448 | bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; |
2392 | bottom->option = fsf_cfdc->option; | 2449 | bottom->option = fsf_cfdc->option; |
2393 | 2450 | ||
2394 | bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, | 2451 | bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg); |
2395 | fsf_cfdc->sg, | 2452 | |
2396 | ZFCP_FSF_MAX_SBALS_PER_REQ); | ||
2397 | if (bytes != ZFCP_CFDC_MAX_SIZE) { | 2453 | if (bytes != ZFCP_CFDC_MAX_SIZE) { |
2398 | zfcp_fsf_req_free(req); | 2454 | zfcp_fsf_req_free(req); |
2399 | goto out; | 2455 | goto out; |
2400 | } | 2456 | } |
2457 | zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); | ||
2401 | 2458 | ||
2402 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); | 2459 | zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); |
2403 | retval = zfcp_fsf_req_send(req); | 2460 | retval = zfcp_fsf_req_send(req); |
@@ -2419,7 +2476,7 @@ out: | |||
2419 | void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) | 2476 | void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) |
2420 | { | 2477 | { |
2421 | struct zfcp_adapter *adapter = qdio->adapter; | 2478 | struct zfcp_adapter *adapter = qdio->adapter; |
2422 | struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx]; | 2479 | struct qdio_buffer *sbal = qdio->res_q[sbal_idx]; |
2423 | struct qdio_buffer_element *sbale; | 2480 | struct qdio_buffer_element *sbale; |
2424 | struct zfcp_fsf_req *fsf_req; | 2481 | struct zfcp_fsf_req *fsf_req; |
2425 | unsigned long req_id; | 2482 | unsigned long req_id; |
@@ -2431,17 +2488,17 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) | |||
2431 | req_id = (unsigned long) sbale->addr; | 2488 | req_id = (unsigned long) sbale->addr; |
2432 | fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id); | 2489 | fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id); |
2433 | 2490 | ||
2434 | if (!fsf_req) | 2491 | if (!fsf_req) { |
2435 | /* | 2492 | /* |
2436 | * Unknown request means that we have potentially memory | 2493 | * Unknown request means that we have potentially memory |
2437 | * corruption and must stop the machine immediately. | 2494 | * corruption and must stop the machine immediately. |
2438 | */ | 2495 | */ |
2496 | zfcp_qdio_siosl(adapter); | ||
2439 | panic("error: unknown req_id (%lx) on adapter %s.\n", | 2497 | panic("error: unknown req_id (%lx) on adapter %s.\n", |
2440 | req_id, dev_name(&adapter->ccw_device->dev)); | 2498 | req_id, dev_name(&adapter->ccw_device->dev)); |
2499 | } | ||
2441 | 2500 | ||
2442 | fsf_req->qdio_req.sbal_response = sbal_idx; | 2501 | fsf_req->qdio_req.sbal_response = sbal_idx; |
2443 | fsf_req->qdio_req.qdio_inb_usage = | ||
2444 | atomic_read(&qdio->resp_q.count); | ||
2445 | zfcp_fsf_req_complete(fsf_req); | 2502 | zfcp_fsf_req_complete(fsf_req); |
2446 | 2503 | ||
2447 | if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) | 2504 | if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) |
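The new DIF-related FSF statuses handled in zfcp_fsf_send_fcp_command_task_handler() above are reported to the SCSI midlayer via zfcp_scsi_dif_sense_error(), whose body is outside this hunk. The arguments 0x1, 0x2 and 0x3 correspond to the T10 additional sense code 0x10 qualifiers for guard, application tag and reference tag check failures. A hypothetical sketch of such a helper, built only from standard midlayer calls; the real implementation in zfcp_scsi.c may differ:

static void example_dif_sense_error(struct scsi_cmnd *scpnt, int ascq)
{
	/* ILLEGAL REQUEST, ASC 0x10: guard/app tag/ref tag check failed */
	scsi_build_sense_buffer(1, scpnt->sense_buffer, ILLEGAL_REQUEST,
				0x10, ascq);
	set_driver_byte(scpnt, DRIVER_SENSE);
	set_host_byte(scpnt, DID_SOFT_ERROR);
	scpnt->result |= SAM_STAT_CHECK_CONDITION;
}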
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index 519083fd6e89..db8c85382dca 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h | |||
@@ -80,11 +80,15 @@ | |||
80 | #define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061 | 80 | #define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061 |
81 | #define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062 | 81 | #define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062 |
82 | #define FSF_SBAL_MISMATCH 0x00000063 | 82 | #define FSF_SBAL_MISMATCH 0x00000063 |
83 | #define FSF_INCONSISTENT_PROT_DATA 0x00000070 | ||
84 | #define FSF_INVALID_PROT_PARM 0x00000071 | ||
85 | #define FSF_BLOCK_GUARD_CHECK_FAILURE 0x00000081 | ||
86 | #define FSF_APP_TAG_CHECK_FAILURE 0x00000082 | ||
87 | #define FSF_REF_TAG_CHECK_FAILURE 0x00000083 | ||
83 | #define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD | 88 | #define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD |
84 | #define FSF_UNKNOWN_COMMAND 0x000000E2 | 89 | #define FSF_UNKNOWN_COMMAND 0x000000E2 |
85 | #define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3 | 90 | #define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3 |
86 | #define FSF_INVALID_COMMAND_OPTION 0x000000E5 | 91 | #define FSF_INVALID_COMMAND_OPTION 0x000000E5 |
87 | /* #define FSF_ERROR 0x000000FF */ | ||
88 | 92 | ||
89 | #define FSF_PROT_STATUS_QUAL_SIZE 16 | 93 | #define FSF_PROT_STATUS_QUAL_SIZE 16 |
90 | #define FSF_STATUS_QUALIFIER_SIZE 16 | 94 | #define FSF_STATUS_QUALIFIER_SIZE 16 |
@@ -147,18 +151,17 @@ | |||
147 | #define FSF_DATADIR_WRITE 0x00000001 | 151 | #define FSF_DATADIR_WRITE 0x00000001 |
148 | #define FSF_DATADIR_READ 0x00000002 | 152 | #define FSF_DATADIR_READ 0x00000002 |
149 | #define FSF_DATADIR_CMND 0x00000004 | 153 | #define FSF_DATADIR_CMND 0x00000004 |
154 | #define FSF_DATADIR_DIF_WRITE_INSERT 0x00000009 | ||
155 | #define FSF_DATADIR_DIF_READ_STRIP 0x0000000a | ||
156 | #define FSF_DATADIR_DIF_WRITE_CONVERT 0x0000000b | ||
157 | #define FSF_DATADIR_DIF_READ_CONVERT 0x0000000c | ||
158 | |||
159 | /* data protection control flags */ | ||
160 | #define FSF_APP_TAG_CHECK_ENABLE 0x10 | ||
150 | 161 | ||
151 | /* fc service class */ | 162 | /* fc service class */ |
152 | #define FSF_CLASS_3 0x00000003 | 163 | #define FSF_CLASS_3 0x00000003 |
153 | 164 | ||
154 | /* SBAL chaining */ | ||
155 | #define ZFCP_FSF_MAX_SBALS_PER_REQ 36 | ||
156 | |||
157 | /* max. number of (data buffer) SBALEs in largest SBAL chain | ||
158 | * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */ | ||
159 | #define ZFCP_FSF_MAX_SBALES_PER_REQ \ | ||
160 | (ZFCP_FSF_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2) | ||
161 | |||
162 | /* logging space behind QTCB */ | 165 | /* logging space behind QTCB */ |
163 | #define FSF_QTCB_LOG_SIZE 1024 | 166 | #define FSF_QTCB_LOG_SIZE 1024 |
164 | 167 | ||
@@ -170,6 +173,8 @@ | |||
170 | #define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020 | 173 | #define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020 |
171 | #define FSF_FEATURE_UPDATE_ALERT 0x00000100 | 174 | #define FSF_FEATURE_UPDATE_ALERT 0x00000100 |
172 | #define FSF_FEATURE_MEASUREMENT_DATA 0x00000200 | 175 | #define FSF_FEATURE_MEASUREMENT_DATA 0x00000200 |
176 | #define FSF_FEATURE_DIF_PROT_TYPE1 0x00010000 | ||
177 | #define FSF_FEATURE_DIX_PROT_TCPIP 0x00020000 | ||
173 | 178 | ||
174 | /* host connection features */ | 179 | /* host connection features */ |
175 | #define FSF_FEATURE_NPIV_MODE 0x00000001 | 180 | #define FSF_FEATURE_NPIV_MODE 0x00000001 |
@@ -324,9 +329,14 @@ struct fsf_qtcb_header { | |||
324 | struct fsf_qtcb_bottom_io { | 329 | struct fsf_qtcb_bottom_io { |
325 | u32 data_direction; | 330 | u32 data_direction; |
326 | u32 service_class; | 331 | u32 service_class; |
327 | u8 res1[8]; | 332 | u8 res1; |
333 | u8 data_prot_flags; | ||
334 | u16 app_tag_value; | ||
335 | u32 ref_tag_value; | ||
328 | u32 fcp_cmnd_length; | 336 | u32 fcp_cmnd_length; |
329 | u8 res2[12]; | 337 | u32 data_block_length; |
338 | u32 prot_data_length; | ||
339 | u8 res2[4]; | ||
330 | u8 fcp_cmnd[FSF_FCP_CMND_SIZE]; | 340 | u8 fcp_cmnd[FSF_FCP_CMND_SIZE]; |
331 | u8 fcp_rsp[FSF_FCP_RSP_SIZE]; | 341 | u8 fcp_rsp[FSF_FCP_RSP_SIZE]; |
332 | u8 res3[64]; | 342 | u8 res3[64]; |
@@ -352,6 +362,8 @@ struct fsf_qtcb_bottom_support { | |||
352 | u8 els[256]; | 362 | u8 els[256]; |
353 | } __attribute__ ((packed)); | 363 | } __attribute__ ((packed)); |
354 | 364 | ||
365 | #define ZFCP_FSF_TIMER_INT_MASK 0x3FFF | ||
366 | |||
355 | struct fsf_qtcb_bottom_config { | 367 | struct fsf_qtcb_bottom_config { |
356 | u32 lic_version; | 368 | u32 lic_version; |
357 | u32 feature_selection; | 369 | u32 feature_selection; |
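The reworked struct fsf_qtcb_bottom_io keeps its size and the offsets of fcp_cmnd and fcp_rsp unchanged: the former res1[8] becomes res1 + data_prot_flags + app_tag_value + ref_tag_value (1 + 1 + 2 + 4 = 8 bytes), and the former res2[12] becomes data_block_length + prot_data_length + res2[4] (4 + 4 + 4 = 12 bytes). Separately, ZFCP_FSF_TIMER_INT_MASK (0x3FFF) limits adapter->timer_ticks to the low 14 bits of the reported timer_interval, matching the exchange-config change in zfcp_fsf.c above.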
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 6fa5e0453176..b2635759721c 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -30,12 +30,15 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal) | |||
30 | return 0; | 30 | return 0; |
31 | } | 31 | } |
32 | 32 | ||
33 | static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) | 33 | static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id, |
34 | unsigned int qdio_err) | ||
34 | { | 35 | { |
35 | struct zfcp_adapter *adapter = qdio->adapter; | 36 | struct zfcp_adapter *adapter = qdio->adapter; |
36 | 37 | ||
37 | dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); | 38 | dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); |
38 | 39 | ||
40 | if (qdio_err & QDIO_ERROR_SLSB_STATE) | ||
41 | zfcp_qdio_siosl(adapter); | ||
39 | zfcp_erp_adapter_reopen(adapter, | 42 | zfcp_erp_adapter_reopen(adapter, |
40 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | | 43 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | |
41 | ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL); | 44 | ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL); |
@@ -55,72 +58,47 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) | |||
55 | static inline void zfcp_qdio_account(struct zfcp_qdio *qdio) | 58 | static inline void zfcp_qdio_account(struct zfcp_qdio *qdio) |
56 | { | 59 | { |
57 | unsigned long long now, span; | 60 | unsigned long long now, span; |
58 | int free, used; | 61 | int used; |
59 | 62 | ||
60 | spin_lock(&qdio->stat_lock); | 63 | spin_lock(&qdio->stat_lock); |
61 | now = get_clock_monotonic(); | 64 | now = get_clock_monotonic(); |
62 | span = (now - qdio->req_q_time) >> 12; | 65 | span = (now - qdio->req_q_time) >> 12; |
63 | free = atomic_read(&qdio->req_q.count); | 66 | used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free); |
64 | used = QDIO_MAX_BUFFERS_PER_Q - free; | ||
65 | qdio->req_q_util += used * span; | 67 | qdio->req_q_util += used * span; |
66 | qdio->req_q_time = now; | 68 | qdio->req_q_time = now; |
67 | spin_unlock(&qdio->stat_lock); | 69 | spin_unlock(&qdio->stat_lock); |
68 | } | 70 | } |
69 | 71 | ||
70 | static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, | 72 | static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, |
71 | int queue_no, int first, int count, | 73 | int queue_no, int idx, int count, |
72 | unsigned long parm) | 74 | unsigned long parm) |
73 | { | 75 | { |
74 | struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; | 76 | struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; |
75 | struct zfcp_qdio_queue *queue = &qdio->req_q; | ||
76 | 77 | ||
77 | if (unlikely(qdio_err)) { | 78 | if (unlikely(qdio_err)) { |
78 | zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, | 79 | zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count); |
79 | count); | 80 | zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err); |
80 | zfcp_qdio_handler_error(qdio, "qdireq1"); | ||
81 | return; | 81 | return; |
82 | } | 82 | } |
83 | 83 | ||
84 | /* cleanup all SBALs being program-owned now */ | 84 | /* cleanup all SBALs being program-owned now */ |
85 | zfcp_qdio_zero_sbals(queue->sbal, first, count); | 85 | zfcp_qdio_zero_sbals(qdio->req_q, idx, count); |
86 | 86 | ||
87 | zfcp_qdio_account(qdio); | 87 | zfcp_qdio_account(qdio); |
88 | atomic_add(count, &queue->count); | 88 | atomic_add(count, &qdio->req_q_free); |
89 | wake_up(&qdio->req_q_wq); | 89 | wake_up(&qdio->req_q_wq); |
90 | } | 90 | } |
91 | 91 | ||
92 | static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed) | ||
93 | { | ||
94 | struct zfcp_qdio_queue *queue = &qdio->resp_q; | ||
95 | struct ccw_device *cdev = qdio->adapter->ccw_device; | ||
96 | u8 count, start = queue->first; | ||
97 | unsigned int retval; | ||
98 | |||
99 | count = atomic_read(&queue->count) + processed; | ||
100 | |||
101 | retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count); | ||
102 | |||
103 | if (unlikely(retval)) { | ||
104 | atomic_set(&queue->count, count); | ||
105 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL); | ||
106 | } else { | ||
107 | queue->first += count; | ||
108 | queue->first %= QDIO_MAX_BUFFERS_PER_Q; | ||
109 | atomic_set(&queue->count, 0); | ||
110 | } | ||
111 | } | ||
112 | |||
113 | static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, | 92 | static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, |
114 | int queue_no, int first, int count, | 93 | int queue_no, int idx, int count, |
115 | unsigned long parm) | 94 | unsigned long parm) |
116 | { | 95 | { |
117 | struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; | 96 | struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; |
118 | int sbal_idx, sbal_no; | 97 | int sbal_idx, sbal_no; |
119 | 98 | ||
120 | if (unlikely(qdio_err)) { | 99 | if (unlikely(qdio_err)) { |
121 | zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, | 100 | zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count); |
122 | count); | 101 | zfcp_qdio_handler_error(qdio, "qdires1", qdio_err); |
123 | zfcp_qdio_handler_error(qdio, "qdires1"); | ||
124 | return; | 102 | return; |
125 | } | 103 | } |
126 | 104 | ||
@@ -129,25 +107,16 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, | |||
129 | * returned by QDIO layer | 107 | * returned by QDIO layer |
130 | */ | 108 | */ |
131 | for (sbal_no = 0; sbal_no < count; sbal_no++) { | 109 | for (sbal_no = 0; sbal_no < count; sbal_no++) { |
132 | sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; | 110 | sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; |
133 | /* go through all SBALEs of SBAL */ | 111 | /* go through all SBALEs of SBAL */ |
134 | zfcp_fsf_reqid_check(qdio, sbal_idx); | 112 | zfcp_fsf_reqid_check(qdio, sbal_idx); |
135 | } | 113 | } |
136 | 114 | ||
137 | /* | 115 | /* |
138 | * put range of SBALs back to response queue | 116 | * put SBALs back to response queue |
139 | * (including SBALs which have already been free before) | ||
140 | */ | 117 | */ |
141 | zfcp_qdio_resp_put_back(qdio, count); | 118 | if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count)) |
142 | } | 119 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2", NULL); |
143 | |||
144 | static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, | ||
145 | struct zfcp_qdio_req *q_req, int max_sbals) | ||
146 | { | ||
147 | int count = atomic_read(&qdio->req_q.count); | ||
148 | count = min(count, max_sbals); | ||
149 | q_req->sbal_limit = (q_req->sbal_first + count - 1) | ||
150 | % QDIO_MAX_BUFFERS_PER_Q; | ||
151 | } | 120 | } |
152 | 121 | ||
153 | static struct qdio_buffer_element * | 122 | static struct qdio_buffer_element * |
@@ -173,6 +142,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) | |||
173 | 142 | ||
174 | /* keep this requests number of SBALs up-to-date */ | 143 | /* keep this requests number of SBALs up-to-date */ |
175 | q_req->sbal_number++; | 144 | q_req->sbal_number++; |
145 | BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ); | ||
176 | 146 | ||
177 | /* start at first SBALE of new SBAL */ | 147 | /* start at first SBALE of new SBAL */ |
178 | q_req->sbale_curr = 0; | 148 | q_req->sbale_curr = 0; |
@@ -193,17 +163,6 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) | |||
193 | return zfcp_qdio_sbale_curr(qdio, q_req); | 163 | return zfcp_qdio_sbale_curr(qdio, q_req); |
194 | } | 164 | } |
195 | 165 | ||
196 | static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, | ||
197 | struct zfcp_qdio_req *q_req) | ||
198 | { | ||
199 | struct qdio_buffer **sbal = qdio->req_q.sbal; | ||
200 | int first = q_req->sbal_first; | ||
201 | int last = q_req->sbal_last; | ||
202 | int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % | ||
203 | QDIO_MAX_BUFFERS_PER_Q + 1; | ||
204 | zfcp_qdio_zero_sbals(sbal, first, count); | ||
205 | } | ||
206 | |||
207 | /** | 166 | /** |
208 | * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list | 167 | * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list |
209 | * @qdio: pointer to struct zfcp_qdio | 168 | * @qdio: pointer to struct zfcp_qdio |
@@ -213,14 +172,11 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, | |||
213 | * Returns: number of bytes, or error (negative) | 172 | * Returns: number of bytes, or error (negative) |
214 | */ | 173 | */ |
215 | int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, | 174 | int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, |
216 | struct scatterlist *sg, int max_sbals) | 175 | struct scatterlist *sg) |
217 | { | 176 | { |
218 | struct qdio_buffer_element *sbale; | 177 | struct qdio_buffer_element *sbale; |
219 | int bytes = 0; | 178 | int bytes = 0; |
220 | 179 | ||
221 | /* figure out last allowed SBAL */ | ||
222 | zfcp_qdio_sbal_limit(qdio, q_req, max_sbals); | ||
223 | |||
224 | /* set storage-block type for this request */ | 180 | /* set storage-block type for this request */ |
225 | sbale = zfcp_qdio_sbale_req(qdio, q_req); | 181 | sbale = zfcp_qdio_sbale_req(qdio, q_req); |
226 | sbale->flags |= q_req->sbtype; | 182 | sbale->flags |= q_req->sbtype; |
@@ -229,7 +185,8 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, | |||
229 | sbale = zfcp_qdio_sbale_next(qdio, q_req); | 185 | sbale = zfcp_qdio_sbale_next(qdio, q_req); |
230 | if (!sbale) { | 186 | if (!sbale) { |
231 | atomic_inc(&qdio->req_q_full); | 187 | atomic_inc(&qdio->req_q_full); |
232 | zfcp_qdio_undo_sbals(qdio, q_req); | 188 | zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first, |
189 | q_req->sbal_number); | ||
233 | return -EINVAL; | 190 | return -EINVAL; |
234 | } | 191 | } |
235 | 192 | ||
@@ -239,19 +196,13 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, | |||
239 | bytes += sg->length; | 196 | bytes += sg->length; |
240 | } | 197 | } |
241 | 198 | ||
242 | /* assume that no other SBALEs are to follow in the same SBAL */ | ||
243 | sbale = zfcp_qdio_sbale_curr(qdio, q_req); | ||
244 | sbale->flags |= SBAL_FLAGS_LAST_ENTRY; | ||
245 | |||
246 | return bytes; | 199 | return bytes; |
247 | } | 200 | } |
248 | 201 | ||
249 | static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) | 202 | static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) |
250 | { | 203 | { |
251 | struct zfcp_qdio_queue *req_q = &qdio->req_q; | ||
252 | |||
253 | spin_lock_bh(&qdio->req_q_lock); | 204 | spin_lock_bh(&qdio->req_q_lock); |
254 | if (atomic_read(&req_q->count) || | 205 | if (atomic_read(&qdio->req_q_free) || |
255 | !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) | 206 | !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) |
256 | return 1; | 207 | return 1; |
257 | spin_unlock_bh(&qdio->req_q_lock); | 208 | spin_unlock_bh(&qdio->req_q_lock); |
@@ -300,25 +251,25 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) | |||
300 | */ | 251 | */ |
301 | int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) | 252 | int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) |
302 | { | 253 | { |
303 | struct zfcp_qdio_queue *req_q = &qdio->req_q; | ||
304 | int first = q_req->sbal_first; | ||
305 | int count = q_req->sbal_number; | ||
306 | int retval; | 254 | int retval; |
307 | unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; | 255 | u8 sbal_number = q_req->sbal_number; |
308 | 256 | ||
309 | zfcp_qdio_account(qdio); | 257 | zfcp_qdio_account(qdio); |
310 | 258 | ||
311 | retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first, | 259 | retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, |
312 | count); | 260 | q_req->sbal_first, sbal_number); |
261 | |||
313 | if (unlikely(retval)) { | 262 | if (unlikely(retval)) { |
314 | zfcp_qdio_zero_sbals(req_q->sbal, first, count); | 263 | zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first, |
264 | sbal_number); | ||
315 | return retval; | 265 | return retval; |
316 | } | 266 | } |
317 | 267 | ||
318 | /* account for transferred buffers */ | 268 | /* account for transferred buffers */ |
319 | atomic_sub(count, &req_q->count); | 269 | atomic_sub(sbal_number, &qdio->req_q_free); |
320 | req_q->first += count; | 270 | qdio->req_q_idx += sbal_number; |
321 | req_q->first %= QDIO_MAX_BUFFERS_PER_Q; | 271 | qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q; |
272 | |||
322 | return 0; | 273 | return 0; |
323 | } | 274 | } |
324 | 275 | ||
@@ -331,6 +282,7 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, | |||
331 | id->q_format = QDIO_ZFCP_QFMT; | 282 | id->q_format = QDIO_ZFCP_QFMT; |
332 | memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); | 283 | memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); |
333 | ASCEBC(id->adapter_name, 8); | 284 | ASCEBC(id->adapter_name, 8); |
285 | id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV; | ||
334 | id->qib_param_field_format = 0; | 286 | id->qib_param_field_format = 0; |
335 | id->qib_param_field = NULL; | 287 | id->qib_param_field = NULL; |
336 | id->input_slib_elements = NULL; | 288 | id->input_slib_elements = NULL; |
@@ -340,10 +292,10 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, | |||
340 | id->input_handler = zfcp_qdio_int_resp; | 292 | id->input_handler = zfcp_qdio_int_resp; |
341 | id->output_handler = zfcp_qdio_int_req; | 293 | id->output_handler = zfcp_qdio_int_req; |
342 | id->int_parm = (unsigned long) qdio; | 294 | id->int_parm = (unsigned long) qdio; |
343 | id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal); | 295 | id->input_sbal_addr_array = (void **) (qdio->res_q); |
344 | id->output_sbal_addr_array = (void **) (qdio->req_q.sbal); | 296 | id->output_sbal_addr_array = (void **) (qdio->req_q); |
345 | |||
346 | } | 297 | } |
298 | |||
347 | /** | 299 | /** |
348 | * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data | 300 | * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data |
349 | * @adapter: pointer to struct zfcp_adapter | 301 | * @adapter: pointer to struct zfcp_adapter |
@@ -354,8 +306,8 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio) | |||
354 | { | 306 | { |
355 | struct qdio_initialize init_data; | 307 | struct qdio_initialize init_data; |
356 | 308 | ||
357 | if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) || | 309 | if (zfcp_qdio_buffers_enqueue(qdio->req_q) || |
358 | zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal)) | 310 | zfcp_qdio_buffers_enqueue(qdio->res_q)) |
359 | return -ENOMEM; | 311 | return -ENOMEM; |
360 | 312 | ||
361 | zfcp_qdio_setup_init_data(&init_data, qdio); | 313 | zfcp_qdio_setup_init_data(&init_data, qdio); |
@@ -369,34 +321,30 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio) | |||
369 | */ | 321 | */ |
370 | void zfcp_qdio_close(struct zfcp_qdio *qdio) | 322 | void zfcp_qdio_close(struct zfcp_qdio *qdio) |
371 | { | 323 | { |
372 | struct zfcp_qdio_queue *req_q; | 324 | struct zfcp_adapter *adapter = qdio->adapter; |
373 | int first, count; | 325 | int idx, count; |
374 | 326 | ||
375 | if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) | 327 | if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) |
376 | return; | 328 | return; |
377 | 329 | ||
378 | /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ | 330 | /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ |
379 | req_q = &qdio->req_q; | ||
380 | spin_lock_bh(&qdio->req_q_lock); | 331 | spin_lock_bh(&qdio->req_q_lock); |
381 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); | 332 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); |
382 | spin_unlock_bh(&qdio->req_q_lock); | 333 | spin_unlock_bh(&qdio->req_q_lock); |
383 | 334 | ||
384 | wake_up(&qdio->req_q_wq); | 335 | wake_up(&qdio->req_q_wq); |
385 | 336 | ||
386 | qdio_shutdown(qdio->adapter->ccw_device, | 337 | qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); |
387 | QDIO_FLAG_CLEANUP_USING_CLEAR); | ||
388 | 338 | ||
389 | /* cleanup used outbound sbals */ | 339 | /* cleanup used outbound sbals */ |
390 | count = atomic_read(&req_q->count); | 340 | count = atomic_read(&qdio->req_q_free); |
391 | if (count < QDIO_MAX_BUFFERS_PER_Q) { | 341 | if (count < QDIO_MAX_BUFFERS_PER_Q) { |
392 | first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q; | 342 | idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q; |
393 | count = QDIO_MAX_BUFFERS_PER_Q - count; | 343 | count = QDIO_MAX_BUFFERS_PER_Q - count; |
394 | zfcp_qdio_zero_sbals(req_q->sbal, first, count); | 344 | zfcp_qdio_zero_sbals(qdio->req_q, idx, count); |
395 | } | 345 | } |
396 | req_q->first = 0; | 346 | qdio->req_q_idx = 0; |
397 | atomic_set(&req_q->count, 0); | 347 | atomic_set(&qdio->req_q_free, 0); |
398 | qdio->resp_q.first = 0; | ||
399 | atomic_set(&qdio->resp_q.count, 0); | ||
400 | } | 348 | } |
401 | 349 | ||
402 | /** | 350 | /** |
@@ -408,34 +356,45 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) | |||
408 | { | 356 | { |
409 | struct qdio_buffer_element *sbale; | 357 | struct qdio_buffer_element *sbale; |
410 | struct qdio_initialize init_data; | 358 | struct qdio_initialize init_data; |
411 | struct ccw_device *cdev = qdio->adapter->ccw_device; | 359 | struct zfcp_adapter *adapter = qdio->adapter; |
360 | struct ccw_device *cdev = adapter->ccw_device; | ||
361 | struct qdio_ssqd_desc ssqd; | ||
412 | int cc; | 362 | int cc; |
413 | 363 | ||
414 | if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) | 364 | if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) |
415 | return -EIO; | 365 | return -EIO; |
416 | 366 | ||
367 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, | ||
368 | &qdio->adapter->status); | ||
369 | |||
417 | zfcp_qdio_setup_init_data(&init_data, qdio); | 370 | zfcp_qdio_setup_init_data(&init_data, qdio); |
418 | 371 | ||
419 | if (qdio_establish(&init_data)) | 372 | if (qdio_establish(&init_data)) |
420 | goto failed_establish; | 373 | goto failed_establish; |
421 | 374 | ||
375 | if (qdio_get_ssqd_desc(init_data.cdev, &ssqd)) | ||
376 | goto failed_qdio; | ||
377 | |||
378 | if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED) | ||
379 | atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED, | ||
380 | &qdio->adapter->status); | ||
381 | |||
422 | if (qdio_activate(cdev)) | 382 | if (qdio_activate(cdev)) |
423 | goto failed_qdio; | 383 | goto failed_qdio; |
424 | 384 | ||
425 | for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { | 385 | for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { |
426 | sbale = &(qdio->resp_q.sbal[cc]->element[0]); | 386 | sbale = &(qdio->res_q[cc]->element[0]); |
427 | sbale->length = 0; | 387 | sbale->length = 0; |
428 | sbale->flags = SBAL_FLAGS_LAST_ENTRY; | 388 | sbale->flags = SBAL_FLAGS_LAST_ENTRY; |
429 | sbale->addr = NULL; | 389 | sbale->addr = NULL; |
430 | } | 390 | } |
431 | 391 | ||
432 | if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, | 392 | if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q)) |
433 | QDIO_MAX_BUFFERS_PER_Q)) | ||
434 | goto failed_qdio; | 393 | goto failed_qdio; |
435 | 394 | ||
436 | /* set index of first available SBALS / number of available SBALS */ | 395 | /* set index of first available SBALS / number of available SBALS */ |
437 | qdio->req_q.first = 0; | 396 | qdio->req_q_idx = 0; |
438 | atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q); | 397 | atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q); |
439 | 398 | ||
440 | return 0; | 399 | return 0; |
441 | 400 | ||
@@ -449,7 +408,6 @@ failed_establish: | |||
449 | 408 | ||
450 | void zfcp_qdio_destroy(struct zfcp_qdio *qdio) | 409 | void zfcp_qdio_destroy(struct zfcp_qdio *qdio) |
451 | { | 410 | { |
452 | struct qdio_buffer **sbal_req, **sbal_resp; | ||
453 | int p; | 411 | int p; |
454 | 412 | ||
455 | if (!qdio) | 413 | if (!qdio) |
@@ -458,12 +416,9 @@ void zfcp_qdio_destroy(struct zfcp_qdio *qdio) | |||
458 | if (qdio->adapter->ccw_device) | 416 | if (qdio->adapter->ccw_device) |
459 | qdio_free(qdio->adapter->ccw_device); | 417 | qdio_free(qdio->adapter->ccw_device); |
460 | 418 | ||
461 | sbal_req = qdio->req_q.sbal; | ||
462 | sbal_resp = qdio->resp_q.sbal; | ||
463 | |||
464 | for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { | 419 | for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { |
465 | free_page((unsigned long) sbal_req[p]); | 420 | free_page((unsigned long) qdio->req_q[p]); |
466 | free_page((unsigned long) sbal_resp[p]); | 421 | free_page((unsigned long) qdio->res_q[p]); |
467 | } | 422 | } |
468 | 423 | ||
469 | kfree(qdio); | 424 | kfree(qdio); |
@@ -491,3 +446,26 @@ int zfcp_qdio_setup(struct zfcp_adapter *adapter) | |||
491 | return 0; | 446 | return 0; |
492 | } | 447 | } |
493 | 448 | ||
449 | /** | ||
450 | * zfcp_qdio_siosl - Trigger logging in FCP channel | ||
451 | * @adapter: The zfcp_adapter where to trigger logging | ||
452 | * | ||
453 | * Call the cio siosl function to trigger hardware logging. This | ||
454 | * wrapper function sets a flag to ensure hardware logging is only | ||
455 | * triggered once before going through qdio shutdown. | ||
456 | * | ||
457 | * The triggers are always run from qdio tasklet context, so no | ||
458 | * additional synchronization is necessary. | ||
459 | */ | ||
460 | void zfcp_qdio_siosl(struct zfcp_adapter *adapter) | ||
461 | { | ||
462 | int rc; | ||
463 | |||
464 | if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED) | ||
465 | return; | ||
466 | |||
467 | rc = ccw_device_siosl(adapter->ccw_device); | ||
468 | if (!rc) | ||
469 | atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, | ||
470 | &adapter->status); | ||
471 | } | ||
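Usage note on zfcp_qdio_siosl(): the callers added in zfcp_fsf.c and in the QDIO error handler above trigger it immediately before escalating to an adapter reopen, shutdown or panic, so the FCP channel collects its hardware log while the failing state is still present. ccw_device_siosl() is the underlying CIO interface, and ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, which zfcp_qdio_open() clears again, limits the trigger to one shot per QDIO lifetime.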
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h index 138fba577b48..2297d8d3e947 100644 --- a/drivers/s390/scsi/zfcp_qdio.h +++ b/drivers/s390/scsi/zfcp_qdio.h | |||
@@ -19,22 +19,20 @@ | |||
19 | /* index of last SBALE (with respect to DMQ bug workaround) */ | 19 | /* index of last SBALE (with respect to DMQ bug workaround) */ |
20 | #define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1) | 20 | #define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1) |
21 | 21 | ||
22 | /** | 22 | /* Max SBALS for chaining */ |
23 | * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count | 23 | #define ZFCP_QDIO_MAX_SBALS_PER_REQ 36 |
24 | * @sbal: qdio buffers | 24 | |
25 | * @first: index of next free buffer in queue | 25 | /* max. number of (data buffer) SBALEs in largest SBAL chain |
26 | * @count: number of free buffers in queue | 26 | * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */ |
27 | */ | 27 | #define ZFCP_QDIO_MAX_SBALES_PER_REQ \ |
28 | struct zfcp_qdio_queue { | 28 | (ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2) |
29 | struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; | ||
30 | u8 first; | ||
31 | atomic_t count; | ||
32 | }; | ||
33 | 29 | ||
34 | /** | 30 | /** |
35 | * struct zfcp_qdio - basic qdio data structure | 31 | * struct zfcp_qdio - basic qdio data structure |
36 | * @resp_q: response queue | 32 | * @res_q: response queue |
37 | * @req_q: request queue | 33 | * @req_q: request queue |
34 | * @req_q_idx: index of next free buffer | ||
35 | * @req_q_free: number of free buffers in queue | ||
38 | * @stat_lock: lock to protect req_q_util and req_q_time | 36 | * @stat_lock: lock to protect req_q_util and req_q_time |
39 | * @req_q_lock: lock to serialize access to request queue | 37 | * @req_q_lock: lock to serialize access to request queue |
40 | * @req_q_time: time of last fill level change | 38 | * @req_q_time: time of last fill level change |
@@ -44,8 +42,10 @@ struct zfcp_qdio_queue { | |||
44 | * @adapter: adapter used in conjunction with this qdio structure | 42 | * @adapter: adapter used in conjunction with this qdio structure |
45 | */ | 43 | */ |
46 | struct zfcp_qdio { | 44 | struct zfcp_qdio { |
47 | struct zfcp_qdio_queue resp_q; | 45 | struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q]; |
48 | struct zfcp_qdio_queue req_q; | 46 | struct qdio_buffer *req_q[QDIO_MAX_BUFFERS_PER_Q]; |
47 | u8 req_q_idx; | ||
48 | atomic_t req_q_free; | ||
49 | spinlock_t stat_lock; | 49 | spinlock_t stat_lock; |
50 | spinlock_t req_q_lock; | 50 | spinlock_t req_q_lock; |
51 | unsigned long long req_q_time; | 51 | unsigned long long req_q_time; |
@@ -65,7 +65,6 @@ struct zfcp_qdio { | |||
65 | * @sbale_curr: current sbale at creation of this request | 65 | * @sbale_curr: current sbale at creation of this request |
66 | * @sbal_response: sbal used in interrupt | 66 | * @sbal_response: sbal used in interrupt |
67 | * @qdio_outb_usage: usage of outbound queue | 67 | * @qdio_outb_usage: usage of outbound queue |
68 | * @qdio_inb_usage: usage of inbound queue | ||
69 | */ | 68 | */ |
70 | struct zfcp_qdio_req { | 69 | struct zfcp_qdio_req { |
71 | u32 sbtype; | 70 | u32 sbtype; |
@@ -76,22 +75,9 @@ struct zfcp_qdio_req { | |||
76 | u8 sbale_curr; | 75 | u8 sbale_curr; |
77 | u8 sbal_response; | 76 | u8 sbal_response; |
78 | u16 qdio_outb_usage; | 77 | u16 qdio_outb_usage; |
79 | u16 qdio_inb_usage; | ||
80 | }; | 78 | }; |
81 | 79 | ||
82 | /** | 80 | /** |
83 | * zfcp_qdio_sbale - return pointer to sbale in qdio queue | ||
84 | * @q: queue where to find sbal | ||
85 | * @sbal_idx: sbal index in queue | ||
86 | * @sbale_idx: sbale index in sbal | ||
87 | */ | ||
88 | static inline struct qdio_buffer_element * | ||
89 | zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) | ||
90 | { | ||
91 | return &q->sbal[sbal_idx]->element[sbale_idx]; | ||
92 | } | ||
93 | |||
94 | /** | ||
95 | * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request | 81 | * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request |
96 | * @qdio: pointer to struct zfcp_qdio | 82 | * @qdio: pointer to struct zfcp_qdio |
97 | * @q_req: pointer to struct zfcp_qdio_req | 83 | * @q_req: pointer to struct zfcp_qdio_req |
@@ -100,7 +86,7 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) | |||
100 | static inline struct qdio_buffer_element * | 86 | static inline struct qdio_buffer_element * |
101 | zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) | 87 | zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) |
102 | { | 88 | { |
103 | return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0); | 89 | return &qdio->req_q[q_req->sbal_last]->element[0]; |
104 | } | 90 | } |
105 | 91 | ||
106 | /** | 92 | /** |
@@ -112,8 +98,7 @@ zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) | |||
112 | static inline struct qdio_buffer_element * | 98 | static inline struct qdio_buffer_element * |
113 | zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) | 99 | zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) |
114 | { | 100 | { |
115 | return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, | 101 | return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr]; |
116 | q_req->sbale_curr); | ||
117 | } | 102 | } |
118 | 103 | ||
119 | /** | 104 | /** |
@@ -134,21 +119,25 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, | |||
134 | unsigned long req_id, u32 sbtype, void *data, u32 len) | 119 | unsigned long req_id, u32 sbtype, void *data, u32 len) |
135 | { | 120 | { |
136 | struct qdio_buffer_element *sbale; | 121 | struct qdio_buffer_element *sbale; |
122 | int count = min(atomic_read(&qdio->req_q_free), | ||
123 | ZFCP_QDIO_MAX_SBALS_PER_REQ); | ||
137 | 124 | ||
138 | q_req->sbal_first = q_req->sbal_last = qdio->req_q.first; | 125 | q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx; |
139 | q_req->sbal_number = 1; | 126 | q_req->sbal_number = 1; |
140 | q_req->sbtype = sbtype; | 127 | q_req->sbtype = sbtype; |
128 | q_req->sbale_curr = 1; | ||
129 | q_req->sbal_limit = (q_req->sbal_first + count - 1) | ||
130 | % QDIO_MAX_BUFFERS_PER_Q; | ||
141 | 131 | ||
142 | sbale = zfcp_qdio_sbale_req(qdio, q_req); | 132 | sbale = zfcp_qdio_sbale_req(qdio, q_req); |
143 | sbale->addr = (void *) req_id; | 133 | sbale->addr = (void *) req_id; |
144 | sbale->flags |= SBAL_FLAGS0_COMMAND; | 134 | sbale->flags = SBAL_FLAGS0_COMMAND | sbtype; |
145 | sbale->flags |= sbtype; | ||
146 | 135 | ||
147 | q_req->sbale_curr = 1; | 136 | if (unlikely(!data)) |
137 | return; | ||
148 | sbale++; | 138 | sbale++; |
149 | sbale->addr = data; | 139 | sbale->addr = data; |
150 | if (likely(data)) | 140 | sbale->length = len; |
151 | sbale->length = len; | ||
152 | } | 141 | } |
153 | 142 | ||
154 | /** | 143 | /** |
@@ -210,4 +199,36 @@ void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req) | |||
210 | q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL; | 199 | q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL; |
211 | } | 200 | } |
212 | 201 | ||
202 | /** | ||
203 | * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req | ||
204 | * @qdio: pointer to struct zfcp_qdio | ||
205 | * @q_req: The current zfcp_qdio_req | ||
206 | * @max_sbals: maximum number of SBALs allowed | ||
207 | */ | ||
208 | static inline | ||
209 | void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, | ||
210 | struct zfcp_qdio_req *q_req, int max_sbals) | ||
211 | { | ||
212 | int count = min(atomic_read(&qdio->req_q_free), max_sbals); | ||
213 | |||
214 | q_req->sbal_limit = (q_req->sbal_first + count - 1) % | ||
215 | QDIO_MAX_BUFFERS_PER_Q; | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * zfcp_qdio_set_data_div - set data division count | ||
220 | * @qdio: pointer to struct zfcp_qdio | ||
221 | * @q_req: The current zfcp_qdio_req | ||
222 | * @count: The data division count | ||
223 | */ | ||
224 | static inline | ||
225 | void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio, | ||
226 | struct zfcp_qdio_req *q_req, u32 count) | ||
227 | { | ||
228 | struct qdio_buffer_element *sbale; | ||
229 | |||
230 | sbale = &qdio->req_q[q_req->sbal_first]->element[0]; | ||
231 | sbale->length = count; | ||
232 | } | ||
233 | |||
213 | #endif /* ZFCP_QDIO_H */ | 234 | #endif /* ZFCP_QDIO_H */ |
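Taken together, the helpers in this header define the request-buildup sequence now used by the callers in zfcp_fsf.c. An illustrative sketch of the ordering only, assuming an already prepared struct zfcp_fsf_req and ignoring locking and error recovery (real callers allocate the request through the FSF layer and hold qdio->req_q_lock):

static int example_queue_request(struct zfcp_qdio *qdio,
				 struct zfcp_fsf_req *req,
				 struct scatterlist *sg)
{
	/* SBALE 0 gets the request ID, SBALE 1 the QTCB */
	zfcp_qdio_req_init(qdio, &req->qdio_req, req->req_id,
			   SBAL_FLAGS0_TYPE_READ, req->qtcb,
			   sizeof(struct fsf_qtcb));

	/* optional: restrict chaining, e.g. to 2 SBALs as the ELS path does */
	zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);

	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg) <= 0)
		return -EIO;

	/* close the SBALE chain before handing the SBALs to the hardware */
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	return zfcp_qdio_send(qdio, &req->qdio_req);
}

For DIX requests, the FCP command path additionally calls zfcp_qdio_set_data_div() with scsi_prot_sg_count() and maps the protection scatterlist before the data scatterlist, as shown in zfcp_fsf_send_fcp_command_task() above.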
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index be5d2c60453d..cb000c9833bb 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <scsi/fc/fc_fcp.h> | 14 | #include <scsi/fc/fc_fcp.h> |
15 | #include <scsi/scsi_eh.h> | ||
15 | #include <asm/atomic.h> | 16 | #include <asm/atomic.h> |
16 | #include "zfcp_ext.h" | 17 | #include "zfcp_ext.h" |
17 | #include "zfcp_dbf.h" | 18 | #include "zfcp_dbf.h" |
@@ -22,6 +23,13 @@ static unsigned int default_depth = 32; | |||
22 | module_param_named(queue_depth, default_depth, uint, 0600); | 23 | module_param_named(queue_depth, default_depth, uint, 0600); |
23 | MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices"); | 24 | MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices"); |
24 | 25 | ||
26 | static bool enable_dif; | ||
27 | |||
28 | #ifdef CONFIG_ZFCP_DIF | ||
29 | module_param_named(dif, enable_dif, bool, 0600); | ||
30 | MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support"); | ||
31 | #endif | ||
32 | |||
25 | static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, | 33 | static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, |
26 | int reason) | 34 | int reason) |
27 | { | 35 | { |
@@ -506,8 +514,10 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) | |||
506 | * @rport: The FC rport where to terminate I/O | 514 | * @rport: The FC rport where to terminate I/O |
507 | * | 515 | * |
508 | * Abort all pending SCSI commands for a port by closing the | 516 | * Abort all pending SCSI commands for a port by closing the |
509 | * port. Using a reopen avoiding a conflict with a shutdown | 517 | * port. Using a reopen avoids a conflict with a shutdown |
510 | * overwriting a reopen. | 518 | * overwriting a reopen. The "forced" ensures that a disappeared port |
519 | * is not opened again as valid due to the cached plogi data in | ||
520 | * non-NPIV mode. | ||
511 | */ | 521 | */ |
512 | static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) | 522 | static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) |
513 | { | 523 | { |
@@ -519,11 +529,25 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) | |||
519 | port = zfcp_get_port_by_wwpn(adapter, rport->port_name); | 529 | port = zfcp_get_port_by_wwpn(adapter, rport->port_name); |
520 | 530 | ||
521 | if (port) { | 531 | if (port) { |
522 | zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL); | 532 | zfcp_erp_port_forced_reopen(port, 0, "sctrpi1", NULL); |
523 | put_device(&port->dev); | 533 | put_device(&port->dev); |
524 | } | 534 | } |
525 | } | 535 | } |
526 | 536 | ||
537 | static void zfcp_scsi_queue_unit_register(struct zfcp_port *port) | ||
538 | { | ||
539 | struct zfcp_unit *unit; | ||
540 | |||
541 | read_lock_irq(&port->unit_list_lock); | ||
542 | list_for_each_entry(unit, &port->unit_list, list) { | ||
543 | get_device(&unit->dev); | ||
544 | if (scsi_queue_work(port->adapter->scsi_host, | ||
545 | &unit->scsi_work) <= 0) | ||
546 | put_device(&unit->dev); | ||
547 | } | ||
548 | read_unlock_irq(&port->unit_list_lock); | ||
549 | } | ||
550 | |||
527 | static void zfcp_scsi_rport_register(struct zfcp_port *port) | 551 | static void zfcp_scsi_rport_register(struct zfcp_port *port) |
528 | { | 552 | { |
529 | struct fc_rport_identifiers ids; | 553 | struct fc_rport_identifiers ids; |
@@ -548,6 +572,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) | |||
548 | rport->maxframe_size = port->maxframe_size; | 572 | rport->maxframe_size = port->maxframe_size; |
549 | rport->supported_classes = port->supported_classes; | 573 | rport->supported_classes = port->supported_classes; |
550 | port->rport = rport; | 574 | port->rport = rport; |
575 | port->starget_id = rport->scsi_target_id; | ||
576 | |||
577 | zfcp_scsi_queue_unit_register(port); | ||
551 | } | 578 | } |
552 | 579 | ||
553 | static void zfcp_scsi_rport_block(struct zfcp_port *port) | 580 | static void zfcp_scsi_rport_block(struct zfcp_port *port) |
@@ -610,24 +637,74 @@ void zfcp_scsi_rport_work(struct work_struct *work) | |||
610 | put_device(&port->dev); | 637 | put_device(&port->dev); |
611 | } | 638 | } |
612 | 639 | ||
613 | 640 | /** | |
614 | void zfcp_scsi_scan(struct work_struct *work) | 641 | * zfcp_scsi_scan - Register LUN with SCSI midlayer |
642 | * @unit: The LUN/unit to register | ||
643 | */ | ||
644 | void zfcp_scsi_scan(struct zfcp_unit *unit) | ||
615 | { | 645 | { |
616 | struct zfcp_unit *unit = container_of(work, struct zfcp_unit, | 646 | struct fc_rport *rport = unit->port->rport; |
617 | scsi_work); | ||
618 | struct fc_rport *rport; | ||
619 | |||
620 | flush_work(&unit->port->rport_work); | ||
621 | rport = unit->port->rport; | ||
622 | 647 | ||
623 | if (rport && rport->port_state == FC_PORTSTATE_ONLINE) | 648 | if (rport && rport->port_state == FC_PORTSTATE_ONLINE) |
624 | scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, | 649 | scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, |
625 | scsilun_to_int((struct scsi_lun *) | 650 | scsilun_to_int((struct scsi_lun *) |
626 | &unit->fcp_lun), 0); | 651 | &unit->fcp_lun), 0); |
652 | } | ||
627 | 653 | ||
654 | void zfcp_scsi_scan_work(struct work_struct *work) | ||
655 | { | ||
656 | struct zfcp_unit *unit = container_of(work, struct zfcp_unit, | ||
657 | scsi_work); | ||
658 | |||
659 | zfcp_scsi_scan(unit); | ||
628 | put_device(&unit->dev); | 660 | put_device(&unit->dev); |
629 | } | 661 | } |
630 | 662 | ||
663 | /** | ||
664 | * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host | ||
665 | * @adapter: The adapter where to configure DIF/DIX for the SCSI host | ||
666 | */ | ||
667 | void zfcp_scsi_set_prot(struct zfcp_adapter *adapter) | ||
668 | { | ||
669 | unsigned int mask = 0; | ||
670 | unsigned int data_div; | ||
671 | struct Scsi_Host *shost = adapter->scsi_host; | ||
672 | |||
673 | data_div = atomic_read(&adapter->status) & | ||
674 | ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED; | ||
675 | |||
676 | if (enable_dif && | ||
677 | adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1) | ||
678 | mask |= SHOST_DIF_TYPE1_PROTECTION; | ||
679 | |||
680 | if (enable_dif && data_div && | ||
681 | adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) { | ||
682 | mask |= SHOST_DIX_TYPE1_PROTECTION; | ||
683 | scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP); | ||
684 | shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2; | ||
685 | shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2; | ||
686 | } | ||
687 | |||
688 | scsi_host_set_prot(shost, mask); | ||
689 | } | ||
690 | |||
691 | /** | ||
692 | * zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error | ||
693 | * @scmd: The SCSI command to report the error for | ||
694 | * @ascq: The ASCQ to put in the sense buffer | ||
695 | * | ||
696 | * See the error handling in sd_done for the sense codes used here. | ||
697 | * Set DID_SOFT_ERROR to retry the request, if possible. | ||
698 | */ | ||
699 | void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq) | ||
700 | { | ||
701 | scsi_build_sense_buffer(1, scmd->sense_buffer, | ||
702 | ILLEGAL_REQUEST, 0x10, ascq); | ||
703 | set_driver_byte(scmd, DRIVER_SENSE); | ||
704 | scmd->result |= SAM_STAT_CHECK_CONDITION; | ||
705 | set_host_byte(scmd, DID_SOFT_ERROR); | ||
706 | } | ||
707 | |||
631 | struct fc_function_template zfcp_transport_functions = { | 708 | struct fc_function_template zfcp_transport_functions = { |
632 | .show_starget_port_id = 1, | 709 | .show_starget_port_id = 1, |
633 | .show_starget_port_name = 1, | 710 | .show_starget_port_name = 1, |
@@ -677,11 +754,11 @@ struct zfcp_data zfcp_data = { | |||
677 | .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, | 754 | .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, |
678 | .can_queue = 4096, | 755 | .can_queue = 4096, |
679 | .this_id = -1, | 756 | .this_id = -1, |
680 | .sg_tablesize = ZFCP_FSF_MAX_SBALES_PER_REQ, | 757 | .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ, |
681 | .cmd_per_lun = 1, | 758 | .cmd_per_lun = 1, |
682 | .use_clustering = 1, | 759 | .use_clustering = 1, |
683 | .sdev_attrs = zfcp_sysfs_sdev_attrs, | 760 | .sdev_attrs = zfcp_sysfs_sdev_attrs, |
684 | .max_sectors = (ZFCP_FSF_MAX_SBALES_PER_REQ * 8), | 761 | .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8), |
685 | .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, | 762 | .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, |
686 | .shost_attrs = zfcp_sysfs_shost_attrs, | 763 | .shost_attrs = zfcp_sysfs_shost_attrs, |
687 | }, | 764 | }, |
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index f5f60698dc4c..b4561c86e230 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c | |||
@@ -275,7 +275,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, | |||
275 | 275 | ||
276 | zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); | 276 | zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); |
277 | zfcp_erp_wait(unit->port->adapter); | 277 | zfcp_erp_wait(unit->port->adapter); |
278 | flush_work(&unit->scsi_work); | 278 | zfcp_scsi_scan(unit); |
279 | out: | 279 | out: |
280 | put_device(&port->dev); | 280 | put_device(&port->dev); |
281 | return retval ? retval : (ssize_t) count; | 281 | return retval ? retval : (ssize_t) count; |
@@ -290,6 +290,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, | |||
290 | struct zfcp_unit *unit; | 290 | struct zfcp_unit *unit; |
291 | u64 fcp_lun; | 291 | u64 fcp_lun; |
292 | int retval = -EINVAL; | 292 | int retval = -EINVAL; |
293 | struct scsi_device *sdev; | ||
293 | 294 | ||
294 | if (!(port && get_device(&port->dev))) | 295 | if (!(port && get_device(&port->dev))) |
295 | return -EBUSY; | 296 | return -EBUSY; |
@@ -303,8 +304,13 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, | |||
303 | else | 304 | else |
304 | retval = 0; | 305 | retval = 0; |
305 | 306 | ||
306 | /* wait for possible timeout during SCSI probe */ | 307 | sdev = scsi_device_lookup(port->adapter->scsi_host, 0, |
307 | flush_work(&unit->scsi_work); | 308 | port->starget_id, |
309 | scsilun_to_int((struct scsi_lun *)&fcp_lun)); | ||
310 | if (sdev) { | ||
311 | scsi_remove_device(sdev); | ||
312 | scsi_device_put(sdev); | ||
313 | } | ||
308 | 314 | ||
309 | write_lock_irq(&port->unit_list_lock); | 315 | write_lock_irq(&port->unit_list_lock); |
310 | list_del(&unit->list); | 316 | list_del(&unit->list); |
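The unit_remove path above switches from flushing the unit's scsi_work to looking up and removing the SCSI device directly. The lookup/remove/put sequence it relies on, as a sketch with illustrative names (the exact scsi_device_lookup() prototype differs slightly between kernel versions):

	/*
	 * Hedged sketch with illustrative names: the lookup/remove/put
	 * sequence used above to tear down the SCSI device of a removed
	 * unit.
	 */
	#include <scsi/scsi_device.h>
	#include <scsi/scsi_host.h>

	static void example_remove_sdev(struct Scsi_Host *shost,
					unsigned int target_id, unsigned int lun)
	{
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(shost, 0, target_id, lun); /* takes a reference */
		if (!sdev)
			return;			/* LUN was never probed or is already gone */

		scsi_remove_device(sdev);	/* unregister from the SCSI midlayer */
		scsi_device_put(sdev);		/* drop the reference from the lookup */
	}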