Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hvc_iucv.c | 259
-rw-r--r--  drivers/s390/Makefile | 3
-rw-r--r--  drivers/s390/block/dasd.c | 249
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 871
-rw-r--r--  drivers/s390/block/dasd_alias.c | 35
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 43
-rw-r--r--  drivers/s390/block/dasd_diag.c | 66
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 1297
-rw-r--r--  drivers/s390/block/dasd_eckd.h | 49
-rw-r--r--  drivers/s390/block/dasd_eer.c | 27
-rw-r--r--  drivers/s390/block/dasd_erp.c | 21
-rw-r--r--  drivers/s390/block/dasd_fba.c | 77
-rw-r--r--  drivers/s390/block/dasd_genhd.c | 7
-rw-r--r--  drivers/s390/block/dasd_int.h | 13
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 46
-rw-r--r--  drivers/s390/block/dasd_proc.c | 22
-rw-r--r--  drivers/s390/char/tape.h | 2
-rw-r--r--  drivers/s390/char/tape_34xx.c | 161
-rw-r--r--  drivers/s390/char/tape_3590.c | 367
-rw-r--r--  drivers/s390/char/tape_block.c | 18
-rw-r--r--  drivers/s390/char/tape_char.c | 7
-rw-r--r--  drivers/s390/char/tape_core.c | 68
-rw-r--r--  drivers/s390/char/tape_proc.c | 3
-rw-r--r--  drivers/s390/char/tape_std.c | 25
-rw-r--r--  drivers/s390/char/zcore.c | 90
-rw-r--r--  drivers/s390/cio/Makefile | 2
-rw-r--r--  drivers/s390/cio/airq.c | 6
-rw-r--r--  drivers/s390/cio/blacklist.c | 3
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 73
-rw-r--r--  drivers/s390/cio/chp.c | 6
-rw-r--r--  drivers/s390/cio/chsc.c | 7
-rw-r--r--  drivers/s390/cio/cio.c | 21
-rw-r--r--  drivers/s390/cio/crw.c | 159
-rw-r--r--  drivers/s390/cio/css.c | 58
-rw-r--r--  drivers/s390/cio/device.c | 44
-rw-r--r--  drivers/s390/cio/device.h | 2
-rw-r--r--  drivers/s390/cio/device_fsm.c | 8
-rw-r--r--  drivers/s390/cio/device_ops.c | 2
-rw-r--r--  drivers/s390/cio/qdio.h | 8
-rw-r--r--  drivers/s390/cio/qdio_debug.c | 3
-rw-r--r--  drivers/s390/cio/qdio_main.c | 222
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 1
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 23
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 6
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c | 3
-rw-r--r--  drivers/s390/ebcdic.c | 246
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 55
-rw-r--r--  drivers/s390/s390mach.c | 538
-rw-r--r--  drivers/s390/s390mach.h | 122
-rw-r--r--  drivers/s390/sysinfo.c | 469
50 files changed, 3020 insertions(+), 2893 deletions(-)
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index a53496828b76..54481a887769 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -13,10 +13,11 @@
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15#include <asm/ebcdic.h> 15#include <asm/ebcdic.h>
16#include <linux/ctype.h>
16#include <linux/delay.h> 17#include <linux/delay.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/mempool.h> 19#include <linux/mempool.h>
19#include <linux/module.h> 20#include <linux/moduleparam.h>
20#include <linux/tty.h> 21#include <linux/tty.h>
21#include <linux/wait.h> 22#include <linux/wait.h>
22#include <net/iucv/iucv.h> 23#include <net/iucv/iucv.h>
@@ -95,6 +96,12 @@ static unsigned long hvc_iucv_devices = 1;
95/* Array of allocated hvc iucv tty lines... */ 96/* Array of allocated hvc iucv tty lines... */
96static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES]; 97static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
97#define IUCV_HVC_CON_IDX (0) 98#define IUCV_HVC_CON_IDX (0)
99/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
100#define MAX_VMID_FILTER (500)
101static size_t hvc_iucv_filter_size;
102static void *hvc_iucv_filter;
103static const char *hvc_iucv_filter_string;
104static DEFINE_RWLOCK(hvc_iucv_filter_lock);
98 105
99/* Kmem cache and mempool for iucv_tty_buffer elements */ 106/* Kmem cache and mempool for iucv_tty_buffer elements */
100static struct kmem_cache *hvc_iucv_buffer_cache; 107static struct kmem_cache *hvc_iucv_buffer_cache;
@@ -618,6 +625,27 @@ static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
618} 625}
619 626
620/** 627/**
628 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
629 * @ipvmid: Originating z/VM user ID (right padded with blanks)
630 *
631 * Returns 0 if the z/VM user ID @ipvmid is allowed to connection, otherwise
632 * non-zero.
633 */
634static int hvc_iucv_filter_connreq(u8 ipvmid[8])
635{
636 size_t i;
637
638 /* Note: default policy is ACCEPT if no filter is set */
639 if (!hvc_iucv_filter_size)
640 return 0;
641
642 for (i = 0; i < hvc_iucv_filter_size; i++)
643 if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
644 return 0;
645 return 1;
646}
647
648/**
621 * hvc_iucv_path_pending() - IUCV handler to process a connection request. 649 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
622 * @path: Pending path (struct iucv_path) 650 * @path: Pending path (struct iucv_path)
623 * @ipvmid: z/VM system identifier of originator 651 * @ipvmid: z/VM system identifier of originator
@@ -641,6 +669,7 @@ static int hvc_iucv_path_pending(struct iucv_path *path,
641{ 669{
642 struct hvc_iucv_private *priv; 670 struct hvc_iucv_private *priv;
643 u8 nuser_data[16]; 671 u8 nuser_data[16];
672 u8 vm_user_id[9];
644 int i, rc; 673 int i, rc;
645 674
646 priv = NULL; 675 priv = NULL;
@@ -653,6 +682,20 @@ static int hvc_iucv_path_pending(struct iucv_path *path,
653 if (!priv) 682 if (!priv)
654 return -ENODEV; 683 return -ENODEV;
655 684
685 /* Enforce that ipvmid is allowed to connect to us */
686 read_lock(&hvc_iucv_filter_lock);
687 rc = hvc_iucv_filter_connreq(ipvmid);
688 read_unlock(&hvc_iucv_filter_lock);
689 if (rc) {
690 iucv_path_sever(path, ipuser);
691 iucv_path_free(path);
692 memcpy(vm_user_id, ipvmid, 8);
693 vm_user_id[8] = 0;
694 pr_info("A connection request from z/VM user ID %s "
695 "was refused\n", vm_user_id);
696 return 0;
697 }
698
656 spin_lock(&priv->lock); 699 spin_lock(&priv->lock);
657 700
658 /* If the terminal is already connected or being severed, then sever 701 /* If the terminal is already connected or being severed, then sever
@@ -877,6 +920,171 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
877} 920}
878 921
879/** 922/**
923 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
924 * @filter: String containing a comma-separated list of z/VM user IDs
925 */
926static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
927{
928 const char *nextdelim, *residual;
929 size_t len;
930
931 nextdelim = strchr(filter, ',');
932 if (nextdelim) {
933 len = nextdelim - filter;
934 residual = nextdelim + 1;
935 } else {
936 len = strlen(filter);
937 residual = filter + len;
938 }
939
940 if (len == 0)
941 return ERR_PTR(-EINVAL);
942
943 /* check for '\n' (if called from sysfs) */
944 if (filter[len - 1] == '\n')
945 len--;
946
947 if (len > 8)
948 return ERR_PTR(-EINVAL);
949
950 /* pad with blanks and save upper case version of user ID */
951 memset(dest, ' ', 8);
952 while (len--)
953 dest[len] = toupper(filter[len]);
954 return residual;
955}
956
957/**
958 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
959 * @filter: String consisting of a comma-separated list of z/VM user IDs
960 *
961 * The function parses the @filter string and creates an array containing
962 * the list of z/VM user ID filter entries.
963 * Return code 0 means success, -EINVAL if the filter is syntactically
964 * incorrect, -ENOMEM if there was not enough memory to allocate the
965 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
966 */
967static int hvc_iucv_setup_filter(const char *val)
968{
969 const char *residual;
970 int err;
971 size_t size, count;
972 void *array, *old_filter;
973
974 count = strlen(val);
975 if (count == 0 || (count == 1 && val[0] == '\n')) {
976 size = 0;
977 array = NULL;
978 goto out_replace_filter; /* clear filter */
979 }
980
981 /* count user IDs in order to allocate sufficient memory */
982 size = 1;
983 residual = val;
984 while ((residual = strchr(residual, ',')) != NULL) {
985 residual++;
986 size++;
987 }
988
989 /* check if the specified list exceeds the filter limit */
990 if (size > MAX_VMID_FILTER)
991 return -ENOSPC;
992
993 array = kzalloc(size * 8, GFP_KERNEL);
994 if (!array)
995 return -ENOMEM;
996
997 count = size;
998 residual = val;
999 while (*residual && count) {
1000 residual = hvc_iucv_parse_filter(residual,
1001 array + ((size - count) * 8));
1002 if (IS_ERR(residual)) {
1003 err = PTR_ERR(residual);
1004 kfree(array);
1005 goto out_err;
1006 }
1007 count--;
1008 }
1009
1010out_replace_filter:
1011 write_lock_bh(&hvc_iucv_filter_lock);
1012 old_filter = hvc_iucv_filter;
1013 hvc_iucv_filter_size = size;
1014 hvc_iucv_filter = array;
1015 write_unlock_bh(&hvc_iucv_filter_lock);
1016 kfree(old_filter);
1017
1018 err = 0;
1019out_err:
1020 return err;
1021}
1022
1023/**
1024 * param_set_vmidfilter() - Set z/VM user ID filter parameter
1025 * @val: String consisting of a comma-separated list of z/VM user IDs
1026 * @kp: Kernel parameter pointing to hvc_iucv_filter array
1027 *
1028 * The function sets up the z/VM user ID filter specified as comma-separated
1029 * list of user IDs in @val.
1030 * Note: If it is called early in the boot process, @val is stored and
1031 * parsed later in hvc_iucv_init().
1032 */
1033static int param_set_vmidfilter(const char *val, struct kernel_param *kp)
1034{
1035 int rc;
1036
1037 if (!MACHINE_IS_VM || !hvc_iucv_devices)
1038 return -ENODEV;
1039
1040 if (!val)
1041 return -EINVAL;
1042
1043 rc = 0;
1044 if (slab_is_available())
1045 rc = hvc_iucv_setup_filter(val);
1046 else
1047 hvc_iucv_filter_string = val; /* defer... */
1048 return rc;
1049}
1050
1051/**
1052 * param_get_vmidfilter() - Get z/VM user ID filter
1053 * @buffer: Buffer to store z/VM user ID filter,
1054 * (buffer size assumption PAGE_SIZE)
1055 * @kp: Kernel parameter pointing to the hvc_iucv_filter array
1056 *
1057 * The function stores the filter as a comma-separated list of z/VM user IDs
1058 * in @buffer. Typically, sysfs routines call this function for attr show.
1059 */
1060static int param_get_vmidfilter(char *buffer, struct kernel_param *kp)
1061{
1062 int rc;
1063 size_t index, len;
1064 void *start, *end;
1065
1066 if (!MACHINE_IS_VM || !hvc_iucv_devices)
1067 return -ENODEV;
1068
1069 rc = 0;
1070 read_lock_bh(&hvc_iucv_filter_lock);
1071 for (index = 0; index < hvc_iucv_filter_size; index++) {
1072 start = hvc_iucv_filter + (8 * index);
1073 end = memchr(start, ' ', 8);
1074 len = (end) ? end - start : 8;
1075 memcpy(buffer + rc, start, len);
1076 rc += len;
1077 buffer[rc++] = ',';
1078 }
1079 read_unlock_bh(&hvc_iucv_filter_lock);
1080 if (rc)
1081 buffer[--rc] = '\0'; /* replace last comma and update rc */
1082 return rc;
1083}
1084
1085#define param_check_vmidfilter(name, p) __param_check(name, p, void)
1086
1087/**
880 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization 1088 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
881 */ 1089 */
882static int __init hvc_iucv_init(void) 1090static int __init hvc_iucv_init(void)
@@ -884,24 +1092,53 @@ static int __init hvc_iucv_init(void)
884 int rc; 1092 int rc;
885 unsigned int i; 1093 unsigned int i;
886 1094
1095 if (!hvc_iucv_devices)
1096 return -ENODEV;
1097
887 if (!MACHINE_IS_VM) { 1098 if (!MACHINE_IS_VM) {
888 pr_info("The z/VM IUCV HVC device driver cannot " 1099 pr_notice("The z/VM IUCV HVC device driver cannot "
889 "be used without z/VM\n"); 1100 "be used without z/VM\n");
890 return -ENODEV; 1101 rc = -ENODEV;
1102 goto out_error;
891 } 1103 }
892 1104
893 if (!hvc_iucv_devices) 1105 if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
894 return -ENODEV; 1106 pr_err("%lu is not a valid value for the hvc_iucv= "
1107 "kernel parameter\n", hvc_iucv_devices);
1108 rc = -EINVAL;
1109 goto out_error;
1110 }
895 1111
896 if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) 1112 /* parse hvc_iucv_allow string and create z/VM user ID filter list */
897 return -EINVAL; 1113 if (hvc_iucv_filter_string) {
1114 rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
1115 switch (rc) {
1116 case 0:
1117 break;
1118 case -ENOMEM:
1119 pr_err("Allocating memory failed with "
1120 "reason code=%d\n", 3);
1121 goto out_error;
1122 case -EINVAL:
1123 pr_err("hvc_iucv_allow= does not specify a valid "
1124 "z/VM user ID list\n");
1125 goto out_error;
1126 case -ENOSPC:
1127 pr_err("hvc_iucv_allow= specifies too many "
1128 "z/VM user IDs\n");
1129 goto out_error;
1130 default:
1131 goto out_error;
1132 }
1133 }
898 1134
899 hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT, 1135 hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
900 sizeof(struct iucv_tty_buffer), 1136 sizeof(struct iucv_tty_buffer),
901 0, 0, NULL); 1137 0, 0, NULL);
902 if (!hvc_iucv_buffer_cache) { 1138 if (!hvc_iucv_buffer_cache) {
903 pr_err("Allocating memory failed with reason code=%d\n", 1); 1139 pr_err("Allocating memory failed with reason code=%d\n", 1);
904 return -ENOMEM; 1140 rc = -ENOMEM;
1141 goto out_error;
905 } 1142 }
906 1143
907 hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR, 1144 hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
@@ -909,7 +1146,8 @@ static int __init hvc_iucv_init(void)
909 if (!hvc_iucv_mempool) { 1146 if (!hvc_iucv_mempool) {
910 pr_err("Allocating memory failed with reason code=%d\n", 2); 1147 pr_err("Allocating memory failed with reason code=%d\n", 2);
911 kmem_cache_destroy(hvc_iucv_buffer_cache); 1148 kmem_cache_destroy(hvc_iucv_buffer_cache);
912 return -ENOMEM; 1149 rc = -ENOMEM;
1150 goto out_error;
913 } 1151 }
914 1152
915 /* register the first terminal device as console 1153 /* register the first terminal device as console
@@ -953,6 +1191,8 @@ out_error_hvc:
953out_error_memory: 1191out_error_memory:
954 mempool_destroy(hvc_iucv_mempool); 1192 mempool_destroy(hvc_iucv_mempool);
955 kmem_cache_destroy(hvc_iucv_buffer_cache); 1193 kmem_cache_destroy(hvc_iucv_buffer_cache);
1194out_error:
1195 hvc_iucv_devices = 0; /* ensure that we do not provide any device */
956 return rc; 1196 return rc;
957} 1197}
958 1198
@@ -968,3 +1208,4 @@ static int __init hvc_iucv_config(char *val)
968 1208
969device_initcall(hvc_iucv_init); 1209device_initcall(hvc_iucv_init);
970__setup("hvc_iucv=", hvc_iucv_config); 1210__setup("hvc_iucv=", hvc_iucv_config);
1211core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
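Editor's note: the hunks above add an hvc_iucv_allow= list that hvc_iucv_parse_filter() turns into fixed 8-byte records: each comma-separated z/VM user ID is upper-cased and right-padded with blanks, so hvc_iucv_filter_connreq() can accept or refuse a connection with a single 8-byte memcmp per entry (an empty filter means accept all). The following is a minimal user-space sketch of that normalization rule only; normalize_vmid() is a hypothetical helper and not code from the patch.

/*
 * Sketch of the normalization rule used by hvc_iucv_parse_filter():
 * one comma-separated token becomes a fixed 8-byte record, upper-cased
 * and right-padded with blanks. Hypothetical user-space illustration.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int normalize_vmid(const char *token, size_t len, char dest[8])
{
	size_t i;

	if (len == 0 || len > 8)
		return -1;		/* mirrors the -EINVAL cases */
	memset(dest, ' ', 8);		/* pad with blanks */
	for (i = 0; i < len; i++)
		dest[i] = toupper((unsigned char)token[i]);
	return 0;
}

int main(void)
{
	char rec[8];

	if (normalize_vmid("lnxuser", strlen("lnxuser"), rec) == 0)
		printf("\"%.8s\"\n", rec);	/* prints "LNXUSER " */
	return 0;
}

With entries stored this way, the per-connection check in hvc_iucv_filter_connreq() reduces to one memcmp(ipvmid, entry, 8) per filter entry under the read lock.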
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index d0eae59bc366..95bccfd3f169 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,9 +2,6 @@
2# Makefile for the S/390 specific device drivers 2# Makefile for the S/390 specific device drivers
3# 3#
4 4
5CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
6
7obj-y += s390mach.o sysinfo.o
8obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/ 5obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
9 6
10drivers-y += drivers/s390/built-in.o 7drivers-y += drivers/s390/built-in.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 08c23a921012..2fd64e5a9ab2 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -9,6 +9,9 @@
9 * 9 *
10 */ 10 */
11 11
12#define KMSG_COMPONENT "dasd"
13#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14
12#include <linux/kmod.h> 15#include <linux/kmod.h>
13#include <linux/init.h> 16#include <linux/init.h>
14#include <linux/interrupt.h> 17#include <linux/interrupt.h>
@@ -22,6 +25,7 @@
22#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
23#include <asm/idals.h> 26#include <asm/idals.h>
24#include <asm/todclk.h> 27#include <asm/todclk.h>
28#include <asm/itcw.h>
25 29
26/* This is ugly... */ 30/* This is ugly... */
27#define PRINTK_HEADER "dasd:" 31#define PRINTK_HEADER "dasd:"
@@ -221,7 +225,7 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
221 return rc; 225 return rc;
222 } 226 }
223 /* register 'device' debug area, used for all DBF_DEV_XXX calls */ 227 /* register 'device' debug area, used for all DBF_DEV_XXX calls */
224 device->debug_area = debug_register(dev_name(&device->cdev->dev), 1, 1, 228 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
225 8 * sizeof(long)); 229 8 * sizeof(long));
226 debug_register_view(device->debug_area, &debug_sprintf_view); 230 debug_register_view(device->debug_area, &debug_sprintf_view);
227 debug_set_level(device->debug_area, DBF_WARNING); 231 debug_set_level(device->debug_area, DBF_WARNING);
@@ -762,7 +766,7 @@ static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
762 return -EINVAL; 766 return -EINVAL;
763 device = cqr->startdev; 767 device = cqr->startdev;
764 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { 768 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
765 DEV_MESSAGE(KERN_WARNING, device, 769 DBF_DEV_EVENT(DBF_WARNING, device,
766 " dasd_ccw_req 0x%08x magic doesn't match" 770 " dasd_ccw_req 0x%08x magic doesn't match"
767 " discipline 0x%08x", 771 " discipline 0x%08x",
768 cqr->magic, 772 cqr->magic,
@@ -782,6 +786,7 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
782{ 786{
783 struct dasd_device *device; 787 struct dasd_device *device;
784 int retries, rc; 788 int retries, rc;
789 char errorstring[ERRORLENGTH];
785 790
786 /* Check the cqr */ 791 /* Check the cqr */
787 rc = dasd_check_cqr(cqr); 792 rc = dasd_check_cqr(cqr);
@@ -815,10 +820,10 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
815 "device busy, retry later"); 820 "device busy, retry later");
816 break; 821 break;
817 default: 822 default:
818 DEV_MESSAGE(KERN_ERR, device, 823 /* internal error 10 - unknown rc*/
819 "line %d unknown RC=%d, please " 824 snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
820 "report to linux390@de.ibm.com", 825 dev_err(&device->cdev->dev, "An error occurred in the "
821 __LINE__, rc); 826 "DASD device driver, reason=%s\n", errorstring);
822 BUG(); 827 BUG();
823 break; 828 break;
824 } 829 }
@@ -836,6 +841,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
836{ 841{
837 struct dasd_device *device; 842 struct dasd_device *device;
838 int rc; 843 int rc;
844 char errorstring[ERRORLENGTH];
839 845
840 /* Check the cqr */ 846 /* Check the cqr */
841 rc = dasd_check_cqr(cqr); 847 rc = dasd_check_cqr(cqr);
@@ -843,17 +849,23 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
843 return rc; 849 return rc;
844 device = (struct dasd_device *) cqr->startdev; 850 device = (struct dasd_device *) cqr->startdev;
845 if (cqr->retries < 0) { 851 if (cqr->retries < 0) {
846 DEV_MESSAGE(KERN_DEBUG, device, 852 /* internal error 14 - start_IO run out of retries */
847 "start_IO: request %p (%02x/%i) - no retry left.", 853 sprintf(errorstring, "14 %p", cqr);
848 cqr, cqr->status, cqr->retries); 854 dev_err(&device->cdev->dev, "An error occurred in the DASD "
855 "device driver, reason=%s\n", errorstring);
849 cqr->status = DASD_CQR_ERROR; 856 cqr->status = DASD_CQR_ERROR;
850 return -EIO; 857 return -EIO;
851 } 858 }
852 cqr->startclk = get_clock(); 859 cqr->startclk = get_clock();
853 cqr->starttime = jiffies; 860 cqr->starttime = jiffies;
854 cqr->retries--; 861 cqr->retries--;
855 rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr, 862 if (cqr->cpmode == 1) {
856 cqr->lpm, 0); 863 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
864 (long) cqr, cqr->lpm);
865 } else {
866 rc = ccw_device_start(device->cdev, cqr->cpaddr,
867 (long) cqr, cqr->lpm, 0);
868 }
857 switch (rc) { 869 switch (rc) {
858 case 0: 870 case 0:
859 cqr->status = DASD_CQR_IN_IO; 871 cqr->status = DASD_CQR_IN_IO;
@@ -862,11 +874,11 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
862 cqr); 874 cqr);
863 break; 875 break;
864 case -EBUSY: 876 case -EBUSY:
865 DBF_DEV_EVENT(DBF_ERR, device, "%s", 877 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
866 "start_IO: device busy, retry later"); 878 "start_IO: device busy, retry later");
867 break; 879 break;
868 case -ETIMEDOUT: 880 case -ETIMEDOUT:
869 DBF_DEV_EVENT(DBF_ERR, device, "%s", 881 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
870 "start_IO: request timeout, retry later"); 882 "start_IO: request timeout, retry later");
871 break; 883 break;
872 case -EACCES: 884 case -EACCES:
@@ -876,19 +888,24 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
876 * Do a retry with all available pathes. 888 * Do a retry with all available pathes.
877 */ 889 */
878 cqr->lpm = LPM_ANYPATH; 890 cqr->lpm = LPM_ANYPATH;
879 DBF_DEV_EVENT(DBF_ERR, device, "%s", 891 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
880 "start_IO: selected pathes gone," 892 "start_IO: selected pathes gone,"
881 " retry on all pathes"); 893 " retry on all pathes");
882 break; 894 break;
883 case -ENODEV: 895 case -ENODEV:
896 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
897 "start_IO: -ENODEV device gone, retry");
898 break;
884 case -EIO: 899 case -EIO:
885 DBF_DEV_EVENT(DBF_ERR, device, "%s", 900 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
886 "start_IO: device gone, retry"); 901 "start_IO: -EIO device gone, retry");
887 break; 902 break;
888 default: 903 default:
889 DEV_MESSAGE(KERN_ERR, device, 904 /* internal error 11 - unknown rc */
890 "line %d unknown RC=%d, please report" 905 snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
891 " to linux390@de.ibm.com", __LINE__, rc); 906 dev_err(&device->cdev->dev,
907 "An error occurred in the DASD device driver, "
908 "reason=%s\n", errorstring);
892 BUG(); 909 BUG();
893 break; 910 break;
894 } 911 }
@@ -945,7 +962,7 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
945 return; 962 return;
946 cqr = (struct dasd_ccw_req *) intparm; 963 cqr = (struct dasd_ccw_req *) intparm;
947 if (cqr->status != DASD_CQR_IN_IO) { 964 if (cqr->status != DASD_CQR_IN_IO) {
948 MESSAGE(KERN_DEBUG, 965 DBF_EVENT(DBF_DEBUG,
949 "invalid status in handle_killed_request: " 966 "invalid status in handle_killed_request: "
950 "bus_id %s, status %02x", 967 "bus_id %s, status %02x",
951 dev_name(&cdev->dev), cqr->status); 968 dev_name(&cdev->dev), cqr->status);
@@ -956,8 +973,8 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
956 if (device == NULL || 973 if (device == NULL ||
957 device != dasd_device_from_cdev_locked(cdev) || 974 device != dasd_device_from_cdev_locked(cdev) ||
958 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 975 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
959 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", 976 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
960 dev_name(&cdev->dev)); 977 "bus_id %s", dev_name(&cdev->dev));
961 return; 978 return;
962 } 979 }
963 980
@@ -996,11 +1013,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
996 case -EIO: 1013 case -EIO:
997 break; 1014 break;
998 case -ETIMEDOUT: 1015 case -ETIMEDOUT:
999 printk(KERN_WARNING"%s(%s): request timed out\n", 1016 DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n",
1000 __func__, dev_name(&cdev->dev)); 1017 __func__, dev_name(&cdev->dev));
1001 break; 1018 break;
1002 default: 1019 default:
1003 printk(KERN_WARNING"%s(%s): unknown error %ld\n", 1020 DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n",
1004 __func__, dev_name(&cdev->dev), PTR_ERR(irb)); 1021 __func__, dev_name(&cdev->dev), PTR_ERR(irb));
1005 } 1022 }
1006 dasd_handle_killed_request(cdev, intparm); 1023 dasd_handle_killed_request(cdev, intparm);
@@ -1009,15 +1026,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1009 1026
1010 now = get_clock(); 1027 now = get_clock();
1011 1028
1012 DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
1013 dev_name(&cdev->dev), ((irb->scsw.cmd.cstat << 8) |
1014 irb->scsw.cmd.dstat), (unsigned int) intparm);
1015
1016 /* check for unsolicited interrupts */ 1029 /* check for unsolicited interrupts */
1017 cqr = (struct dasd_ccw_req *) intparm; 1030 cqr = (struct dasd_ccw_req *) intparm;
1018 if (!cqr || ((irb->scsw.cmd.cc == 1) && 1031 if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
1019 (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && 1032 (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
1020 (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND))) { 1033 (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
1021 if (cqr && cqr->status == DASD_CQR_IN_IO) 1034 if (cqr && cqr->status == DASD_CQR_IN_IO)
1022 cqr->status = DASD_CQR_QUEUED; 1035 cqr->status = DASD_CQR_QUEUED;
1023 device = dasd_device_from_cdev_locked(cdev); 1036 device = dasd_device_from_cdev_locked(cdev);
@@ -1033,14 +1046,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1033 device = (struct dasd_device *) cqr->startdev; 1046 device = (struct dasd_device *) cqr->startdev;
1034 if (!device || 1047 if (!device ||
1035 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1048 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1036 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", 1049 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
1037 dev_name(&cdev->dev)); 1050 "bus_id %s", dev_name(&cdev->dev));
1038 return; 1051 return;
1039 } 1052 }
1040 1053
1041 /* Check for clear pending */ 1054 /* Check for clear pending */
1042 if (cqr->status == DASD_CQR_CLEAR_PENDING && 1055 if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1043 irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) { 1056 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1044 cqr->status = DASD_CQR_CLEARED; 1057 cqr->status = DASD_CQR_CLEARED;
1045 dasd_device_clear_timer(device); 1058 dasd_device_clear_timer(device);
1046 wake_up(&dasd_flush_wq); 1059 wake_up(&dasd_flush_wq);
@@ -1048,19 +1061,17 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1048 return; 1061 return;
1049 } 1062 }
1050 1063
1051 /* check status - the request might have been killed by dyn detach */ 1064 /* check status - the request might have been killed by dyn detach */
1052 if (cqr->status != DASD_CQR_IN_IO) { 1065 if (cqr->status != DASD_CQR_IN_IO) {
1053 MESSAGE(KERN_DEBUG, 1066 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
1054 "invalid status: bus_id %s, status %02x", 1067 "status %02x", dev_name(&cdev->dev), cqr->status);
1055 dev_name(&cdev->dev), cqr->status);
1056 return; 1068 return;
1057 } 1069 }
1058 DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p", 1070
1059 ((irb->scsw.cmd.cstat << 8) | irb->scsw.cmd.dstat), cqr);
1060 next = NULL; 1071 next = NULL;
1061 expires = 0; 1072 expires = 0;
1062 if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1073 if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1063 irb->scsw.cmd.cstat == 0 && !irb->esw.esw0.erw.cons) { 1074 scsw_cstat(&irb->scsw) == 0) {
1064 /* request was completed successfully */ 1075 /* request was completed successfully */
1065 cqr->status = DASD_CQR_SUCCESS; 1076 cqr->status = DASD_CQR_SUCCESS;
1066 cqr->stopclk = now; 1077 cqr->stopclk = now;
@@ -1071,18 +1082,23 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1071 } 1082 }
1072 } else { /* error */ 1083 } else { /* error */
1073 memcpy(&cqr->irb, irb, sizeof(struct irb)); 1084 memcpy(&cqr->irb, irb, sizeof(struct irb));
1085 /* log sense for every failed I/O to s390 debugfeature */
1086 dasd_log_sense_dbf(cqr, irb);
1074 if (device->features & DASD_FEATURE_ERPLOG) { 1087 if (device->features & DASD_FEATURE_ERPLOG) {
1075 dasd_log_sense(cqr, irb); 1088 dasd_log_sense(cqr, irb);
1076 } 1089 }
1090
1077 /* 1091 /*
1078 * If we don't want complex ERP for this request, then just 1092 * If we don't want complex ERP for this request, then just
1079 * reset this and retry it in the fastpath 1093 * reset this and retry it in the fastpath
1080 */ 1094 */
1081 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && 1095 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1082 cqr->retries > 0) { 1096 cqr->retries > 0) {
1083 DEV_MESSAGE(KERN_DEBUG, device, 1097 if (cqr->lpm == LPM_ANYPATH)
1084 "default ERP in fastpath (%i retries left)", 1098 DBF_DEV_EVENT(DBF_DEBUG, device,
1085 cqr->retries); 1099 "default ERP in fastpath "
1100 "(%i retries left)",
1101 cqr->retries);
1086 cqr->lpm = LPM_ANYPATH; 1102 cqr->lpm = LPM_ANYPATH;
1087 cqr->status = DASD_CQR_QUEUED; 1103 cqr->status = DASD_CQR_QUEUED;
1088 next = cqr; 1104 next = cqr;
@@ -1093,10 +1109,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1093 (!device->stopped)) { 1109 (!device->stopped)) {
1094 if (device->discipline->start_IO(next) == 0) 1110 if (device->discipline->start_IO(next) == 0)
1095 expires = next->expires; 1111 expires = next->expires;
1096 else
1097 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1098 "Interrupt fastpath "
1099 "failed!");
1100 } 1112 }
1101 if (expires != 0) 1113 if (expires != 0)
1102 dasd_device_set_timer(device, expires); 1114 dasd_device_set_timer(device, expires);
@@ -1169,6 +1181,7 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
1169 struct dasd_block *block; 1181 struct dasd_block *block;
1170 void (*callback)(struct dasd_ccw_req *, void *data); 1182 void (*callback)(struct dasd_ccw_req *, void *data);
1171 void *callback_data; 1183 void *callback_data;
1184 char errorstring[ERRORLENGTH];
1172 1185
1173 list_for_each_safe(l, n, final_queue) { 1186 list_for_each_safe(l, n, final_queue) {
1174 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1187 cqr = list_entry(l, struct dasd_ccw_req, devlist);
@@ -1189,10 +1202,11 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
1189 cqr->status = DASD_CQR_TERMINATED; 1202 cqr->status = DASD_CQR_TERMINATED;
1190 break; 1203 break;
1191 default: 1204 default:
1192 DEV_MESSAGE(KERN_ERR, device, 1205 /* internal error 12 - wrong cqr status*/
1193 "wrong cqr status in __dasd_process_final_queue " 1206 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
1194 "for cqr %p, status %x", 1207 dev_err(&device->cdev->dev,
1195 cqr, cqr->status); 1208 "An error occurred in the DASD device driver, "
1209 "reason=%s\n", errorstring);
1196 BUG(); 1210 BUG();
1197 } 1211 }
1198 if (cqr->callback != NULL) 1212 if (cqr->callback != NULL)
@@ -1217,18 +1231,17 @@ static void __dasd_device_check_expire(struct dasd_device *device)
1217 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1231 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1218 if (device->discipline->term_IO(cqr) != 0) { 1232 if (device->discipline->term_IO(cqr) != 0) {
1219 /* Hmpf, try again in 5 sec */ 1233 /* Hmpf, try again in 5 sec */
1220 DEV_MESSAGE(KERN_ERR, device, 1234 dev_err(&device->cdev->dev,
1221 "internal error - timeout (%is) expired " 1235 "cqr %p timed out (%is) but cannot be "
1222 "for cqr %p, termination failed, " 1236 "ended, retrying in 5 s\n",
1223 "retrying in 5s", 1237 cqr, (cqr->expires/HZ));
1224 (cqr->expires/HZ), cqr);
1225 cqr->expires += 5*HZ; 1238 cqr->expires += 5*HZ;
1226 dasd_device_set_timer(device, 5*HZ); 1239 dasd_device_set_timer(device, 5*HZ);
1227 } else { 1240 } else {
1228 DEV_MESSAGE(KERN_ERR, device, 1241 dev_err(&device->cdev->dev,
1229 "internal error - timeout (%is) expired " 1242 "cqr %p timed out (%is), %i retries "
1230 "for cqr %p (%i retries left)", 1243 "remaining\n", cqr, (cqr->expires/HZ),
1231 (cqr->expires/HZ), cqr, cqr->retries); 1244 cqr->retries);
1232 } 1245 }
1233 } 1246 }
1234} 1247}
@@ -1290,10 +1303,9 @@ int dasd_flush_device_queue(struct dasd_device *device)
1290 rc = device->discipline->term_IO(cqr); 1303 rc = device->discipline->term_IO(cqr);
1291 if (rc) { 1304 if (rc) {
1292 /* unable to terminate requeust */ 1305 /* unable to terminate requeust */
1293 DEV_MESSAGE(KERN_ERR, device, 1306 dev_err(&device->cdev->dev,
1294 "dasd flush ccw_queue is unable " 1307 "Flushing the DASD request queue "
1295 " to terminate request %p", 1308 "failed for request %p\n", cqr);
1296 cqr);
1297 /* stop flush processing */ 1309 /* stop flush processing */
1298 goto finished; 1310 goto finished;
1299 } 1311 }
@@ -1537,10 +1549,9 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
1537 /* request in IO - terminate IO and release again */ 1549 /* request in IO - terminate IO and release again */
1538 rc = device->discipline->term_IO(cqr); 1550 rc = device->discipline->term_IO(cqr);
1539 if (rc) { 1551 if (rc) {
1540 DEV_MESSAGE(KERN_ERR, device, 1552 dev_err(&device->cdev->dev,
1541 "dasd_cancel_req is unable " 1553 "Cancelling request %p failed with rc=%d\n",
1542 " to terminate request %p, rc = %d", 1554 cqr, rc);
1543 cqr, rc);
1544 } else { 1555 } else {
1545 cqr->stopclk = get_clock(); 1556 cqr->stopclk = get_clock();
1546 rc = 1; 1557 rc = 1;
@@ -1617,7 +1628,7 @@ static inline void __dasd_block_process_erp(struct dasd_block *block,
1617 if (cqr->status == DASD_CQR_DONE) 1628 if (cqr->status == DASD_CQR_DONE)
1618 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 1629 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1619 else 1630 else
1620 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful"); 1631 dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
1621 erp_fn = device->discipline->erp_postaction(cqr); 1632 erp_fn = device->discipline->erp_postaction(cqr);
1622 erp_fn(cqr); 1633 erp_fn(cqr);
1623} 1634}
@@ -1991,8 +2002,11 @@ static void dasd_setup_queue(struct dasd_block *block)
1991 blk_queue_max_sectors(block->request_queue, max); 2002 blk_queue_max_sectors(block->request_queue, max);
1992 blk_queue_max_phys_segments(block->request_queue, -1L); 2003 blk_queue_max_phys_segments(block->request_queue, -1L);
1993 blk_queue_max_hw_segments(block->request_queue, -1L); 2004 blk_queue_max_hw_segments(block->request_queue, -1L);
1994 blk_queue_max_segment_size(block->request_queue, -1L); 2005 /* with page sized segments we can translate each segement into
1995 blk_queue_segment_boundary(block->request_queue, -1L); 2006 * one idaw/tidaw
2007 */
2008 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
2009 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
1996 blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL); 2010 blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
1997} 2011}
1998 2012
@@ -2043,8 +2057,9 @@ static int dasd_open(struct block_device *bdev, fmode_t mode)
2043 } 2057 }
2044 2058
2045 if (dasd_probeonly) { 2059 if (dasd_probeonly) {
2046 DEV_MESSAGE(KERN_INFO, base, "%s", 2060 dev_info(&base->cdev->dev,
2047 "No access to device due to probeonly mode"); 2061 "Accessing the DASD failed because it is in "
2062 "probeonly mode\n");
2048 rc = -EPERM; 2063 rc = -EPERM;
2049 goto out; 2064 goto out;
2050 } 2065 }
@@ -2101,7 +2116,8 @@ dasd_device_operations = {
2101 .owner = THIS_MODULE, 2116 .owner = THIS_MODULE,
2102 .open = dasd_open, 2117 .open = dasd_open,
2103 .release = dasd_release, 2118 .release = dasd_release,
2104 .locked_ioctl = dasd_ioctl, 2119 .ioctl = dasd_ioctl,
2120 .compat_ioctl = dasd_ioctl,
2105 .getgeo = dasd_getgeo, 2121 .getgeo = dasd_getgeo,
2106}; 2122};
2107 2123
@@ -2143,14 +2159,14 @@ int dasd_generic_probe(struct ccw_device *cdev,
2143 2159
2144 ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); 2160 ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
2145 if (ret) { 2161 if (ret) {
2146 printk(KERN_WARNING 2162 DBF_EVENT(DBF_WARNING,
2147 "dasd_generic_probe: could not set ccw-device options " 2163 "dasd_generic_probe: could not set ccw-device options "
2148 "for %s\n", dev_name(&cdev->dev)); 2164 "for %s\n", dev_name(&cdev->dev));
2149 return ret; 2165 return ret;
2150 } 2166 }
2151 ret = dasd_add_sysfs_files(cdev); 2167 ret = dasd_add_sysfs_files(cdev);
2152 if (ret) { 2168 if (ret) {
2153 printk(KERN_WARNING 2169 DBF_EVENT(DBF_WARNING,
2154 "dasd_generic_probe: could not add sysfs entries " 2170 "dasd_generic_probe: could not add sysfs entries "
2155 "for %s\n", dev_name(&cdev->dev)); 2171 "for %s\n", dev_name(&cdev->dev));
2156 return ret; 2172 return ret;
@@ -2166,9 +2182,7 @@ int dasd_generic_probe(struct ccw_device *cdev,
2166 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 2182 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
2167 ret = ccw_device_set_online(cdev); 2183 ret = ccw_device_set_online(cdev);
2168 if (ret) 2184 if (ret)
2169 printk(KERN_WARNING 2185 pr_warning("%s: Setting the DASD online failed with rc=%d\n",
2170 "dasd_generic_probe: could not initially "
2171 "online ccw-device %s; return code: %d\n",
2172 dev_name(&cdev->dev), ret); 2186 dev_name(&cdev->dev), ret);
2173 return 0; 2187 return 0;
2174} 2188}
@@ -2232,10 +2246,9 @@ int dasd_generic_set_online(struct ccw_device *cdev,
2232 discipline = base_discipline; 2246 discipline = base_discipline;
2233 if (device->features & DASD_FEATURE_USEDIAG) { 2247 if (device->features & DASD_FEATURE_USEDIAG) {
2234 if (!dasd_diag_discipline_pointer) { 2248 if (!dasd_diag_discipline_pointer) {
2235 printk (KERN_WARNING 2249 pr_warning("%s Setting the DASD online failed because "
2236 "dasd_generic couldn't online device %s " 2250 "of missing DIAG discipline\n",
2237 "- discipline DIAG not available\n", 2251 dev_name(&cdev->dev));
2238 dev_name(&cdev->dev));
2239 dasd_delete_device(device); 2252 dasd_delete_device(device);
2240 return -ENODEV; 2253 return -ENODEV;
2241 } 2254 }
@@ -2256,10 +2269,9 @@ int dasd_generic_set_online(struct ccw_device *cdev,
2256 /* check_device will allocate block device if necessary */ 2269 /* check_device will allocate block device if necessary */
2257 rc = discipline->check_device(device); 2270 rc = discipline->check_device(device);
2258 if (rc) { 2271 if (rc) {
2259 printk (KERN_WARNING 2272 pr_warning("%s Setting the DASD online with discipline %s "
2260 "dasd_generic couldn't online device %s " 2273 "failed with rc=%i\n",
2261 "with discipline %s rc=%i\n", 2274 dev_name(&cdev->dev), discipline->name, rc);
2262 dev_name(&cdev->dev), discipline->name, rc);
2263 module_put(discipline->owner); 2275 module_put(discipline->owner);
2264 module_put(base_discipline->owner); 2276 module_put(base_discipline->owner);
2265 dasd_delete_device(device); 2277 dasd_delete_device(device);
@@ -2268,9 +2280,8 @@ int dasd_generic_set_online(struct ccw_device *cdev,
2268 2280
2269 dasd_set_target_state(device, DASD_STATE_ONLINE); 2281 dasd_set_target_state(device, DASD_STATE_ONLINE);
2270 if (device->state <= DASD_STATE_KNOWN) { 2282 if (device->state <= DASD_STATE_KNOWN) {
2271 printk (KERN_WARNING 2283 pr_warning("%s Setting the DASD online failed because of a "
2272 "dasd_generic discipline not found for %s\n", 2284 "missing discipline\n", dev_name(&cdev->dev));
2273 dev_name(&cdev->dev));
2274 rc = -ENODEV; 2285 rc = -ENODEV;
2275 dasd_set_target_state(device, DASD_STATE_NEW); 2286 dasd_set_target_state(device, DASD_STATE_NEW);
2276 if (device->block) 2287 if (device->block)
@@ -2314,13 +2325,13 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
2314 open_count = atomic_read(&device->block->open_count); 2325 open_count = atomic_read(&device->block->open_count);
2315 if (open_count > max_count) { 2326 if (open_count > max_count) {
2316 if (open_count > 0) 2327 if (open_count > 0)
2317 printk(KERN_WARNING "Can't offline dasd " 2328 pr_warning("%s: The DASD cannot be set offline "
2318 "device with open count = %i.\n", 2329 "with open count %i\n",
2319 open_count); 2330 dev_name(&cdev->dev), open_count);
2320 else 2331 else
2321 printk(KERN_WARNING "%s", 2332 pr_warning("%s: The DASD cannot be set offline "
2322 "Can't offline dasd device due " 2333 "while it is in use\n",
2323 "to internal use\n"); 2334 dev_name(&cdev->dev));
2324 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 2335 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
2325 dasd_put_device(device); 2336 dasd_put_device(device);
2326 return -EBUSY; 2337 return -EBUSY;
@@ -2393,8 +2404,10 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2393 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); 2404 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
2394 2405
2395 if (IS_ERR(cqr)) { 2406 if (IS_ERR(cqr)) {
2396 DEV_MESSAGE(KERN_WARNING, device, "%s", 2407 /* internal error 13 - Allocating the RDC request failed*/
2397 "Could not allocate RDC request"); 2408 dev_err(&device->cdev->dev,
2409 "An error occurred in the DASD device driver, "
2410 "reason=%s\n", "13");
2398 return cqr; 2411 return cqr;
2399 } 2412 }
2400 2413
@@ -2431,6 +2444,40 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
2431} 2444}
2432EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); 2445EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
2433 2446
2447/*
2448 * In command mode and transport mode we need to look for sense
2449 * data in different places. The sense data itself is allways
2450 * an array of 32 bytes, so we can unify the sense data access
2451 * for both modes.
2452 */
2453char *dasd_get_sense(struct irb *irb)
2454{
2455 struct tsb *tsb = NULL;
2456 char *sense = NULL;
2457
2458 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
2459 if (irb->scsw.tm.tcw)
2460 tsb = tcw_get_tsb((struct tcw *)(unsigned long)
2461 irb->scsw.tm.tcw);
2462 if (tsb && tsb->length == 64 && tsb->flags)
2463 switch (tsb->flags & 0x07) {
2464 case 1: /* tsa_iostat */
2465 sense = tsb->tsa.iostat.sense;
2466 break;
2467 case 2: /* tsa_ddpc */
2468 sense = tsb->tsa.ddpc.sense;
2469 break;
2470 default:
2471 /* currently we don't use interrogate data */
2472 break;
2473 }
2474 } else if (irb->esw.esw0.erw.cons) {
2475 sense = irb->ecw;
2476 }
2477 return sense;
2478}
2479EXPORT_SYMBOL_GPL(dasd_get_sense);
2480
2434static int __init dasd_init(void) 2481static int __init dasd_init(void)
2435{ 2482{
2436 int rc; 2483 int rc;
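Editor's note: dasd_get_sense(), added in the previous hunk, hides whether the IRB came from a command-mode (CCW) or transport-mode (TCW) channel program and always returns the 32-byte sense array, taken either from the TSB's iostat/ddpc area or from irb->ecw. Below is an illustrative-only caller sketch, not code from the patch; the sense[25] == 0x1D "state change pending" test mirrors the existing check in dasd_3990_erp.c.

/*
 * Illustrative sketch: a caller that does not care about the channel
 * program mode fetches the unified sense array and tests single bytes.
 */
static int example_state_change_pending(struct irb *irb)
{
	char *sense = dasd_get_sense(irb);

	if (!sense)
		return 0;	/* no sense data available */
	return sense[25] == 0x1D;
}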
@@ -2472,7 +2519,7 @@ static int __init dasd_init(void)
2472 2519
2473 return 0; 2520 return 0;
2474failed: 2521failed:
2475 MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors"); 2522 pr_info("The DASD device driver could not be initialized\n");
2476 dasd_exit(); 2523 dasd_exit();
2477 return rc; 2524 return rc;
2478} 2525}
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index d82aad5224f0..27991b692056 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -7,6 +7,8 @@
7 * 7 *
8 */ 8 */
9 9
10#define KMSG_COMPONENT "dasd"
11
10#include <linux/timer.h> 12#include <linux/timer.h>
11#include <linux/slab.h> 13#include <linux/slab.h>
12#include <asm/idals.h> 14#include <asm/idals.h>
@@ -75,7 +77,7 @@ dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
75 struct dasd_device *device = erp->startdev; 77 struct dasd_device *device = erp->startdev;
76 unsigned long flags; 78 unsigned long flags;
77 79
78 DEV_MESSAGE(KERN_INFO, device, 80 DBF_DEV_EVENT(DBF_INFO, device,
79 "blocking request queue for %is", expires/HZ); 81 "blocking request queue for %is", expires/HZ);
80 82
81 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 83 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
@@ -114,9 +116,9 @@ dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
114 } else { 116 } else {
115 117
116 /* issue a message and wait for 'device ready' interrupt */ 118 /* issue a message and wait for 'device ready' interrupt */
117 DEV_MESSAGE(KERN_ERR, device, "%s", 119 dev_err(&device->cdev->dev,
118 "is offline or not installed - " 120 "is offline or not installed - "
119 "INTERVENTION REQUIRED!!"); 121 "INTERVENTION REQUIRED!!\n");
120 122
121 dasd_3990_erp_block_queue(erp, 60*HZ); 123 dasd_3990_erp_block_queue(erp, 60*HZ);
122 } 124 }
@@ -158,7 +160,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
158 160
159 if ((erp->lpm & opm) != 0x00) { 161 if ((erp->lpm & opm) != 0x00) {
160 162
161 DEV_MESSAGE(KERN_DEBUG, device, 163 DBF_DEV_EVENT(DBF_WARNING, device,
162 "try alternate lpm=%x (lpum=%x / opm=%x)", 164 "try alternate lpm=%x (lpum=%x / opm=%x)",
163 erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm); 165 erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm);
164 166
@@ -166,10 +168,9 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
166 erp->status = DASD_CQR_FILLED; 168 erp->status = DASD_CQR_FILLED;
167 erp->retries = 10; 169 erp->retries = 10;
168 } else { 170 } else {
169 DEV_MESSAGE(KERN_ERR, device, 171 dev_err(&device->cdev->dev,
170 "No alternate channel path left (lpum=%x / " 172 "The DASD cannot be reached on any path (lpum=%x"
171 "opm=%x) -> permanent error", 173 "/opm=%x)\n", erp->irb.esw.esw0.sublog.lpum, opm);
172 erp->irb.esw.esw0.sublog.lpum, opm);
173 174
174 /* post request with permanent error */ 175 /* post request with permanent error */
175 erp->status = DASD_CQR_FAILED; 176 erp->status = DASD_CQR_FAILED;
@@ -204,8 +205,8 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
204 sizeof(struct DCTL_data), 205 sizeof(struct DCTL_data),
205 device); 206 device);
206 if (IS_ERR(dctl_cqr)) { 207 if (IS_ERR(dctl_cqr)) {
207 DEV_MESSAGE(KERN_ERR, device, "%s", 208 dev_err(&device->cdev->dev,
208 "Unable to allocate DCTL-CQR"); 209 "Unable to allocate DCTL-CQR\n");
209 erp->status = DASD_CQR_FAILED; 210 erp->status = DASD_CQR_FAILED;
210 return erp; 211 return erp;
211 } 212 }
@@ -294,7 +295,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
294 /* interrupt (this enables easier enqueing of the cqr) */ 295 /* interrupt (this enables easier enqueing of the cqr) */
295 if (erp->function != dasd_3990_erp_action_4) { 296 if (erp->function != dasd_3990_erp_action_4) {
296 297
297 DEV_MESSAGE(KERN_INFO, device, "%s", 298 DBF_DEV_EVENT(DBF_INFO, device, "%s",
298 "dasd_3990_erp_action_4: first time retry"); 299 "dasd_3990_erp_action_4: first time retry");
299 300
300 erp->retries = 256; 301 erp->retries = 256;
@@ -303,7 +304,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
303 } else { 304 } else {
304 if (sense && (sense[25] == 0x1D)) { /* state change pending */ 305 if (sense && (sense[25] == 0x1D)) { /* state change pending */
305 306
306 DEV_MESSAGE(KERN_INFO, device, 307 DBF_DEV_EVENT(DBF_INFO, device,
307 "waiting for state change pending " 308 "waiting for state change pending "
308 "interrupt, %d retries left", 309 "interrupt, %d retries left",
309 erp->retries); 310 erp->retries);
@@ -311,15 +312,14 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
311 dasd_3990_erp_block_queue(erp, 30*HZ); 312 dasd_3990_erp_block_queue(erp, 30*HZ);
312 313
313 } else if (sense && (sense[25] == 0x1E)) { /* busy */ 314 } else if (sense && (sense[25] == 0x1E)) { /* busy */
314 DEV_MESSAGE(KERN_INFO, device, 315 DBF_DEV_EVENT(DBF_INFO, device,
315 "busy - redriving request later, " 316 "busy - redriving request later, "
316 "%d retries left", 317 "%d retries left",
317 erp->retries); 318 erp->retries);
318 dasd_3990_erp_block_queue(erp, HZ); 319 dasd_3990_erp_block_queue(erp, HZ);
319 } else { 320 } else {
320
321 /* no state change pending - retry */ 321 /* no state change pending - retry */
322 DEV_MESSAGE (KERN_INFO, device, 322 DBF_DEV_EVENT(DBF_INFO, device,
323 "redriving request immediately, " 323 "redriving request immediately, "
324 "%d retries left", 324 "%d retries left",
325 erp->retries); 325 erp->retries);
@@ -384,6 +384,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
384 struct dasd_device *device = erp->startdev; 384 struct dasd_device *device = erp->startdev;
385 char msg_format = (sense[7] & 0xF0); 385 char msg_format = (sense[7] & 0xF0);
386 char msg_no = (sense[7] & 0x0F); 386 char msg_no = (sense[7] & 0x0F);
387 char errorstring[ERRORLENGTH];
387 388
388 switch (msg_format) { 389 switch (msg_format) {
389 case 0x00: /* Format 0 - Program or System Checks */ 390 case 0x00: /* Format 0 - Program or System Checks */
@@ -394,95 +395,97 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
394 case 0x00: /* No Message */ 395 case 0x00: /* No Message */
395 break; 396 break;
396 case 0x01: 397 case 0x01:
397 DEV_MESSAGE(KERN_WARNING, device, "%s", 398 dev_warn(&device->cdev->dev,
398 "FORMAT 0 - Invalid Command"); 399 "FORMAT 0 - Invalid Command\n");
399 break; 400 break;
400 case 0x02: 401 case 0x02:
401 DEV_MESSAGE(KERN_WARNING, device, "%s", 402 dev_warn(&device->cdev->dev,
402 "FORMAT 0 - Invalid Command " 403 "FORMAT 0 - Invalid Command "
403 "Sequence"); 404 "Sequence\n");
404 break; 405 break;
405 case 0x03: 406 case 0x03:
406 DEV_MESSAGE(KERN_WARNING, device, "%s", 407 dev_warn(&device->cdev->dev,
407 "FORMAT 0 - CCW Count less than " 408 "FORMAT 0 - CCW Count less than "
408 "required"); 409 "required\n");
409 break; 410 break;
410 case 0x04: 411 case 0x04:
411 DEV_MESSAGE(KERN_WARNING, device, "%s", 412 dev_warn(&device->cdev->dev,
412 "FORMAT 0 - Invalid Parameter"); 413 "FORMAT 0 - Invalid Parameter\n");
413 break; 414 break;
414 case 0x05: 415 case 0x05:
415 DEV_MESSAGE(KERN_WARNING, device, "%s", 416 dev_warn(&device->cdev->dev,
416 "FORMAT 0 - Diagnostic of Sepecial" 417 "FORMAT 0 - Diagnostic of Special"
417 " Command Violates File Mask"); 418 " Command Violates File Mask\n");
418 break; 419 break;
419 case 0x07: 420 case 0x07:
420 DEV_MESSAGE(KERN_WARNING, device, "%s", 421 dev_warn(&device->cdev->dev,
421 "FORMAT 0 - Channel Returned with " 422 "FORMAT 0 - Channel Returned with "
422 "Incorrect retry CCW"); 423 "Incorrect retry CCW\n");
423 break; 424 break;
424 case 0x08: 425 case 0x08:
425 DEV_MESSAGE(KERN_WARNING, device, "%s", 426 dev_warn(&device->cdev->dev,
426 "FORMAT 0 - Reset Notification"); 427 "FORMAT 0 - Reset Notification\n");
427 break; 428 break;
428 case 0x09: 429 case 0x09:
429 DEV_MESSAGE(KERN_WARNING, device, "%s", 430 dev_warn(&device->cdev->dev,
430 "FORMAT 0 - Storage Path Restart"); 431 "FORMAT 0 - Storage Path Restart\n");
431 break; 432 break;
432 case 0x0A: 433 case 0x0A:
433 DEV_MESSAGE(KERN_WARNING, device, 434 dev_warn(&device->cdev->dev,
434 "FORMAT 0 - Channel requested " 435 "FORMAT 0 - Channel requested "
435 "... %02x", sense[8]); 436 "... %02x\n", sense[8]);
436 break; 437 break;
437 case 0x0B: 438 case 0x0B:
438 DEV_MESSAGE(KERN_WARNING, device, "%s", 439 dev_warn(&device->cdev->dev,
439 "FORMAT 0 - Invalid Defective/" 440 "FORMAT 0 - Invalid Defective/"
440 "Alternate Track Pointer"); 441 "Alternate Track Pointer\n");
441 break; 442 break;
442 case 0x0C: 443 case 0x0C:
443 DEV_MESSAGE(KERN_WARNING, device, "%s", 444 dev_warn(&device->cdev->dev,
444 "FORMAT 0 - DPS Installation " 445 "FORMAT 0 - DPS Installation "
445 "Check"); 446 "Check\n");
446 break; 447 break;
447 case 0x0E: 448 case 0x0E:
448 DEV_MESSAGE(KERN_WARNING, device, "%s", 449 dev_warn(&device->cdev->dev,
449 "FORMAT 0 - Command Invalid on " 450 "FORMAT 0 - Command Invalid on "
450 "Secondary Address"); 451 "Secondary Address\n");
451 break; 452 break;
452 case 0x0F: 453 case 0x0F:
453 DEV_MESSAGE(KERN_WARNING, device, 454 dev_warn(&device->cdev->dev,
454 "FORMAT 0 - Status Not As " 455 "FORMAT 0 - Status Not As "
455 "Required: reason %02x", sense[8]); 456 "Required: reason %02x\n",
457 sense[8]);
456 break; 458 break;
457 default: 459 default:
458 DEV_MESSAGE(KERN_WARNING, device, "%s", 460 dev_warn(&device->cdev->dev,
459 "FORMAT 0 - Reseved"); 461 "FORMAT 0 - Reserved\n");
460 } 462 }
461 } else { 463 } else {
462 switch (msg_no) { 464 switch (msg_no) {
463 case 0x00: /* No Message */ 465 case 0x00: /* No Message */
464 break; 466 break;
465 case 0x01: 467 case 0x01:
466 DEV_MESSAGE(KERN_WARNING, device, "%s", 468 dev_warn(&device->cdev->dev,
467 "FORMAT 0 - Device Error Source"); 469 "FORMAT 0 - Device Error "
470 "Source\n");
468 break; 471 break;
469 case 0x02: 472 case 0x02:
470 DEV_MESSAGE(KERN_WARNING, device, "%s", 473 dev_warn(&device->cdev->dev,
471 "FORMAT 0 - Reserved"); 474 "FORMAT 0 - Reserved\n");
472 break; 475 break;
473 case 0x03: 476 case 0x03:
474 DEV_MESSAGE(KERN_WARNING, device, 477 dev_warn(&device->cdev->dev,
475 "FORMAT 0 - Device Fenced - " 478 "FORMAT 0 - Device Fenced - "
476 "device = %02x", sense[4]); 479 "device = %02x\n", sense[4]);
477 break; 480 break;
478 case 0x04: 481 case 0x04:
479 DEV_MESSAGE(KERN_WARNING, device, "%s", 482 dev_warn(&device->cdev->dev,
480 "FORMAT 0 - Data Pinned for " 483 "FORMAT 0 - Data Pinned for "
481 "Device"); 484 "Device\n");
482 break; 485 break;
483 default: 486 default:
484 DEV_MESSAGE(KERN_WARNING, device, "%s", 487 dev_warn(&device->cdev->dev,
485 "FORMAT 0 - Reserved"); 488 "FORMAT 0 - Reserved\n");
486 } 489 }
487 } 490 }
488 break; 491 break;
@@ -492,348 +495,352 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
492 case 0x00: /* No Message */ 495 case 0x00: /* No Message */
493 break; 496 break;
494 case 0x01: 497 case 0x01:
495 DEV_MESSAGE(KERN_WARNING, device, "%s", 498 dev_warn(&device->cdev->dev,
496 "FORMAT 1 - Device Status 1 not as " 499 "FORMAT 1 - Device Status 1 not as "
497 "expected"); 500 "expected\n");
498 break; 501 break;
499 case 0x03: 502 case 0x03:
500 DEV_MESSAGE(KERN_WARNING, device, "%s", 503 dev_warn(&device->cdev->dev,
501 "FORMAT 1 - Index missing"); 504 "FORMAT 1 - Index missing\n");
502 break; 505 break;
503 case 0x04: 506 case 0x04:
504 DEV_MESSAGE(KERN_WARNING, device, "%s", 507 dev_warn(&device->cdev->dev,
505 "FORMAT 1 - Interruption cannot be reset"); 508 "FORMAT 1 - Interruption cannot be "
509 "reset\n");
506 break; 510 break;
507 case 0x05: 511 case 0x05:
508 DEV_MESSAGE(KERN_WARNING, device, "%s", 512 dev_warn(&device->cdev->dev,
509 "FORMAT 1 - Device did not respond to " 513 "FORMAT 1 - Device did not respond to "
510 "selection"); 514 "selection\n");
511 break; 515 break;
512 case 0x06: 516 case 0x06:
513 DEV_MESSAGE(KERN_WARNING, device, "%s", 517 dev_warn(&device->cdev->dev,
514 "FORMAT 1 - Device check-2 error or Set " 518 "FORMAT 1 - Device check-2 error or Set "
515 "Sector is not complete"); 519 "Sector is not complete\n");
516 break; 520 break;
517 case 0x07: 521 case 0x07:
518 DEV_MESSAGE(KERN_WARNING, device, "%s", 522 dev_warn(&device->cdev->dev,
519 "FORMAT 1 - Head address does not " 523 "FORMAT 1 - Head address does not "
520 "compare"); 524 "compare\n");
521 break; 525 break;
522 case 0x08: 526 case 0x08:
523 DEV_MESSAGE(KERN_WARNING, device, "%s", 527 dev_warn(&device->cdev->dev,
524 "FORMAT 1 - Device status 1 not valid"); 528 "FORMAT 1 - Device status 1 not valid\n");
525 break; 529 break;
526 case 0x09: 530 case 0x09:
527 DEV_MESSAGE(KERN_WARNING, device, "%s", 531 dev_warn(&device->cdev->dev,
528 "FORMAT 1 - Device not ready"); 532 "FORMAT 1 - Device not ready\n");
529 break; 533 break;
530 case 0x0A: 534 case 0x0A:
531 DEV_MESSAGE(KERN_WARNING, device, "%s", 535 dev_warn(&device->cdev->dev,
532 "FORMAT 1 - Track physical address did " 536 "FORMAT 1 - Track physical address did "
533 "not compare"); 537 "not compare\n");
534 break; 538 break;
535 case 0x0B: 539 case 0x0B:
536 DEV_MESSAGE(KERN_WARNING, device, "%s", 540 dev_warn(&device->cdev->dev,
537 "FORMAT 1 - Missing device address bit"); 541 "FORMAT 1 - Missing device address bit\n");
538 break; 542 break;
539 case 0x0C: 543 case 0x0C:
540 DEV_MESSAGE(KERN_WARNING, device, "%s", 544 dev_warn(&device->cdev->dev,
541 "FORMAT 1 - Drive motor switch is off"); 545 "FORMAT 1 - Drive motor switch is off\n");
542 break; 546 break;
543 case 0x0D: 547 case 0x0D:
544 DEV_MESSAGE(KERN_WARNING, device, "%s", 548 dev_warn(&device->cdev->dev,
545 "FORMAT 1 - Seek incomplete"); 549 "FORMAT 1 - Seek incomplete\n");
546 break; 550 break;
547 case 0x0E: 551 case 0x0E:
548 DEV_MESSAGE(KERN_WARNING, device, "%s", 552 dev_warn(&device->cdev->dev,
549 "FORMAT 1 - Cylinder address did not " 553 "FORMAT 1 - Cylinder address did not "
550 "compare"); 554 "compare\n");
551 break; 555 break;
552 case 0x0F: 556 case 0x0F:
553 DEV_MESSAGE(KERN_WARNING, device, "%s", 557 dev_warn(&device->cdev->dev,
554 "FORMAT 1 - Offset active cannot be " 558 "FORMAT 1 - Offset active cannot be "
555 "reset"); 559 "reset\n");
556 break; 560 break;
557 default: 561 default:
558 DEV_MESSAGE(KERN_WARNING, device, "%s", 562 dev_warn(&device->cdev->dev,
559 "FORMAT 1 - Reserved"); 563 "FORMAT 1 - Reserved\n");
560 } 564 }
561 break; 565 break;
562 566
563 case 0x20: /* Format 2 - 3990 Equipment Checks */ 567 case 0x20: /* Format 2 - 3990 Equipment Checks */
564 switch (msg_no) { 568 switch (msg_no) {
565 case 0x08: 569 case 0x08:
566 DEV_MESSAGE(KERN_WARNING, device, "%s", 570 dev_warn(&device->cdev->dev,
567 "FORMAT 2 - 3990 check-2 error"); 571 "FORMAT 2 - 3990 check-2 error\n");
568 break; 572 break;
569 case 0x0E: 573 case 0x0E:
570 DEV_MESSAGE(KERN_WARNING, device, "%s", 574 dev_warn(&device->cdev->dev,
571 "FORMAT 2 - Support facility errors"); 575 "FORMAT 2 - Support facility errors\n");
572 break; 576 break;
573 case 0x0F: 577 case 0x0F:
574 DEV_MESSAGE(KERN_WARNING, device, 578 dev_warn(&device->cdev->dev,
575 "FORMAT 2 - Microcode detected error %02x", 579 "FORMAT 2 - Microcode detected error "
576 sense[8]); 580 "%02x\n",
581 sense[8]);
577 break; 582 break;
578 default: 583 default:
579 DEV_MESSAGE(KERN_WARNING, device, "%s", 584 dev_warn(&device->cdev->dev,
580 "FORMAT 2 - Reserved"); 585 "FORMAT 2 - Reserved\n");
581 } 586 }
582 break; 587 break;
583 588
584 case 0x30: /* Format 3 - 3990 Control Checks */ 589 case 0x30: /* Format 3 - 3990 Control Checks */
585 switch (msg_no) { 590 switch (msg_no) {
586 case 0x0F: 591 case 0x0F:
587 DEV_MESSAGE(KERN_WARNING, device, "%s", 592 dev_warn(&device->cdev->dev,
588 "FORMAT 3 - Allegiance terminated"); 593 "FORMAT 3 - Allegiance terminated\n");
589 break; 594 break;
590 default: 595 default:
591 DEV_MESSAGE(KERN_WARNING, device, "%s", 596 dev_warn(&device->cdev->dev,
592 "FORMAT 3 - Reserved"); 597 "FORMAT 3 - Reserved\n");
593 } 598 }
594 break; 599 break;
595 600
596 case 0x40: /* Format 4 - Data Checks */ 601 case 0x40: /* Format 4 - Data Checks */
597 switch (msg_no) { 602 switch (msg_no) {
598 case 0x00: 603 case 0x00:
599 DEV_MESSAGE(KERN_WARNING, device, "%s", 604 dev_warn(&device->cdev->dev,
600 "FORMAT 4 - Home address area error"); 605 "FORMAT 4 - Home address area error\n");
601 break; 606 break;
602 case 0x01: 607 case 0x01:
603 DEV_MESSAGE(KERN_WARNING, device, "%s", 608 dev_warn(&device->cdev->dev,
604 "FORMAT 4 - Count area error"); 609 "FORMAT 4 - Count area error\n");
605 break; 610 break;
606 case 0x02: 611 case 0x02:
607 DEV_MESSAGE(KERN_WARNING, device, "%s", 612 dev_warn(&device->cdev->dev,
608 "FORMAT 4 - Key area error"); 613 "FORMAT 4 - Key area error\n");
609 break; 614 break;
610 case 0x03: 615 case 0x03:
611 DEV_MESSAGE(KERN_WARNING, device, "%s", 616 dev_warn(&device->cdev->dev,
612 "FORMAT 4 - Data area error"); 617 "FORMAT 4 - Data area error\n");
613 break; 618 break;
614 case 0x04: 619 case 0x04:
615 DEV_MESSAGE(KERN_WARNING, device, "%s", 620 dev_warn(&device->cdev->dev,
616 "FORMAT 4 - No sync byte in home address " 621 "FORMAT 4 - No sync byte in home address "
617 "area"); 622 "area\n");
618 break; 623 break;
619 case 0x05: 624 case 0x05:
620 DEV_MESSAGE(KERN_WARNING, device, "%s", 625 dev_warn(&device->cdev->dev,
621 "FORMAT 4 - No sync byte in count address " 626 "FORMAT 4 - No sync byte in count address "
622 "area"); 627 "area\n");
623 break; 628 break;
624 case 0x06: 629 case 0x06:
625 DEV_MESSAGE(KERN_WARNING, device, "%s", 630 dev_warn(&device->cdev->dev,
626 "FORMAT 4 - No sync byte in key area"); 631 "FORMAT 4 - No sync byte in key area\n");
627 break; 632 break;
628 case 0x07: 633 case 0x07:
629 DEV_MESSAGE(KERN_WARNING, device, "%s", 634 dev_warn(&device->cdev->dev,
630 "FORMAT 4 - No sync byte in data area"); 635 "FORMAT 4 - No sync byte in data area\n");
631 break; 636 break;
632 case 0x08: 637 case 0x08:
633 DEV_MESSAGE(KERN_WARNING, device, "%s", 638 dev_warn(&device->cdev->dev,
634 "FORMAT 4 - Home address area error; " 639 "FORMAT 4 - Home address area error; "
635 "offset active"); 640 "offset active\n");
636 break; 641 break;
637 case 0x09: 642 case 0x09:
638 DEV_MESSAGE(KERN_WARNING, device, "%s", 643 dev_warn(&device->cdev->dev,
639 "FORMAT 4 - Count area error; offset " 644 "FORMAT 4 - Count area error; offset "
640 "active"); 645 "active\n");
641 break; 646 break;
642 case 0x0A: 647 case 0x0A:
643 DEV_MESSAGE(KERN_WARNING, device, "%s", 648 dev_warn(&device->cdev->dev,
644 "FORMAT 4 - Key area error; offset " 649 "FORMAT 4 - Key area error; offset "
645 "active"); 650 "active\n");
646 break; 651 break;
647 case 0x0B: 652 case 0x0B:
648 DEV_MESSAGE(KERN_WARNING, device, "%s", 653 dev_warn(&device->cdev->dev,
649 "FORMAT 4 - Data area error; " 654 "FORMAT 4 - Data area error; "
650 "offset active"); 655 "offset active\n");
651 break; 656 break;
652 case 0x0C: 657 case 0x0C:
653 DEV_MESSAGE(KERN_WARNING, device, "%s", 658 dev_warn(&device->cdev->dev,
654 "FORMAT 4 - No sync byte in home " 659 "FORMAT 4 - No sync byte in home "
655 "address area; offset active"); 660 "address area; offset active\n");
656 break; 661 break;
657 case 0x0D: 662 case 0x0D:
658 DEV_MESSAGE(KERN_WARNING, device, "%s", 663 dev_warn(&device->cdev->dev,
659 "FORMAT 4 - No syn byte in count " 664 "FORMAT 4 - No syn byte in count "
660 "address area; offset active"); 665 "address area; offset active\n");
661 break; 666 break;
662 case 0x0E: 667 case 0x0E:
663 DEV_MESSAGE(KERN_WARNING, device, "%s", 668 dev_warn(&device->cdev->dev,
664 "FORMAT 4 - No sync byte in key area; " 669 "FORMAT 4 - No sync byte in key area; "
665 "offset active"); 670 "offset active\n");
666 break; 671 break;
667 case 0x0F: 672 case 0x0F:
668 DEV_MESSAGE(KERN_WARNING, device, "%s", 673 dev_warn(&device->cdev->dev,
669 "FORMAT 4 - No syn byte in data area; " 674 "FORMAT 4 - No syn byte in data area; "
670 "offset active"); 675 "offset active\n");
671 break; 676 break;
672 default: 677 default:
673 DEV_MESSAGE(KERN_WARNING, device, "%s", 678 dev_warn(&device->cdev->dev,
674 "FORMAT 4 - Reserved"); 679 "FORMAT 4 - Reserved\n");
675 } 680 }
676 break; 681 break;
677 682
678 case 0x50: /* Format 5 - Data Check with displacement information */ 683 case 0x50: /* Format 5 - Data Check with displacement information */
679 switch (msg_no) { 684 switch (msg_no) {
680 case 0x00: 685 case 0x00:
681 DEV_MESSAGE(KERN_WARNING, device, "%s", 686 dev_warn(&device->cdev->dev,
682 "FORMAT 5 - Data Check in the " 687 "FORMAT 5 - Data Check in the "
683 "home address area"); 688 "home address area\n");
684 break; 689 break;
685 case 0x01: 690 case 0x01:
686 DEV_MESSAGE(KERN_WARNING, device, "%s", 691 dev_warn(&device->cdev->dev,
687 "FORMAT 5 - Data Check in the count area"); 692 "FORMAT 5 - Data Check in the count "
693 "area\n");
688 break; 694 break;
689 case 0x02: 695 case 0x02:
690 DEV_MESSAGE(KERN_WARNING, device, "%s", 696 dev_warn(&device->cdev->dev,
691 "FORMAT 5 - Data Check in the key area"); 697 "FORMAT 5 - Data Check in the key area\n");
692 break; 698 break;
693 case 0x03: 699 case 0x03:
694 DEV_MESSAGE(KERN_WARNING, device, "%s", 700 dev_warn(&device->cdev->dev,
695 "FORMAT 5 - Data Check in the data area"); 701 "FORMAT 5 - Data Check in the data "
702 "area\n");
696 break; 703 break;
697 case 0x08: 704 case 0x08:
698 DEV_MESSAGE(KERN_WARNING, device, "%s", 705 dev_warn(&device->cdev->dev,
699 "FORMAT 5 - Data Check in the " 706 "FORMAT 5 - Data Check in the "
700 "home address area; offset active"); 707 "home address area; offset active\n");
701 break; 708 break;
702 case 0x09: 709 case 0x09:
703 DEV_MESSAGE(KERN_WARNING, device, "%s", 710 dev_warn(&device->cdev->dev,
704 "FORMAT 5 - Data Check in the count area; " 711 "FORMAT 5 - Data Check in the count area; "
705 "offset active"); 712 "offset active\n");
706 break; 713 break;
707 case 0x0A: 714 case 0x0A:
708 DEV_MESSAGE(KERN_WARNING, device, "%s", 715 dev_warn(&device->cdev->dev,
709 "FORMAT 5 - Data Check in the key area; " 716 "FORMAT 5 - Data Check in the key area; "
710 "offset active"); 717 "offset active\n");
711 break; 718 break;
712 case 0x0B: 719 case 0x0B:
713 DEV_MESSAGE(KERN_WARNING, device, "%s", 720 dev_warn(&device->cdev->dev,
714 "FORMAT 5 - Data Check in the data area; " 721 "FORMAT 5 - Data Check in the data area; "
715 "offset active"); 722 "offset active\n");
716 break; 723 break;
717 default: 724 default:
718 DEV_MESSAGE(KERN_WARNING, device, "%s", 725 dev_warn(&device->cdev->dev,
719 "FORMAT 5 - Reserved"); 726 "FORMAT 5 - Reserved\n");
720 } 727 }
721 break; 728 break;
722 729
723 case 0x60: /* Format 6 - Usage Statistics/Overrun Errors */ 730 case 0x60: /* Format 6 - Usage Statistics/Overrun Errors */
724 switch (msg_no) { 731 switch (msg_no) {
725 case 0x00: 732 case 0x00:
726 DEV_MESSAGE(KERN_WARNING, device, "%s", 733 dev_warn(&device->cdev->dev,
727 "FORMAT 6 - Overrun on channel A"); 734 "FORMAT 6 - Overrun on channel A\n");
728 break; 735 break;
729 case 0x01: 736 case 0x01:
730 DEV_MESSAGE(KERN_WARNING, device, "%s", 737 dev_warn(&device->cdev->dev,
731 "FORMAT 6 - Overrun on channel B"); 738 "FORMAT 6 - Overrun on channel B\n");
732 break; 739 break;
733 case 0x02: 740 case 0x02:
734 DEV_MESSAGE(KERN_WARNING, device, "%s", 741 dev_warn(&device->cdev->dev,
735 "FORMAT 6 - Overrun on channel C"); 742 "FORMAT 6 - Overrun on channel C\n");
736 break; 743 break;
737 case 0x03: 744 case 0x03:
738 DEV_MESSAGE(KERN_WARNING, device, "%s", 745 dev_warn(&device->cdev->dev,
739 "FORMAT 6 - Overrun on channel D"); 746 "FORMAT 6 - Overrun on channel D\n");
740 break; 747 break;
741 case 0x04: 748 case 0x04:
742 DEV_MESSAGE(KERN_WARNING, device, "%s", 749 dev_warn(&device->cdev->dev,
743 "FORMAT 6 - Overrun on channel E"); 750 "FORMAT 6 - Overrun on channel E\n");
744 break; 751 break;
745 case 0x05: 752 case 0x05:
746 DEV_MESSAGE(KERN_WARNING, device, "%s", 753 dev_warn(&device->cdev->dev,
747 "FORMAT 6 - Overrun on channel F"); 754 "FORMAT 6 - Overrun on channel F\n");
748 break; 755 break;
749 case 0x06: 756 case 0x06:
750 DEV_MESSAGE(KERN_WARNING, device, "%s", 757 dev_warn(&device->cdev->dev,
751 "FORMAT 6 - Overrun on channel G"); 758 "FORMAT 6 - Overrun on channel G\n");
752 break; 759 break;
753 case 0x07: 760 case 0x07:
754 DEV_MESSAGE(KERN_WARNING, device, "%s", 761 dev_warn(&device->cdev->dev,
755 "FORMAT 6 - Overrun on channel H"); 762 "FORMAT 6 - Overrun on channel H\n");
756 break; 763 break;
757 default: 764 default:
758 DEV_MESSAGE(KERN_WARNING, device, "%s", 765 dev_warn(&device->cdev->dev,
759 "FORMAT 6 - Reserved"); 766 "FORMAT 6 - Reserved\n");
760 } 767 }
761 break; 768 break;
762 769
763 case 0x70: /* Format 7 - Device Connection Control Checks */ 770 case 0x70: /* Format 7 - Device Connection Control Checks */
764 switch (msg_no) { 771 switch (msg_no) {
765 case 0x00: 772 case 0x00:
766 DEV_MESSAGE(KERN_WARNING, device, "%s", 773 dev_warn(&device->cdev->dev,
767 "FORMAT 7 - RCC initiated by a connection " 774 "FORMAT 7 - RCC initiated by a connection "
768 "check alert"); 775 "check alert\n");
769 break; 776 break;
770 case 0x01: 777 case 0x01:
771 DEV_MESSAGE(KERN_WARNING, device, "%s", 778 dev_warn(&device->cdev->dev,
772 "FORMAT 7 - RCC 1 sequence not " 779 "FORMAT 7 - RCC 1 sequence not "
773 "successful"); 780 "successful\n");
774 break; 781 break;
775 case 0x02: 782 case 0x02:
776 DEV_MESSAGE(KERN_WARNING, device, "%s", 783 dev_warn(&device->cdev->dev,
777 "FORMAT 7 - RCC 1 and RCC 2 sequences not " 784 "FORMAT 7 - RCC 1 and RCC 2 sequences not "
778 "successful"); 785 "successful\n");
779 break; 786 break;
780 case 0x03: 787 case 0x03:
781 DEV_MESSAGE(KERN_WARNING, device, "%s", 788 dev_warn(&device->cdev->dev,
782 "FORMAT 7 - Invalid tag-in during " 789 "FORMAT 7 - Invalid tag-in during "
783 "selection sequence"); 790 "selection sequence\n");
784 break; 791 break;
785 case 0x04: 792 case 0x04:
786 DEV_MESSAGE(KERN_WARNING, device, "%s", 793 dev_warn(&device->cdev->dev,
787 "FORMAT 7 - extra RCC required"); 794 "FORMAT 7 - extra RCC required\n");
788 break; 795 break;
789 case 0x05: 796 case 0x05:
790 DEV_MESSAGE(KERN_WARNING, device, "%s", 797 dev_warn(&device->cdev->dev,
791 "FORMAT 7 - Invalid DCC selection " 798 "FORMAT 7 - Invalid DCC selection "
792 "response or timeout"); 799 "response or timeout\n");
793 break; 800 break;
794 case 0x06: 801 case 0x06:
795 DEV_MESSAGE(KERN_WARNING, device, "%s", 802 dev_warn(&device->cdev->dev,
796 "FORMAT 7 - Missing end operation; device " 803 "FORMAT 7 - Missing end operation; device "
797 "transfer complete"); 804 "transfer complete\n");
798 break; 805 break;
799 case 0x07: 806 case 0x07:
800 DEV_MESSAGE(KERN_WARNING, device, "%s", 807 dev_warn(&device->cdev->dev,
801 "FORMAT 7 - Missing end operation; device " 808 "FORMAT 7 - Missing end operation; device "
802 "transfer incomplete"); 809 "transfer incomplete\n");
803 break; 810 break;
804 case 0x08: 811 case 0x08:
805 DEV_MESSAGE(KERN_WARNING, device, "%s", 812 dev_warn(&device->cdev->dev,
806 "FORMAT 7 - Invalid tag-in for an " 813 "FORMAT 7 - Invalid tag-in for an "
807 "immediate command sequence"); 814 "immediate command sequence\n");
808 break; 815 break;
809 case 0x09: 816 case 0x09:
810 DEV_MESSAGE(KERN_WARNING, device, "%s", 817 dev_warn(&device->cdev->dev,
811 "FORMAT 7 - Invalid tag-in for an " 818 "FORMAT 7 - Invalid tag-in for an "
812 "extended command sequence"); 819 "extended command sequence\n");
813 break; 820 break;
814 case 0x0A: 821 case 0x0A:
815 DEV_MESSAGE(KERN_WARNING, device, "%s", 822 dev_warn(&device->cdev->dev,
816 "FORMAT 7 - 3990 microcode time out when " 823 "FORMAT 7 - 3990 microcode time out when "
817 "stopping selection"); 824 "stopping selection\n");
818 break; 825 break;
819 case 0x0B: 826 case 0x0B:
820 DEV_MESSAGE(KERN_WARNING, device, "%s", 827 dev_warn(&device->cdev->dev,
821 "FORMAT 7 - No response to selection " 828 "FORMAT 7 - No response to selection "
822 "after a poll interruption"); 829 "after a poll interruption\n");
823 break; 830 break;
824 case 0x0C: 831 case 0x0C:
825 DEV_MESSAGE(KERN_WARNING, device, "%s", 832 dev_warn(&device->cdev->dev,
826 "FORMAT 7 - Permanent path error (DASD " 833 "FORMAT 7 - Permanent path error (DASD "
827 "controller not available)"); 834 "controller not available)\n");
828 break; 835 break;
829 case 0x0D: 836 case 0x0D:
830 DEV_MESSAGE(KERN_WARNING, device, "%s", 837 dev_warn(&device->cdev->dev,
831 "FORMAT 7 - DASD controller not available" 838 "FORMAT 7 - DASD controller not available"
832 " on disconnected command chain"); 839 " on disconnected command chain\n");
833 break; 840 break;
834 default: 841 default:
835 DEV_MESSAGE(KERN_WARNING, device, "%s", 842 dev_warn(&device->cdev->dev,
836 "FORMAT 7 - Reserved"); 843 "FORMAT 7 - Reserved\n");
837 } 844 }
838 break; 845 break;
839 846
@@ -841,52 +848,52 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
841 switch (msg_no) { 848 switch (msg_no) {
842 case 0x00: /* No Message */ 849 case 0x00: /* No Message */
843 case 0x01: 850 case 0x01:
844 DEV_MESSAGE(KERN_WARNING, device, "%s", 851 dev_warn(&device->cdev->dev,
845 "FORMAT 8 - Error correction code " 852 "FORMAT 8 - Error correction code "
846 "hardware fault"); 853 "hardware fault\n");
847 break; 854 break;
848 case 0x03: 855 case 0x03:
849 DEV_MESSAGE(KERN_WARNING, device, "%s", 856 dev_warn(&device->cdev->dev,
850 "FORMAT 8 - Unexpected end operation " 857 "FORMAT 8 - Unexpected end operation "
851 "response code"); 858 "response code\n");
852 break; 859 break;
853 case 0x04: 860 case 0x04:
854 DEV_MESSAGE(KERN_WARNING, device, "%s", 861 dev_warn(&device->cdev->dev,
855 "FORMAT 8 - End operation with transfer " 862 "FORMAT 8 - End operation with transfer "
856 "count not zero"); 863 "count not zero\n");
857 break; 864 break;
858 case 0x05: 865 case 0x05:
859 DEV_MESSAGE(KERN_WARNING, device, "%s", 866 dev_warn(&device->cdev->dev,
860 "FORMAT 8 - End operation with transfer " 867 "FORMAT 8 - End operation with transfer "
861 "count zero"); 868 "count zero\n");
862 break; 869 break;
863 case 0x06: 870 case 0x06:
864 DEV_MESSAGE(KERN_WARNING, device, "%s", 871 dev_warn(&device->cdev->dev,
865 "FORMAT 8 - DPS checks after a system " 872 "FORMAT 8 - DPS checks after a system "
866 "reset or selective reset"); 873 "reset or selective reset\n");
867 break; 874 break;
868 case 0x07: 875 case 0x07:
869 DEV_MESSAGE(KERN_WARNING, device, "%s", 876 dev_warn(&device->cdev->dev,
870 "FORMAT 8 - DPS cannot be filled"); 877 "FORMAT 8 - DPS cannot be filled\n");
871 break; 878 break;
872 case 0x08: 879 case 0x08:
873 DEV_MESSAGE(KERN_WARNING, device, "%s", 880 dev_warn(&device->cdev->dev,
874 "FORMAT 8 - Short busy time-out during " 881 "FORMAT 8 - Short busy time-out during "
875 "device selection"); 882 "device selection\n");
876 break; 883 break;
877 case 0x09: 884 case 0x09:
878 DEV_MESSAGE(KERN_WARNING, device, "%s", 885 dev_warn(&device->cdev->dev,
879 "FORMAT 8 - DASD controller failed to " 886 "FORMAT 8 - DASD controller failed to "
880 "set or reset the long busy latch"); 887 "set or reset the long busy latch\n");
881 break; 888 break;
882 case 0x0A: 889 case 0x0A:
883 DEV_MESSAGE(KERN_WARNING, device, "%s", 890 dev_warn(&device->cdev->dev,
884 "FORMAT 8 - No interruption from device " 891 "FORMAT 8 - No interruption from device "
885 "during a command chain"); 892 "during a command chain\n");
886 break; 893 break;
887 default: 894 default:
888 DEV_MESSAGE(KERN_WARNING, device, "%s", 895 dev_warn(&device->cdev->dev,
889 "FORMAT 8 - Reserved"); 896 "FORMAT 8 - Reserved\n");
890 } 897 }
891 break; 898 break;
892 899
@@ -895,97 +902,100 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
895 case 0x00: 902 case 0x00:
896 break; /* No Message */ 903 break; /* No Message */
897 case 0x06: 904 case 0x06:
898 DEV_MESSAGE(KERN_WARNING, device, "%s", 905 dev_warn(&device->cdev->dev,
899 "FORMAT 9 - Device check-2 error"); 906 "FORMAT 9 - Device check-2 error\n");
900 break; 907 break;
901 case 0x07: 908 case 0x07:
902 DEV_MESSAGE(KERN_WARNING, device, "%s", 909 dev_warn(&device->cdev->dev,
903 "FORMAT 9 - Head address did not compare"); 910 "FORMAT 9 - Head address did not "
911 "compare\n");
904 break; 912 break;
905 case 0x0A: 913 case 0x0A:
906 DEV_MESSAGE(KERN_WARNING, device, "%s", 914 dev_warn(&device->cdev->dev,
907 "FORMAT 9 - Track physical address did " 915 "FORMAT 9 - Track physical address did "
908 "not compare while oriented"); 916 "not compare while oriented\n");
909 break; 917 break;
910 case 0x0E: 918 case 0x0E:
911 DEV_MESSAGE(KERN_WARNING, device, "%s", 919 dev_warn(&device->cdev->dev,
912 "FORMAT 9 - Cylinder address did not " 920 "FORMAT 9 - Cylinder address did not "
913 "compare"); 921 "compare\n");
914 break; 922 break;
915 default: 923 default:
916 DEV_MESSAGE(KERN_WARNING, device, "%s", 924 dev_warn(&device->cdev->dev,
917 "FORMAT 9 - Reserved"); 925 "FORMAT 9 - Reserved\n");
918 } 926 }
919 break; 927 break;
920 928
921 case 0xF0: /* Format F - Cache Storage Checks */ 929 case 0xF0: /* Format F - Cache Storage Checks */
922 switch (msg_no) { 930 switch (msg_no) {
923 case 0x00: 931 case 0x00:
924 DEV_MESSAGE(KERN_WARNING, device, "%s", 932 dev_warn(&device->cdev->dev,
925 "FORMAT F - Operation Terminated"); 933 "FORMAT F - Operation Terminated\n");
926 break; 934 break;
927 case 0x01: 935 case 0x01:
928 DEV_MESSAGE(KERN_WARNING, device, "%s", 936 dev_warn(&device->cdev->dev,
929 "FORMAT F - Subsystem Processing Error"); 937 "FORMAT F - Subsystem Processing Error\n");
930 break; 938 break;
931 case 0x02: 939 case 0x02:
932 DEV_MESSAGE(KERN_WARNING, device, "%s", 940 dev_warn(&device->cdev->dev,
933 "FORMAT F - Cache or nonvolatile storage " 941 "FORMAT F - Cache or nonvolatile storage "
934 "equipment failure"); 942 "equipment failure\n");
935 break; 943 break;
936 case 0x04: 944 case 0x04:
937 DEV_MESSAGE(KERN_WARNING, device, "%s", 945 dev_warn(&device->cdev->dev,
938 "FORMAT F - Caching terminated"); 946 "FORMAT F - Caching terminated\n");
939 break; 947 break;
940 case 0x06: 948 case 0x06:
941 DEV_MESSAGE(KERN_WARNING, device, "%s", 949 dev_warn(&device->cdev->dev,
942 "FORMAT F - Cache fast write access not " 950 "FORMAT F - Cache fast write access not "
943 "authorized"); 951 "authorized\n");
944 break; 952 break;
945 case 0x07: 953 case 0x07:
946 DEV_MESSAGE(KERN_WARNING, device, "%s", 954 dev_warn(&device->cdev->dev,
947 "FORMAT F - Track format incorrect"); 955 "FORMAT F - Track format incorrect\n");
948 break; 956 break;
949 case 0x09: 957 case 0x09:
950 DEV_MESSAGE(KERN_WARNING, device, "%s", 958 dev_warn(&device->cdev->dev,
951 "FORMAT F - Caching reinitiated"); 959 "FORMAT F - Caching reinitiated\n");
952 break; 960 break;
953 case 0x0A: 961 case 0x0A:
954 DEV_MESSAGE(KERN_WARNING, device, "%s", 962 dev_warn(&device->cdev->dev,
955 "FORMAT F - Nonvolatile storage " 963 "FORMAT F - Nonvolatile storage "
956 "terminated"); 964 "terminated\n");
957 break; 965 break;
958 case 0x0B: 966 case 0x0B:
959 DEV_MESSAGE(KERN_WARNING, device, "%s", 967 dev_warn(&device->cdev->dev,
960 "FORMAT F - Volume is suspended duplex"); 968 "FORMAT F - Volume is suspended duplex\n");
961 /* call extended error reporting (EER) */ 969 /* call extended error reporting (EER) */
962 dasd_eer_write(device, erp->refers, 970 dasd_eer_write(device, erp->refers,
963 DASD_EER_PPRCSUSPEND); 971 DASD_EER_PPRCSUSPEND);
964 break; 972 break;
965 case 0x0C: 973 case 0x0C:
966 DEV_MESSAGE(KERN_WARNING, device, "%s", 974 dev_warn(&device->cdev->dev,
967 "FORMAT F - Subsystem status connot be " 975 "FORMAT F - Subsystem status cannot be "
968 "determined"); 976 "determined\n");
969 break; 977 break;
970 case 0x0D: 978 case 0x0D:
971 DEV_MESSAGE(KERN_WARNING, device, "%s", 979 dev_warn(&device->cdev->dev,
972 "FORMAT F - Caching status reset to " 980 "FORMAT F - Caching status reset to "
973 "default"); 981 "default\n");
974 break; 982 break;
975 case 0x0E: 983 case 0x0E:
976 DEV_MESSAGE(KERN_WARNING, device, "%s", 984 dev_warn(&device->cdev->dev,
977 "FORMAT F - DASD Fast Write inhibited"); 985 "FORMAT F - DASD Fast Write inhibited\n");
978 break; 986 break;
979 default: 987 default:
980 DEV_MESSAGE(KERN_WARNING, device, "%s", 988 dev_warn(&device->cdev->dev,
981 "FORMAT D - Reserved"); 989 "FORMAT D - Reserved\n");
982 } 990 }
983 break; 991 break;
984 992
985 default: /* unknown message format - should not happen */ 993 default: /* unknown message format - should not happen
986 DEV_MESSAGE (KERN_WARNING, device, 994 internal error 03 - unknown message format */
987 "unknown message format %02x", 995 snprintf(errorstring, ERRORLENGTH, "03 %x02", msg_format);
988 msg_format); 996 dev_err(&device->cdev->dev,
997 "An error occurred in the DASD device driver, "
998 "reason=%s\n", errorstring);
989 break; 999 break;
990 } /* end switch message format */ 1000 } /* end switch message format */
991 1001
@@ -1015,7 +1025,7 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
1015 /* env data present (ACTION 10 - retry should work) */ 1025 /* env data present (ACTION 10 - retry should work) */
1016 if (sense[2] & SNS2_ENV_DATA_PRESENT) { 1026 if (sense[2] & SNS2_ENV_DATA_PRESENT) {
1017 1027
1018 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1028 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1019 "Command Reject - environmental data present"); 1029 "Command Reject - environmental data present");
1020 1030
1021 dasd_3990_handle_env_data(erp, sense); 1031 dasd_3990_handle_env_data(erp, sense);
@@ -1023,9 +1033,10 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
1023 erp->retries = 5; 1033 erp->retries = 5;
1024 1034
1025 } else { 1035 } else {
1026 /* fatal error - set status to FAILED */ 1036 /* fatal error - set status to FAILED
1027 DEV_MESSAGE(KERN_ERR, device, "%s", 1037 internal error 09 - Command Reject */
1028 "Command Reject - Fatal error"); 1038 dev_err(&device->cdev->dev, "An error occurred in the DASD "
1039 "device driver, reason=%s\n", "09");
1029 1040
1030 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); 1041 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
1031 } 1042 }
@@ -1061,7 +1072,7 @@ dasd_3990_erp_bus_out(struct dasd_ccw_req * erp)
1061 } else { 1072 } else {
1062 1073
1063 /* issue a message and wait for 'device ready' interrupt */ 1074 /* issue a message and wait for 'device ready' interrupt */
1064 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1075 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1065 "bus out parity error or BOPC requested by " 1076 "bus out parity error or BOPC requested by "
1066 "channel"); 1077 "channel");
1067 1078
@@ -1093,21 +1104,19 @@ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
1093 erp->function = dasd_3990_erp_equip_check; 1104 erp->function = dasd_3990_erp_equip_check;
1094 1105
1095 if (sense[1] & SNS1_WRITE_INHIBITED) { 1106 if (sense[1] & SNS1_WRITE_INHIBITED) {
1107 dev_info(&device->cdev->dev,
1108 "Write inhibited path encountered\n");
1096 1109
1097 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1110 /* vary path offline
1098 "Write inhibited path encountered"); 1111 internal error 04 - Path should be varied off-line.*/
1099 1112 dev_err(&device->cdev->dev, "An error occurred in the DASD "
1100 /* vary path offline */ 1113 "device driver, reason=%s\n", "04");
1101 DEV_MESSAGE(KERN_ERR, device, "%s",
1102 "Path should be varied off-line. "
1103 "This is not implemented yet \n - please report "
1104 "to linux390@de.ibm.com");
1105 1114
1106 erp = dasd_3990_erp_action_1(erp); 1115 erp = dasd_3990_erp_action_1(erp);
1107 1116
1108 } else if (sense[2] & SNS2_ENV_DATA_PRESENT) { 1117 } else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
1109 1118
1110 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1119 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1111 "Equipment Check - " "environmental data present"); 1120 "Equipment Check - " "environmental data present");
1112 1121
1113 dasd_3990_handle_env_data(erp, sense); 1122 dasd_3990_handle_env_data(erp, sense);
@@ -1116,7 +1125,7 @@ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
1116 1125
1117 } else if (sense[1] & SNS1_PERM_ERR) { 1126 } else if (sense[1] & SNS1_PERM_ERR) {
1118 1127
1119 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1128 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1120 "Equipment Check - retry exhausted or " 1129 "Equipment Check - retry exhausted or "
1121 "undesirable"); 1130 "undesirable");
1122 1131
@@ -1125,7 +1134,7 @@ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
1125 } else { 1134 } else {
1126 /* all other equipment checks - Action 5 */ 1135 /* all other equipment checks - Action 5 */
1127 /* rest is done when retries == 0 */ 1136 /* rest is done when retries == 0 */
1128 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1137 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1129 "Equipment check or processing error"); 1138 "Equipment check or processing error");
1130 1139
1131 erp = dasd_3990_erp_action_5(erp); 1140 erp = dasd_3990_erp_action_5(erp);
@@ -1156,9 +1165,9 @@ dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
1156 if (sense[2] & SNS2_CORRECTABLE) { /* correctable data check */ 1165 if (sense[2] & SNS2_CORRECTABLE) { /* correctable data check */
1157 1166
1158 /* issue message that the data has been corrected */ 1167 /* issue message that the data has been corrected */
1159 DEV_MESSAGE(KERN_EMERG, device, "%s", 1168 dev_emerg(&device->cdev->dev,
1160 "Data recovered during retry with PCI " 1169 "Data recovered during retry with PCI "
1161 "fetch mode active"); 1170 "fetch mode active\n");
1162 1171
1163 /* not possible to handle this situation in Linux */ 1172 /* not possible to handle this situation in Linux */
1164 panic("No way to inform application about the possibly " 1173 panic("No way to inform application about the possibly "
@@ -1166,7 +1175,7 @@ dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
1166 1175
1167 } else if (sense[2] & SNS2_ENV_DATA_PRESENT) { 1176 } else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
1168 1177
1169 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1178 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1170 "Uncorrectable data check recovered secondary " 1179 "Uncorrectable data check recovered secondary "
1171 "addr of duplex pair"); 1180 "addr of duplex pair");
1172 1181
@@ -1174,7 +1183,7 @@ dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
1174 1183
1175 } else if (sense[1] & SNS1_PERM_ERR) { 1184 } else if (sense[1] & SNS1_PERM_ERR) {
1176 1185
1177 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1186 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1178 "Uncorrectable data check with internal " 1187 "Uncorrectable data check with internal "
1179 "retry exhausted"); 1188 "retry exhausted");
1180 1189
@@ -1182,7 +1191,7 @@ dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
1182 1191
1183 } else { 1192 } else {
1184 /* all other data checks */ 1193 /* all other data checks */
1185 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1194 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1186 "Uncorrectable data check with retry count " 1195 "Uncorrectable data check with retry count "
1187 "exhausted..."); 1196 "exhausted...");
1188 1197
@@ -1212,7 +1221,7 @@ dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense)
1212 1221
1213 erp->function = dasd_3990_erp_overrun; 1222 erp->function = dasd_3990_erp_overrun;
1214 1223
1215 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1224 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1216 "Overrun - service overrun or overrun" 1225 "Overrun - service overrun or overrun"
1217 " error requested by channel"); 1226 " error requested by channel");
1218 1227
@@ -1243,7 +1252,7 @@ dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
1243 1252
1244 if (sense[2] & SNS2_ENV_DATA_PRESENT) { 1253 if (sense[2] & SNS2_ENV_DATA_PRESENT) {
1245 1254
1246 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1255 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1247 "Track format error when destaging or " 1256 "Track format error when destaging or "
1248 "staging data"); 1257 "staging data");
1249 1258
@@ -1252,8 +1261,10 @@ dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
1252 erp = dasd_3990_erp_action_4(erp, sense); 1261 erp = dasd_3990_erp_action_4(erp, sense);
1253 1262
1254 } else { 1263 } else {
1255 DEV_MESSAGE(KERN_ERR, device, "%s", 1264 /* internal error 06 - The track format is not valid*/
1256 "Invalid Track Format - Fatal error"); 1265 dev_err(&device->cdev->dev,
1266 "An error occurred in the DASD device driver, "
1267 "reason=%s\n", "06");
1257 1268
1258 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); 1269 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
1259 } 1270 }
@@ -1279,8 +1290,8 @@ dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense)
1279 1290
1280 struct dasd_device *device = default_erp->startdev; 1291 struct dasd_device *device = default_erp->startdev;
1281 1292
1282 DEV_MESSAGE(KERN_ERR, device, "%s", 1293 dev_err(&device->cdev->dev,
1283 "End-of-Cylinder - must never happen"); 1294 "The cylinder data for accessing the DASD is inconsistent\n");
1284 1295
1285 /* implement action 7 - BUG */ 1296 /* implement action 7 - BUG */
1286 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED); 1297 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
@@ -1306,7 +1317,7 @@ dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
1306 1317
1307 erp->function = dasd_3990_erp_env_data; 1318 erp->function = dasd_3990_erp_env_data;
1308 1319
1309 DEV_MESSAGE(KERN_DEBUG, device, "%s", "Environmental data present"); 1320 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Environmental data present");
1310 1321
1311 dasd_3990_handle_env_data(erp, sense); 1322 dasd_3990_handle_env_data(erp, sense);
1312 1323
@@ -1339,8 +1350,8 @@ dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
1339 1350
1340 struct dasd_device *device = default_erp->startdev; 1351 struct dasd_device *device = default_erp->startdev;
1341 1352
1342 DEV_MESSAGE(KERN_ERR, device, "%s", 1353 dev_err(&device->cdev->dev,
1343 "No Record Found - Fatal error "); 1354 "The specified record was not found\n");
1344 1355
1345 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED); 1356 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1346 1357
@@ -1365,7 +1376,8 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
1365 1376
1366 struct dasd_device *device = erp->startdev; 1377 struct dasd_device *device = erp->startdev;
1367 1378
1368 DEV_MESSAGE(KERN_ERR, device, "%s", "File Protected"); 1379 dev_err(&device->cdev->dev, "Accessing the DASD failed because of "
1380 "a hardware error\n");
1369 1381
1370 return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); 1382 return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
1371 1383
@@ -1394,7 +1406,7 @@ static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
1394 if (cqr->block && 1406 if (cqr->block &&
1395 (cqr->block->base != cqr->startdev)) { 1407 (cqr->block->base != cqr->startdev)) {
1396 if (cqr->startdev->features & DASD_FEATURE_ERPLOG) { 1408 if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
1397 DEV_MESSAGE(KERN_ERR, cqr->startdev, 1409 DBF_DEV_EVENT(DBF_ERR, cqr->startdev,
1398 "ERP on alias device for request %p," 1410 "ERP on alias device for request %p,"
1399 " recover on base device %s", cqr, 1411 " recover on base device %s", cqr,
1400 dev_name(&cqr->block->base->cdev->dev)); 1412 dev_name(&cqr->block->base->cdev->dev));
@@ -1511,7 +1523,7 @@ dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense)
1511 erp->retries = 256; 1523 erp->retries = 256;
1512 erp->function = dasd_3990_erp_action_10_32; 1524 erp->function = dasd_3990_erp_action_10_32;
1513 1525
1514 DEV_MESSAGE(KERN_DEBUG, device, "%s", "Perform logging requested"); 1526 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Perform logging requested");
1515 1527
1516 return erp; 1528 return erp;
1517 1529
@@ -1549,7 +1561,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1549 char *LO_data; /* LO_eckd_data_t */ 1561 char *LO_data; /* LO_eckd_data_t */
1550 struct ccw1 *ccw, *oldccw; 1562 struct ccw1 *ccw, *oldccw;
1551 1563
1552 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1564 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1553 "Write not finished because of unexpected condition"); 1565 "Write not finished because of unexpected condition");
1554 1566
1555 default_erp->function = dasd_3990_erp_action_1B_32; 1567 default_erp->function = dasd_3990_erp_action_1B_32;
@@ -1561,10 +1573,16 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1561 cqr = cqr->refers; 1573 cqr = cqr->refers;
1562 } 1574 }
1563 1575
1576 if (scsw_is_tm(&cqr->irb.scsw)) {
1577 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1578 "32 bit sense, action 1B is not defined"
1579 " in transport mode - just retry");
1580 return default_erp;
1581 }
1582
1564 /* for imprecise ending just do default erp */ 1583 /* for imprecise ending just do default erp */
1565 if (sense[1] & 0x01) { 1584 if (sense[1] & 0x01) {
1566 1585 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1567 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1568 "Imprecise ending is set - just retry"); 1586 "Imprecise ending is set - just retry");
1569 1587
1570 return default_erp; 1588 return default_erp;
@@ -1575,8 +1593,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1575 cpa = default_erp->refers->irb.scsw.cmd.cpa; 1593 cpa = default_erp->refers->irb.scsw.cmd.cpa;
1576 1594
1577 if (cpa == 0) { 1595 if (cpa == 0) {
1578 1596 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1579 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1580 "Unable to determine address of the CCW " 1597 "Unable to determine address of the CCW "
1581 "to be restarted"); 1598 "to be restarted");
1582 1599
@@ -1590,7 +1607,9 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1590 sizeof(struct LO_eckd_data), device); 1607 sizeof(struct LO_eckd_data), device);
1591 1608
1592 if (IS_ERR(erp)) { 1609 if (IS_ERR(erp)) {
1593 DEV_MESSAGE(KERN_ERR, device, "%s", "Unable to allocate ERP"); 1610 /* internal error 01 - Unable to allocate ERP */
1611 dev_err(&device->cdev->dev, "An error occurred in the DASD "
1612 "device driver, reason=%s\n", "01");
1594 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED); 1613 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1595 } 1614 }
1596 1615
@@ -1599,7 +1618,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1599 oldccw = cqr->cpaddr; 1618 oldccw = cqr->cpaddr;
1600 if (oldccw->cmd_code == DASD_ECKD_CCW_PFX) { 1619 if (oldccw->cmd_code == DASD_ECKD_CCW_PFX) {
1601 PFX_data = cqr->data; 1620 PFX_data = cqr->data;
1602 memcpy(DE_data, &PFX_data->define_extend, 1621 memcpy(DE_data, &PFX_data->define_extent,
1603 sizeof(struct DE_eckd_data)); 1622 sizeof(struct DE_eckd_data));
1604 } else 1623 } else
1605 memcpy(DE_data, cqr->data, sizeof(struct DE_eckd_data)); 1624 memcpy(DE_data, cqr->data, sizeof(struct DE_eckd_data));
@@ -1608,10 +1627,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1608 LO_data = erp->data + sizeof(struct DE_eckd_data); 1627 LO_data = erp->data + sizeof(struct DE_eckd_data);
1609 1628
1610 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) { 1629 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
1611 1630 /* should not happen */
1612 DEV_MESSAGE(KERN_ERR, device, "%s",
1613 "BUG - this should not happen");
1614
1615 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED); 1631 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1616 } 1632 }
1617 1633
@@ -1701,7 +1717,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1701 char *LO_data; /* struct LO_eckd_data */ 1717 char *LO_data; /* struct LO_eckd_data */
1702 struct ccw1 *ccw; 1718 struct ccw1 *ccw;
1703 1719
1704 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1720 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1705 "Write not finished because of unexpected condition" 1721 "Write not finished because of unexpected condition"
1706 " - follow on"); 1722 " - follow on");
1707 1723
@@ -1712,10 +1728,16 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1712 cqr = cqr->refers; 1728 cqr = cqr->refers;
1713 } 1729 }
1714 1730
1731 if (scsw_is_tm(&cqr->irb.scsw)) {
1732 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1733 "32 bit sense, action 1B, update,"
1734 " in transport mode - just retry");
1735 return previous_erp;
1736 }
1737
1715 /* for imprecise ending just do default erp */ 1738 /* for imprecise ending just do default erp */
1716 if (sense[1] & 0x01) { 1739 if (sense[1] & 0x01) {
1717 1740 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1718 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1719 "Imprecise ending is set - just retry"); 1741 "Imprecise ending is set - just retry");
1720 1742
1721 previous_erp->status = DASD_CQR_FILLED; 1743 previous_erp->status = DASD_CQR_FILLED;
@@ -1728,10 +1750,10 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1728 cpa = previous_erp->irb.scsw.cmd.cpa; 1750 cpa = previous_erp->irb.scsw.cmd.cpa;
1729 1751
1730 if (cpa == 0) { 1752 if (cpa == 0) {
1731 1753 /* internal error 02 -
1732 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1754 Unable to determine address of the CCW to be restarted */
1733 "Unable to determine address of the CCW " 1755 dev_err(&device->cdev->dev, "An error occurred in the DASD "
1734 "to be restarted"); 1756 "device driver, reason=%s\n", "02");
1735 1757
1736 previous_erp->status = DASD_CQR_FAILED; 1758 previous_erp->status = DASD_CQR_FAILED;
1737 1759
@@ -1744,10 +1766,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1744 LO_data = erp->data + sizeof(struct DE_eckd_data); 1766 LO_data = erp->data + sizeof(struct DE_eckd_data);
1745 1767
1746 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) { 1768 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
1747 1769 /* should not happen */
1748 DEV_MESSAGE(KERN_ERR, device, "%s",
1749 "BUG - this should not happen");
1750
1751 previous_erp->status = DASD_CQR_FAILED; 1770 previous_erp->status = DASD_CQR_FAILED;
1752 1771
1753 return previous_erp; 1772 return previous_erp;
@@ -1935,14 +1954,13 @@ dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
1935 1954
1936 if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) { 1955 if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
1937 1956
1938 /* set to suspended duplex state then restart */ 1957 /* set to suspended duplex state then restart
1958 internal error 05 - Set device to suspended duplex state
1959 should be done */
1939 struct dasd_device *device = erp->startdev; 1960 struct dasd_device *device = erp->startdev;
1940 1961 dev_err(&device->cdev->dev,
1941 DEV_MESSAGE(KERN_ERR, device, "%s", 1962 "An error occurred in the DASD device driver, "
1942 "Set device to suspended duplex state should be " 1963 "reason=%s\n", "05");
1943 "done!\n"
1944 "This is not implemented yet (for compound ERP)"
1945 " - please report to linux390@de.ibm.com");
1946 1964
1947 } 1965 }
1948 1966
@@ -2012,15 +2030,14 @@ dasd_3990_erp_handle_sim(struct dasd_device *device, char *sense)
2012{ 2030{
2013 /* print message according to log or message to operator mode */ 2031 /* print message according to log or message to operator mode */
2014 if ((sense[24] & DASD_SIM_MSG_TO_OP) || (sense[1] & 0x10)) { 2032 if ((sense[24] & DASD_SIM_MSG_TO_OP) || (sense[1] & 0x10)) {
2015
2016 /* print SIM SRC from RefCode */ 2033 /* print SIM SRC from RefCode */
2017 DEV_MESSAGE(KERN_ERR, device, "SIM - SRC: " 2034 dev_err(&device->cdev->dev, "SIM - SRC: "
2018 "%02x%02x%02x%02x", sense[22], 2035 "%02x%02x%02x%02x\n", sense[22],
2019 sense[23], sense[11], sense[12]); 2036 sense[23], sense[11], sense[12]);
2020 } else if (sense[24] & DASD_SIM_LOG) { 2037 } else if (sense[24] & DASD_SIM_LOG) {
2021 /* print SIM SRC Refcode */ 2038 /* print SIM SRC Refcode */
2022 DEV_MESSAGE(KERN_WARNING, device, "SIM - SRC: " 2039 dev_warn(&device->cdev->dev, "log SIM - SRC: "
2023 "%02x%02x%02x%02x", sense[22], 2040 "%02x%02x%02x%02x\n", sense[22],
2024 sense[23], sense[11], sense[12]); 2041 sense[23], sense[11], sense[12]);
2025 } 2042 }
2026} 2043}
@@ -2063,14 +2080,14 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2063 switch (sense[25]) { 2080 switch (sense[25]) {
2064 2081
2065 case 0x00: /* success - use default ERP for retries */ 2082 case 0x00: /* success - use default ERP for retries */
2066 DEV_MESSAGE(KERN_DEBUG, device, "%s", 2083 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
2067 "ERP called for successful request" 2084 "ERP called for successful request"
2068 " - just retry"); 2085 " - just retry");
2069 break; 2086 break;
2070 2087
2071 case 0x01: /* fatal error */ 2088 case 0x01: /* fatal error */
2072 DEV_MESSAGE(KERN_ERR, device, "%s", 2089 dev_err(&device->cdev->dev,
2073 "Retry not recommended - Fatal error"); 2090 "ERP failed for the DASD\n");
2074 2091
2075 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); 2092 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
2076 break; 2093 break;
@@ -2080,13 +2097,10 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2080 erp = dasd_3990_erp_int_req(erp); 2097 erp = dasd_3990_erp_int_req(erp);
2081 break; 2098 break;
2082 2099
2083 case 0x0F: /* length mismatch during update write command */ 2100 case 0x0F: /* length mismatch during update write command
2084 DEV_MESSAGE(KERN_ERR, device, "%s", 2101 internal error 08 - update write command error*/
2085 "update write command error - should not " 2102 dev_err(&device->cdev->dev, "An error occurred in the "
2086 "happen;\n" 2103 "DASD device driver, reason=%s\n", "08");
2087 "Please send this message together with "
2088 "the above sense data to linux390@de."
2089 "ibm.com");
2090 2104
2091 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); 2105 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
2092 break; 2106 break;
@@ -2095,13 +2109,12 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2095 erp = dasd_3990_erp_action_10_32(erp, sense); 2109 erp = dasd_3990_erp_action_10_32(erp, sense);
2096 break; 2110 break;
2097 2111
2098 case 0x15: /* next track outside defined extend 2112 case 0x15: /* next track outside defined extent
2099 DEV_MESSAGE(KERN_ERR, device, "%s", 2113 internal error 07 - The next track is not
2100 "next track outside defined extend - " 2114 within the defined storage extent */
2101 "should not happen;\n" 2115 dev_err(&device->cdev->dev,
2102 "Please send this message together with " 2116 "An error occurred in the DASD device driver, "
2103 "the above sense data to linux390@de." 2117 "reason=%s\n", "07");
2104 "ibm.com");
2105 2118
2106 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); 2119 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
2107 break; 2120 break;
@@ -2112,9 +2125,9 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2112 break; 2125 break;
2113 2126
2114 case 0x1C: /* invalid data */ 2127 case 0x1C: /* invalid data */
2115 DEV_MESSAGE(KERN_EMERG, device, "%s", 2128 dev_emerg(&device->cdev->dev,
2116 "Data recovered during retry with PCI " 2129 "Data recovered during retry with PCI "
2117 "fetch mode active"); 2130 "fetch mode active\n");
2118 2131
2119 /* not possible to handle this situation in Linux */ 2132 /* not possible to handle this situation in Linux */
2120 panic 2133 panic
@@ -2123,7 +2136,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2123 break; 2136 break;
2124 2137
2125 case 0x1D: /* state-change pending */ 2138 case 0x1D: /* state-change pending */
2126 DEV_MESSAGE(KERN_DEBUG, device, "%s", 2139 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2127 "A State change pending condition exists " 2140 "A State change pending condition exists "
2128 "for the subsystem or device"); 2141 "for the subsystem or device");
2129 2142
@@ -2131,7 +2144,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2131 break; 2144 break;
2132 2145
2133 case 0x1E: /* busy */ 2146 case 0x1E: /* busy */
2134 DEV_MESSAGE(KERN_DEBUG, device, "%s", 2147 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2135 "Busy condition exists " 2148 "Busy condition exists "
2136 "for the subsystem or device"); 2149 "for the subsystem or device");
2137 erp = dasd_3990_erp_action_4(erp, sense); 2150 erp = dasd_3990_erp_action_4(erp, sense);
@@ -2171,9 +2184,9 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
2171{ 2184{
2172 struct dasd_device *device = erp->startdev; 2185 struct dasd_device *device = erp->startdev;
2173 2186
2174 if (erp->refers->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK 2187 if (scsw_cstat(&erp->refers->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK
2175 | SCHN_STAT_CHN_CTRL_CHK)) { 2188 | SCHN_STAT_CHN_CTRL_CHK)) {
2176 DEV_MESSAGE(KERN_DEBUG, device, "%s", 2189 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2177 "channel or interface control check"); 2190 "channel or interface control check");
2178 erp = dasd_3990_erp_action_4(erp, NULL); 2191 erp = dasd_3990_erp_action_4(erp, NULL);
2179 } 2192 }
@@ -2193,21 +2206,23 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
2193 * erp_new contens was possibly modified 2206 * erp_new contents was possibly modified
2194 */ 2207 */
2195static struct dasd_ccw_req * 2208static struct dasd_ccw_req *
2196dasd_3990_erp_inspect(struct dasd_ccw_req * erp) 2209dasd_3990_erp_inspect(struct dasd_ccw_req *erp)
2197{ 2210{
2198 2211
2199 struct dasd_ccw_req *erp_new = NULL; 2212 struct dasd_ccw_req *erp_new = NULL;
2200 /* sense data are located in the refers record of the */ 2213 char *sense;
2201 /* already set up new ERP ! */
2202 char *sense = erp->refers->irb.ecw;
2203 2214
2204 /* if this problem occured on an alias retry on base */ 2215 /* if this problem occurred on an alias retry on base */
2205 erp_new = dasd_3990_erp_inspect_alias(erp); 2216 erp_new = dasd_3990_erp_inspect_alias(erp);
2206 if (erp_new) 2217 if (erp_new)
2207 return erp_new; 2218 return erp_new;
2208 2219
2209 /* check if no concurrent sens is available */ 2220 /* sense data are located in the refers record of the
2210 if (!erp->refers->irb.esw.esw0.erw.cons) 2221 * already set up new ERP !
2222 * check if concurrent sens is available
2223 */
2224 sense = dasd_get_sense(&erp->refers->irb);
2225 if (!sense)
2211 erp_new = dasd_3990_erp_control_check(erp); 2226 erp_new = dasd_3990_erp_control_check(erp);
2212 /* distinguish between 24 and 32 byte sense data */ 2227 /* distinguish between 24 and 32 byte sense data */
2213 else if (sense[27] & DASD_SENSE_BIT_0) { 2228 else if (sense[27] & DASD_SENSE_BIT_0) {
@@ -2231,7 +2246,11 @@ dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
2231 * DESCRIPTION 2246 * DESCRIPTION
2232 * This funtion adds an additional request block (ERP) to the head of 2247 * This function adds an additional request block (ERP) to the head of
2233 * the given cqr (or erp). 2248 * the given cqr (or erp).
2234 * This erp is initialized as an default erp (retry TIC) 2249 * For a command mode cqr the erp is initialized as a default erp
2250 * (retry TIC).
2251 * For transport mode we make a copy of the original TCW (points to
2252 * the original TCCB, TIDALs, etc.) but give it a fresh
2253 * TSB so the original sense data will not be changed.
2235 * 2254 *
2236 * PARAMETER 2255 * PARAMETER
2237 * cqr head of the current ERP-chain (or single cqr if 2256 * cqr head of the current ERP-chain (or single cqr if
@@ -2239,25 +2258,35 @@ dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
2239 * RETURN VALUES 2258 * RETURN VALUES
2240 * erp pointer to new ERP-chain head 2259 * erp pointer to new ERP-chain head
2241 */ 2260 */
2242static struct dasd_ccw_req * 2261static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
2243dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
2244{ 2262{
2245 2263
2246 struct dasd_device *device = cqr->startdev; 2264 struct dasd_device *device = cqr->startdev;
2247 struct ccw1 *ccw; 2265 struct ccw1 *ccw;
2248
2249 /* allocate additional request block */
2250 struct dasd_ccw_req *erp; 2266 struct dasd_ccw_req *erp;
2267 int cplength, datasize;
2268 struct tcw *tcw;
2269 struct tsb *tsb;
2270
2271 if (cqr->cpmode == 1) {
2272 cplength = 0;
2273 datasize = sizeof(struct tcw) + sizeof(struct tsb);
2274 } else {
2275 cplength = 2;
2276 datasize = 0;
2277 }
2251 2278
2252 erp = dasd_alloc_erp_request((char *) &cqr->magic, 2, 0, device); 2279 /* allocate additional request block */
2280 erp = dasd_alloc_erp_request((char *) &cqr->magic,
2281 cplength, datasize, device);
2253 if (IS_ERR(erp)) { 2282 if (IS_ERR(erp)) {
2254 if (cqr->retries <= 0) { 2283 if (cqr->retries <= 0) {
2255 DEV_MESSAGE(KERN_ERR, device, "%s", 2284 DBF_DEV_EVENT(DBF_ERR, device, "%s",
2256 "Unable to allocate ERP request"); 2285 "Unable to allocate ERP request");
2257 cqr->status = DASD_CQR_FAILED; 2286 cqr->status = DASD_CQR_FAILED;
2258 cqr->stopclk = get_clock (); 2287 cqr->stopclk = get_clock ();
2259 } else { 2288 } else {
2260 DEV_MESSAGE (KERN_ERR, device, 2289 DBF_DEV_EVENT(DBF_ERR, device,
2261 "Unable to allocate ERP request " 2290 "Unable to allocate ERP request "
2262 "(%i retries left)", 2291 "(%i retries left)",
2263 cqr->retries); 2292 cqr->retries);
@@ -2266,13 +2295,24 @@ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
2266 return cqr; 2295 return cqr;
2267 } 2296 }
2268 2297
2269 /* initialize request with default TIC to current ERP/CQR */ 2298 if (cqr->cpmode == 1) {
2270 ccw = erp->cpaddr; 2299 /* make a shallow copy of the original tcw but set new tsb */
2271 ccw->cmd_code = CCW_CMD_NOOP; 2300 erp->cpmode = 1;
2272 ccw->flags = CCW_FLAG_CC; 2301 erp->cpaddr = erp->data;
2273 ccw++; 2302 tcw = erp->data;
2274 ccw->cmd_code = CCW_CMD_TIC; 2303 tsb = (struct tsb *) &tcw[1];
2275 ccw->cda = (long)(cqr->cpaddr); 2304 *tcw = *((struct tcw *)cqr->cpaddr);
2305 tcw->tsb = (long)tsb;
2306 } else {
2307 /* initialize request with default TIC to current ERP/CQR */
2308 ccw = erp->cpaddr;
2309 ccw->cmd_code = CCW_CMD_NOOP;
2310 ccw->flags = CCW_FLAG_CC;
2311 ccw++;
2312 ccw->cmd_code = CCW_CMD_TIC;
2313 ccw->cda = (long)(cqr->cpaddr);
2314 }
2315
2276 erp->function = dasd_3990_erp_add_erp; 2316 erp->function = dasd_3990_erp_add_erp;
2277 erp->refers = cqr; 2317 erp->refers = cqr;
2278 erp->startdev = device; 2318 erp->startdev = device;
@@ -2282,7 +2322,6 @@ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
2282 erp->expires = 0; 2322 erp->expires = 0;
2283 erp->retries = 256; 2323 erp->retries = 256;
2284 erp->buildclk = get_clock(); 2324 erp->buildclk = get_clock();
2285
2286 erp->status = DASD_CQR_FILLED; 2325 erp->status = DASD_CQR_FILLED;
2287 2326
2288 return erp; 2327 return erp;
@@ -2340,28 +2379,33 @@ dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr)
2340 * match 'boolean' for match found 2379 * match 'boolean' for match found
2341 * returns 1 if match found, otherwise 0. 2380 * returns 1 if match found, otherwise 0.
2342 */ 2381 */
2343static int 2382static int dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1,
2344dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2) 2383 struct dasd_ccw_req *cqr2)
2345{ 2384{
2385 char *sense1, *sense2;
2346 2386
2347 if (cqr1->startdev != cqr2->startdev) 2387 if (cqr1->startdev != cqr2->startdev)
2348 return 0; 2388 return 0;
2349 2389
2350 if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons) 2390 sense1 = dasd_get_sense(&cqr1->irb);
2351 return 0; 2391 sense2 = dasd_get_sense(&cqr2->irb);
2352 2392
2353 if ((cqr1->irb.esw.esw0.erw.cons == 0) && 2393 /* one request has sense data, the other not -> no match, return 0 */
2354 (cqr2->irb.esw.esw0.erw.cons == 0)) { 2394 if (!sense1 != !sense2)
2355 if ((cqr1->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK | 2395 return 0;
2356 SCHN_STAT_CHN_CTRL_CHK)) == 2396 /* no sense data in both cases -> check cstat for IFCC */
2357 (cqr2->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK | 2397 if (!sense1 && !sense2) {
2358 SCHN_STAT_CHN_CTRL_CHK))) 2398 if ((scsw_cstat(&cqr1->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK |
2399 SCHN_STAT_CHN_CTRL_CHK)) ==
2400 (scsw_cstat(&cqr2->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK |
2401 SCHN_STAT_CHN_CTRL_CHK)))
2359 return 1; /* match with ifcc*/ 2402 return 1; /* match with ifcc*/
2360 } 2403 }
2361 /* check sense data; byte 0-2,25,27 */ 2404 /* check sense data; byte 0-2,25,27 */
2362 if (!((memcmp (cqr1->irb.ecw, cqr2->irb.ecw, 3) == 0) && 2405 if (!(sense1 && sense2 &&
2363 (cqr1->irb.ecw[27] == cqr2->irb.ecw[27]) && 2406 (memcmp(sense1, sense2, 3) == 0) &&
2364 (cqr1->irb.ecw[25] == cqr2->irb.ecw[25]))) { 2407 (sense1[27] == sense2[27]) &&
2408 (sense1[25] == sense2[25]))) {
2365 2409
2366 return 0; /* sense doesn't match */ 2410 return 0; /* sense doesn't match */
2367 } 2411 }
@@ -2434,7 +2478,7 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
2434{ 2478{
2435 2479
2436 struct dasd_device *device = erp->startdev; 2480 struct dasd_device *device = erp->startdev;
2437 char *sense = erp->irb.ecw; 2481 char *sense = dasd_get_sense(&erp->irb);
2438 2482
2439 /* check for 24 byte sense ERP */ 2483 /* check for 24 byte sense ERP */
2440 if ((erp->function == dasd_3990_erp_bus_out) || 2484 if ((erp->function == dasd_3990_erp_bus_out) ||
@@ -2449,7 +2493,7 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
2449 /* prepare erp for retry on different channel path */ 2493 /* prepare erp for retry on different channel path */
2450 erp = dasd_3990_erp_action_1(erp); 2494 erp = dasd_3990_erp_action_1(erp);
2451 2495
2452 if (!(sense[2] & DASD_SENSE_BIT_0)) { 2496 if (sense && !(sense[2] & DASD_SENSE_BIT_0)) {
2453 2497
2454 /* issue a Diagnostic Control command with an 2498 /* issue a Diagnostic Control command with an
2455 * Inhibit Write subcommand */ 2499 * Inhibit Write subcommand */
@@ -2471,7 +2515,7 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
2471 break; 2515 break;
2472 } 2516 }
2473 default: 2517 default:
2474 DEV_MESSAGE(KERN_DEBUG, device, 2518 DBF_DEV_EVENT(DBF_WARNING, device,
2475 "invalid subcommand modifier 0x%x " 2519 "invalid subcommand modifier 0x%x "
2476 "for Diagnostic Control Command", 2520 "for Diagnostic Control Command",
2477 sense[25]); 2521 sense[25]);
@@ -2479,19 +2523,21 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
2479 } 2523 }
2480 2524
2481 /* check for 32 byte sense ERP */ 2525 /* check for 32 byte sense ERP */
2482 } else if ((erp->function == dasd_3990_erp_compound_retry) || 2526 } else if (sense &&
2483 (erp->function == dasd_3990_erp_compound_path) || 2527 ((erp->function == dasd_3990_erp_compound_retry) ||
2484 (erp->function == dasd_3990_erp_compound_code) || 2528 (erp->function == dasd_3990_erp_compound_path) ||
2485 (erp->function == dasd_3990_erp_compound_config)) { 2529 (erp->function == dasd_3990_erp_compound_code) ||
2530 (erp->function == dasd_3990_erp_compound_config))) {
2486 2531
2487 erp = dasd_3990_erp_compound(erp, sense); 2532 erp = dasd_3990_erp_compound(erp, sense);
2488 2533
2489 } else { 2534 } else {
2490 /* No retry left and no additional special handling */ 2535 /*
2491 /*necessary */ 2536 * No retry left and no additional special handling
2492 DEV_MESSAGE(KERN_ERR, device, 2537 * necessary
2493 "no retries left for erp %p - " 2538 */
2494 "set status to FAILED", erp); 2539 dev_err(&device->cdev->dev,
2540 "ERP %p has run out of retries and failed\n", erp);
2495 2541
2496 erp->status = DASD_CQR_FAILED; 2542 erp->status = DASD_CQR_FAILED;
2497 } 2543 }
@@ -2548,24 +2594,25 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
2548 2594
2549 if (erp->retries > 0) { 2595 if (erp->retries > 0) {
2550 2596
2551 char *sense = erp->refers->irb.ecw; 2597 char *sense = dasd_get_sense(&erp->refers->irb);
2552 2598
2553 /* check for special retries */ 2599 /* check for special retries */
2554 if (erp->function == dasd_3990_erp_action_4) { 2600 if (sense && erp->function == dasd_3990_erp_action_4) {
2555 2601
2556 erp = dasd_3990_erp_action_4(erp, sense); 2602 erp = dasd_3990_erp_action_4(erp, sense);
2557 2603
2558 } else if (erp->function == dasd_3990_erp_action_1B_32) { 2604 } else if (sense &&
2605 erp->function == dasd_3990_erp_action_1B_32) {
2559 2606
2560 erp = dasd_3990_update_1B(erp, sense); 2607 erp = dasd_3990_update_1B(erp, sense);
2561 2608
2562 } else if (erp->function == dasd_3990_erp_int_req) { 2609 } else if (sense && erp->function == dasd_3990_erp_int_req) {
2563 2610
2564 erp = dasd_3990_erp_int_req(erp); 2611 erp = dasd_3990_erp_int_req(erp);
2565 2612
2566 } else { 2613 } else {
2567 /* simple retry */ 2614 /* simple retry */
2568 DEV_MESSAGE(KERN_DEBUG, device, 2615 DBF_DEV_EVENT(DBF_DEBUG, device,
2569 "%i retries left for erp %p", 2616 "%i retries left for erp %p",
2570 erp->retries, erp); 2617 erp->retries, erp);
2571 2618
@@ -2609,24 +2656,24 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2609 2656
2610 if (device->features & DASD_FEATURE_ERPLOG) { 2657 if (device->features & DASD_FEATURE_ERPLOG) {
2611 /* print current erp_chain */ 2658 /* print current erp_chain */
2612 DEV_MESSAGE(KERN_ERR, device, "%s", 2659 dev_err(&device->cdev->dev,
2613 "ERP chain at BEGINNING of ERP-ACTION"); 2660 "ERP chain at BEGINNING of ERP-ACTION\n");
2614 for (temp_erp = cqr; 2661 for (temp_erp = cqr;
2615 temp_erp != NULL; temp_erp = temp_erp->refers) { 2662 temp_erp != NULL; temp_erp = temp_erp->refers) {
2616 2663
2617 DEV_MESSAGE(KERN_ERR, device, 2664 dev_err(&device->cdev->dev,
2618 " erp %p (%02x) refers to %p", 2665 "ERP %p (%02x) refers to %p\n",
2619 temp_erp, temp_erp->status, 2666 temp_erp, temp_erp->status,
2620 temp_erp->refers); 2667 temp_erp->refers);
2621 } 2668 }
2622 } 2669 }
2623 2670
2624 /* double-check if current erp/cqr was successful */ 2671 /* double-check if current erp/cqr was successful */
2625 if ((cqr->irb.scsw.cmd.cstat == 0x00) && 2672 if ((scsw_cstat(&cqr->irb.scsw) == 0x00) &&
2626 (cqr->irb.scsw.cmd.dstat == 2673 (scsw_dstat(&cqr->irb.scsw) ==
2627 (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { 2674 (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
2628 2675
2629 DEV_MESSAGE(KERN_DEBUG, device, 2676 DBF_DEV_EVENT(DBF_DEBUG, device,
2630 "ERP called for successful request %p" 2677 "ERP called for successful request %p"
2631 " - NO ERP necessary", cqr); 2678 " - NO ERP necessary", cqr);
2632 2679
@@ -2648,13 +2695,13 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2648 2695
2649 if (device->features & DASD_FEATURE_ERPLOG) { 2696 if (device->features & DASD_FEATURE_ERPLOG) {
2650 /* print current erp_chain */ 2697 /* print current erp_chain */
2651 DEV_MESSAGE(KERN_ERR, device, "%s", 2698 dev_err(&device->cdev->dev,
2652 "ERP chain at END of ERP-ACTION"); 2699 "ERP chain at END of ERP-ACTION\n");
2653 for (temp_erp = erp; 2700 for (temp_erp = erp;
2654 temp_erp != NULL; temp_erp = temp_erp->refers) { 2701 temp_erp != NULL; temp_erp = temp_erp->refers) {
2655 2702
2656 DEV_MESSAGE(KERN_ERR, device, 2703 dev_err(&device->cdev->dev,
2657 " erp %p (%02x) refers to %p", 2704 "ERP %p (%02x) refers to %p\n",
2658 temp_erp, temp_erp->status, 2705 temp_erp, temp_erp->status,
2659 temp_erp->refers); 2706 temp_erp->refers);
2660 } 2707 }
@@ -2667,6 +2714,8 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2667 list_add_tail(&erp->blocklist, &cqr->blocklist); 2714 list_add_tail(&erp->blocklist, &cqr->blocklist);
2668 } 2715 }
2669 2716
2717
2718
2670 return erp; 2719 return erp;
2671 2720
2672} /* end dasd_3990_erp_action */ 2721} /* end dasd_3990_erp_action */
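The ERP hunks above replace every direct read of erp->irb.ecw with dasd_get_sense() and only act on the sense bytes when that pointer is non-NULL. Below is a minimal stand-alone sketch of the pattern in plain user-space C; the types are simplified stand-ins (not the real struct irb layout) and 0x80 is only assumed as the DASD_SENSE_BIT_0 value.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel structures (illustrative only). */
struct fake_irb {
        int esw_valid;          /* pretend flag: sense bytes present   */
        unsigned char ecw[32];  /* sense data, only valid when flagged */
};

/* Mirrors the idea of dasd_get_sense(): NULL when nothing usable. */
static unsigned char *fake_get_sense(struct fake_irb *irb)
{
        return irb->esw_valid ? irb->ecw : NULL;
}

int main(void)
{
        struct fake_irb irb = { .esw_valid = 0 };
        unsigned char *sense = fake_get_sense(&irb);

        /* Every consumer checks the pointer first, as the ERP code now does;
         * 0x80 stands in for DASD_SENSE_BIT_0 (value assumed here). */
        if (sense && !(sense[2] & 0x80))
                printf("sense byte 2 indicates a retryable condition\n");
        else
                printf("no sense data - skip sense-based ERP decisions\n");
        return 0;
}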
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 20676cdef4a5..5b7bbc87593b 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -5,6 +5,8 @@
5 * Author(s): Stefan Weinhuber <wein@de.ibm.com> 5 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
6 */ 6 */
7 7
8#define KMSG_COMPONENT "dasd"
9
8#include <linux/list.h> 10#include <linux/list.h>
9#include <asm/ebcdic.h> 11#include <asm/ebcdic.h>
10#include "dasd_int.h" 12#include "dasd_int.h"
@@ -503,7 +505,7 @@ static void lcu_update_work(struct work_struct *work)
503 */ 505 */
504 spin_lock_irqsave(&lcu->lock, flags); 506 spin_lock_irqsave(&lcu->lock, flags);
505 if (rc || (lcu->flags & NEED_UAC_UPDATE)) { 507 if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
506 DEV_MESSAGE(KERN_WARNING, device, "could not update" 508 DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
507 " alias data in lcu (rc = %d), retry later", rc); 509 " alias data in lcu (rc = %d), retry later", rc);
508 schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ); 510 schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
509 } else { 511 } else {
@@ -646,14 +648,16 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
646{ 648{
647 struct dasd_ccw_req *cqr; 649 struct dasd_ccw_req *cqr;
648 int rc = 0; 650 int rc = 0;
651 struct ccw1 *ccw;
649 652
650 cqr = lcu->rsu_cqr; 653 cqr = lcu->rsu_cqr;
651 strncpy((char *) &cqr->magic, "ECKD", 4); 654 strncpy((char *) &cqr->magic, "ECKD", 4);
652 ASCEBC((char *) &cqr->magic, 4); 655 ASCEBC((char *) &cqr->magic, 4);
653 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RSCK; 656 ccw = cqr->cpaddr;
654 cqr->cpaddr->flags = 0 ; 657 ccw->cmd_code = DASD_ECKD_CCW_RSCK;
655 cqr->cpaddr->count = 16; 658 ccw->flags = 0 ;
656 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 659 ccw->count = 16;
660 ccw->cda = (__u32)(addr_t) cqr->data;
657 ((char *)cqr->data)[0] = reason; 661 ((char *)cqr->data)[0] = reason;
658 662
659 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 663 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -855,16 +859,25 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
855 struct alias_lcu *lcu; 859 struct alias_lcu *lcu;
856 char reason; 860 char reason;
857 struct dasd_eckd_private *private; 861 struct dasd_eckd_private *private;
862 char *sense;
858 863
859 private = (struct dasd_eckd_private *) device->private; 864 private = (struct dasd_eckd_private *) device->private;
860 865
861 reason = irb->ecw[8]; 866 sense = dasd_get_sense(irb);
862 DEV_MESSAGE(KERN_WARNING, device, "%s %x", 867 if (sense) {
863 "eckd handle summary unit check: reason", reason); 868 reason = sense[8];
869 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
870 "eckd handle summary unit check: reason", reason);
871 } else {
872 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
873 "eckd handle summary unit check:"
874 " no reason code available");
875 return;
876 }
864 877
865 lcu = private->lcu; 878 lcu = private->lcu;
866 if (!lcu) { 879 if (!lcu) {
867 DEV_MESSAGE(KERN_WARNING, device, "%s", 880 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
868 "device not ready to handle summary" 881 "device not ready to handle summary"
869 " unit check (no lcu structure)"); 882 " unit check (no lcu structure)");
870 return; 883 return;
@@ -877,7 +890,7 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
877 * the next interrupt on a different device 890 * the next interrupt on a different device
878 */ 891 */
879 if (list_empty(&device->alias_list)) { 892 if (list_empty(&device->alias_list)) {
880 DEV_MESSAGE(KERN_WARNING, device, "%s", 893 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
881 "device is in offline processing," 894 "device is in offline processing,"
882 " don't do summary unit check handling"); 895 " don't do summary unit check handling");
883 spin_unlock(&lcu->lock); 896 spin_unlock(&lcu->lock);
@@ -885,7 +898,7 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
885 } 898 }
886 if (lcu->suc_data.device) { 899 if (lcu->suc_data.device) {
887 /* already scheduled or running */ 900 /* already scheduled or running */
888 DEV_MESSAGE(KERN_WARNING, device, "%s", 901 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
889 "previous instance of summary unit check worker" 902 "previous instance of summary unit check worker"
890 " still pending"); 903 " still pending");
891 spin_unlock(&lcu->lock); 904 spin_unlock(&lcu->lock);
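The reset_summary_unit_check() change above fills the channel program through a local ccw pointer instead of repeated cqr->cpaddr dereferences. Here is a minimal sketch of that style, using a simplified stand-in for struct ccw1; FAKE_CCW_RSCK is a placeholder opcode, not the real DASD_ECKD_CCW_RSCK value.

#include <stdio.h>
#include <string.h>

/* Simplified channel command word, modeled loosely on struct ccw1. */
struct fake_ccw {
        unsigned char  cmd_code;
        unsigned char  flags;
        unsigned short count;
        unsigned int   cda;     /* data address, truncated like the kernel's (__u32)(addr_t) cast */
};

#define FAKE_CCW_RSCK 0xF9      /* placeholder opcode, value assumed */

int main(void)
{
        struct fake_ccw ccw_area[1];
        struct fake_ccw *ccw = ccw_area;        /* local pointer, as in the patch */
        unsigned char data[16];

        memset(data, 0, sizeof(data));
        data[0] = 0x01;                         /* "reason" byte goes into the first data byte */

        ccw->cmd_code = FAKE_CCW_RSCK;
        ccw->flags = 0;
        ccw->count = 16;
        ccw->cda = (unsigned int)(unsigned long)data;

        printf("ccw: cmd=0x%02x count=%u\n", ccw->cmd_code, ccw->count);
        return 0;
}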
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 34339902efb9..e77666c8e6c0 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -13,6 +13,8 @@
13 * 13 *
14 */ 14 */
15 15
16#define KMSG_COMPONENT "dasd"
17
16#include <linux/ctype.h> 18#include <linux/ctype.h>
17#include <linux/init.h> 19#include <linux/init.h>
18#include <linux/module.h> 20#include <linux/module.h>
@@ -67,6 +69,8 @@ int dasd_probeonly = 0; /* is true, when probeonly mode is active */
67int dasd_autodetect = 0; /* is true, when autodetection is active */ 69int dasd_autodetect = 0; /* is true, when autodetection is active */
68int dasd_nopav = 0; /* is true, when PAV is disabled */ 70int dasd_nopav = 0; /* is true, when PAV is disabled */
69EXPORT_SYMBOL_GPL(dasd_nopav); 71EXPORT_SYMBOL_GPL(dasd_nopav);
72int dasd_nofcx; /* disable High Performance Ficon */
73EXPORT_SYMBOL_GPL(dasd_nofcx);
70 74
71/* 75/*
72 * char *dasd[] is intended to hold the ranges supplied by the dasd= statement 76 * char *dasd[] is intended to hold the ranges supplied by the dasd= statement
@@ -125,6 +129,7 @@ __setup ("dasd=", dasd_call_setup);
125 * Read a device busid/devno from a string. 129 * Read a device busid/devno from a string.
126 */ 130 */
127static int 131static int
132
128dasd_busid(char **str, int *id0, int *id1, int *devno) 133dasd_busid(char **str, int *id0, int *id1, int *devno)
129{ 134{
130 int val, old_style; 135 int val, old_style;
@@ -132,8 +137,7 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
132 /* Interpret ipldev busid */ 137 /* Interpret ipldev busid */
133 if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) { 138 if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) {
134 if (ipl_info.type != IPL_TYPE_CCW) { 139 if (ipl_info.type != IPL_TYPE_CCW) {
135 MESSAGE(KERN_ERR, "%s", "ipl device is not a ccw " 140 pr_err("The IPL device is not a CCW device\n");
136 "device");
137 return -EINVAL; 141 return -EINVAL;
138 } 142 }
139 *id0 = 0; 143 *id0 = 0;
@@ -209,9 +213,8 @@ dasd_feature_list(char *str, char **endp)
209 else if (len == 8 && !strncmp(str, "failfast", 8)) 213 else if (len == 8 && !strncmp(str, "failfast", 8))
210 features |= DASD_FEATURE_FAILFAST; 214 features |= DASD_FEATURE_FAILFAST;
211 else { 215 else {
 212 MESSAGE(KERN_WARNING, 216 pr_warning("%.*s is not a supported device option\n",
213 "unsupported feature: %*s, " 217 len, str);
214 "ignoring setting", len, str);
215 rc = -EINVAL; 218 rc = -EINVAL;
216 } 219 }
217 str += len; 220 str += len;
@@ -220,8 +223,8 @@ dasd_feature_list(char *str, char **endp)
220 str++; 223 str++;
221 } 224 }
222 if (*str != ')') { 225 if (*str != ')') {
223 MESSAGE(KERN_WARNING, "%s", 226 pr_warning("A closing parenthesis ')' is missing in the "
224 "missing ')' in dasd parameter string\n"); 227 "dasd= parameter\n");
225 rc = -EINVAL; 228 rc = -EINVAL;
226 } else 229 } else
227 str++; 230 str++;
@@ -253,25 +256,29 @@ dasd_parse_keyword( char *parsestring ) {
253 } 256 }
254 if (strncmp("autodetect", parsestring, length) == 0) { 257 if (strncmp("autodetect", parsestring, length) == 0) {
255 dasd_autodetect = 1; 258 dasd_autodetect = 1;
256 MESSAGE (KERN_INFO, "%s", 259 pr_info("The autodetection mode has been activated\n");
257 "turning to autodetection mode");
258 return residual_str; 260 return residual_str;
259 } 261 }
260 if (strncmp("probeonly", parsestring, length) == 0) { 262 if (strncmp("probeonly", parsestring, length) == 0) {
261 dasd_probeonly = 1; 263 dasd_probeonly = 1;
262 MESSAGE(KERN_INFO, "%s", 264 pr_info("The probeonly mode has been activated\n");
263 "turning to probeonly mode");
264 return residual_str; 265 return residual_str;
265 } 266 }
266 if (strncmp("nopav", parsestring, length) == 0) { 267 if (strncmp("nopav", parsestring, length) == 0) {
267 if (MACHINE_IS_VM) 268 if (MACHINE_IS_VM)
268 MESSAGE(KERN_INFO, "%s", "'nopav' not supported on VM"); 269 pr_info("'nopav' is not supported on z/VM\n");
269 else { 270 else {
270 dasd_nopav = 1; 271 dasd_nopav = 1;
 271 MESSAGE(KERN_INFO, "%s", "disable PAV mode"); 272 pr_info("PAV support has been deactivated\n");
272 } 273 }
273 return residual_str; 274 return residual_str;
274 } 275 }
276 if (strncmp("nofcx", parsestring, length) == 0) {
277 dasd_nofcx = 1;
278 pr_info("High Performance FICON support has been "
279 "deactivated\n");
280 return residual_str;
281 }
275 if (strncmp("fixedbuffers", parsestring, length) == 0) { 282 if (strncmp("fixedbuffers", parsestring, length) == 0) {
276 if (dasd_page_cache) 283 if (dasd_page_cache)
277 return residual_str; 284 return residual_str;
@@ -280,10 +287,10 @@ dasd_parse_keyword( char *parsestring ) {
280 PAGE_SIZE, SLAB_CACHE_DMA, 287 PAGE_SIZE, SLAB_CACHE_DMA,
281 NULL); 288 NULL);
282 if (!dasd_page_cache) 289 if (!dasd_page_cache)
283 MESSAGE(KERN_WARNING, "%s", "Failed to create slab, " 290 DBF_EVENT(DBF_WARNING, "%s", "Failed to create slab, "
284 "fixed buffer mode disabled."); 291 "fixed buffer mode disabled.");
285 else 292 else
286 MESSAGE (KERN_INFO, "%s", 293 DBF_EVENT(DBF_INFO, "%s",
287 "turning on fixed buffer mode"); 294 "turning on fixed buffer mode");
288 return residual_str; 295 return residual_str;
289 } 296 }
@@ -321,7 +328,7 @@ dasd_parse_range( char *parsestring ) {
321 (from_id0 != to_id0 || from_id1 != to_id1 || from > to)) 328 (from_id0 != to_id0 || from_id1 != to_id1 || from > to))
322 rc = -EINVAL; 329 rc = -EINVAL;
323 if (rc) { 330 if (rc) {
324 MESSAGE(KERN_ERR, "Invalid device range %s", parsestring); 331 pr_err("%s is not a valid device range\n", parsestring);
325 return ERR_PTR(rc); 332 return ERR_PTR(rc);
326 } 333 }
327 features = dasd_feature_list(str, &str); 334 features = dasd_feature_list(str, &str);
@@ -340,8 +347,8 @@ dasd_parse_range( char *parsestring ) {
340 return str + 1; 347 return str + 1;
341 if (*str == '\0') 348 if (*str == '\0')
342 return str; 349 return str;
343 MESSAGE(KERN_WARNING, 350 pr_warning("The dasd= parameter value %s has an invalid ending\n",
344 "junk at end of dasd parameter string: %s\n", str); 351 str);
345 return ERR_PTR(-EINVAL); 352 return ERR_PTR(-EINVAL);
346} 353}
347 354
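dasd_parse_keyword() above gains a "nofcx" keyword next to autodetect, probeonly and nopav. The user-space sketch below illustrates the same length-limited keyword matching over a comma-separated dasd= style string; it only demonstrates the matching logic, not the kernel parser itself, and the flag names are local stand-ins.

#include <stdio.h>
#include <string.h>

/* Illustrative flags mirroring the module-level switches in the patch. */
static int autodetect, probeonly, nopav, nofcx;

/* Match one token of length len against the known dasd= keywords. */
static void parse_keyword(const char *kw, size_t len)
{
        if (strncmp(kw, "autodetect", len) == 0 && len == strlen("autodetect"))
                autodetect = 1;
        else if (strncmp(kw, "probeonly", len) == 0 && len == strlen("probeonly"))
                probeonly = 1;
        else if (strncmp(kw, "nopav", len) == 0 && len == strlen("nopav"))
                nopav = 1;
        else if (strncmp(kw, "nofcx", len) == 0 && len == strlen("nofcx"))
                nofcx = 1;
        else
                printf("%.*s is not a supported keyword\n", (int)len, kw);
}

int main(void)
{
        const char *param = "nofcx,nopav,bogus";
        const char *p = param;

        while (*p) {
                size_t len = strcspn(p, ",");   /* length of the current token */
                parse_keyword(p, len);
                p += len;
                if (*p == ',')
                        p++;
        }
        printf("autodetect=%d probeonly=%d nopav=%d nofcx=%d\n",
               autodetect, probeonly, nopav, nofcx);
        return 0;
}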
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index ef2a56952054..b9a7f7733446 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -8,6 +8,8 @@
8 * 8 *
9 */ 9 */
10 10
11#define KMSG_COMPONENT "dasd"
12
11#include <linux/stddef.h> 13#include <linux/stddef.h>
12#include <linux/kernel.h> 14#include <linux/kernel.h>
13#include <linux/slab.h> 15#include <linux/slab.h>
@@ -144,8 +146,8 @@ dasd_diag_erp(struct dasd_device *device)
144 mdsk_term_io(device); 146 mdsk_term_io(device);
145 rc = mdsk_init_io(device, device->block->bp_block, 0, NULL); 147 rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
146 if (rc) 148 if (rc)
147 DEV_MESSAGE(KERN_WARNING, device, "DIAG ERP unsuccessful, " 149 dev_warn(&device->cdev->dev, "DIAG ERP failed with "
148 "rc=%d", rc); 150 "rc=%d\n", rc);
149} 151}
150 152
151/* Start a given request at the device. Return zero on success, non-zero 153/* Start a given request at the device. Return zero on success, non-zero
@@ -160,7 +162,7 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
160 162
161 device = cqr->startdev; 163 device = cqr->startdev;
162 if (cqr->retries < 0) { 164 if (cqr->retries < 0) {
163 DEV_MESSAGE(KERN_WARNING, device, "DIAG start_IO: request %p " 165 DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p "
164 "- no retry left)", cqr); 166 "- no retry left)", cqr);
165 cqr->status = DASD_CQR_ERROR; 167 cqr->status = DASD_CQR_ERROR;
166 return -EIO; 168 return -EIO;
@@ -195,7 +197,7 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
195 break; 197 break;
196 default: /* Error condition */ 198 default: /* Error condition */
197 cqr->status = DASD_CQR_QUEUED; 199 cqr->status = DASD_CQR_QUEUED;
198 DEV_MESSAGE(KERN_WARNING, device, "dia250 returned rc=%d", rc); 200 DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc);
199 dasd_diag_erp(device); 201 dasd_diag_erp(device);
200 rc = -EIO; 202 rc = -EIO;
201 break; 203 break;
@@ -243,13 +245,14 @@ dasd_ext_handler(__u16 code)
243 return; 245 return;
244 } 246 }
245 if (!ip) { /* no intparm: unsolicited interrupt */ 247 if (!ip) { /* no intparm: unsolicited interrupt */
246 MESSAGE(KERN_DEBUG, "%s", "caught unsolicited interrupt"); 248 DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
249 "interrupt");
247 return; 250 return;
248 } 251 }
249 cqr = (struct dasd_ccw_req *) ip; 252 cqr = (struct dasd_ccw_req *) ip;
250 device = (struct dasd_device *) cqr->startdev; 253 device = (struct dasd_device *) cqr->startdev;
251 if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 254 if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
252 DEV_MESSAGE(KERN_WARNING, device, 255 DBF_DEV_EVENT(DBF_WARNING, device,
253 " magic number of dasd_ccw_req 0x%08X doesn't" 256 " magic number of dasd_ccw_req 0x%08X doesn't"
254 " match discipline 0x%08X", 257 " match discipline 0x%08X",
255 cqr->magic, *(int *) (&device->discipline->name)); 258 cqr->magic, *(int *) (&device->discipline->name));
@@ -281,15 +284,11 @@ dasd_ext_handler(__u16 code)
281 rc = dasd_start_diag(next); 284 rc = dasd_start_diag(next);
282 if (rc == 0) 285 if (rc == 0)
283 expires = next->expires; 286 expires = next->expires;
284 else if (rc != -EACCES)
285 DEV_MESSAGE(KERN_WARNING, device, "%s",
286 "Interrupt fastpath "
287 "failed!");
288 } 287 }
289 } 288 }
290 } else { 289 } else {
291 cqr->status = DASD_CQR_QUEUED; 290 cqr->status = DASD_CQR_QUEUED;
292 DEV_MESSAGE(KERN_WARNING, device, "interrupt status for " 291 DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
293 "request %p was %d (%d retries left)", cqr, status, 292 "request %p was %d (%d retries left)", cqr, status,
294 cqr->retries); 293 cqr->retries);
295 dasd_diag_erp(device); 294 dasd_diag_erp(device);
@@ -322,8 +321,9 @@ dasd_diag_check_device(struct dasd_device *device)
322 if (private == NULL) { 321 if (private == NULL) {
323 private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL); 322 private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
324 if (private == NULL) { 323 if (private == NULL) {
325 DEV_MESSAGE(KERN_WARNING, device, "%s", 324 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
326 "memory allocation failed for private data"); 325 "Allocating memory for private DASD data "
326 "failed\n");
327 return -ENOMEM; 327 return -ENOMEM;
328 } 328 }
329 ccw_device_get_id(device->cdev, &private->dev_id); 329 ccw_device_get_id(device->cdev, &private->dev_id);
@@ -331,7 +331,7 @@ dasd_diag_check_device(struct dasd_device *device)
331 } 331 }
332 block = dasd_alloc_block(); 332 block = dasd_alloc_block();
333 if (IS_ERR(block)) { 333 if (IS_ERR(block)) {
334 DEV_MESSAGE(KERN_WARNING, device, "%s", 334 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
335 "could not allocate dasd block structure"); 335 "could not allocate dasd block structure");
336 device->private = NULL; 336 device->private = NULL;
337 kfree(private); 337 kfree(private);
@@ -347,7 +347,7 @@ dasd_diag_check_device(struct dasd_device *device)
347 347
348 rc = diag210((struct diag210 *) rdc_data); 348 rc = diag210((struct diag210 *) rdc_data);
349 if (rc) { 349 if (rc) {
350 DEV_MESSAGE(KERN_WARNING, device, "failed to retrieve device " 350 DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device "
351 "information (rc=%d)", rc); 351 "information (rc=%d)", rc);
352 rc = -EOPNOTSUPP; 352 rc = -EOPNOTSUPP;
353 goto out; 353 goto out;
@@ -362,8 +362,8 @@ dasd_diag_check_device(struct dasd_device *device)
362 private->pt_block = 2; 362 private->pt_block = 2;
363 break; 363 break;
364 default: 364 default:
365 DEV_MESSAGE(KERN_WARNING, device, "unsupported device class " 365 dev_warn(&device->cdev->dev, "Device type %d is not supported "
366 "(class=%d)", private->rdc_data.vdev_class); 366 "in DIAG mode\n", private->rdc_data.vdev_class);
367 rc = -EOPNOTSUPP; 367 rc = -EOPNOTSUPP;
368 goto out; 368 goto out;
369 } 369 }
@@ -380,7 +380,7 @@ dasd_diag_check_device(struct dasd_device *device)
380 /* figure out blocksize of device */ 380 /* figure out blocksize of device */
381 label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL); 381 label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
382 if (label == NULL) { 382 if (label == NULL) {
383 DEV_MESSAGE(KERN_WARNING, device, "%s", 383 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
384 "No memory to allocate initialization request"); 384 "No memory to allocate initialization request");
385 rc = -ENOMEM; 385 rc = -ENOMEM;
386 goto out; 386 goto out;
@@ -404,8 +404,8 @@ dasd_diag_check_device(struct dasd_device *device)
404 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; 404 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
405 rc = dia250(&private->iob, RW_BIO); 405 rc = dia250(&private->iob, RW_BIO);
406 if (rc == 3) { 406 if (rc == 3) {
407 DEV_MESSAGE(KERN_WARNING, device, "%s", 407 dev_warn(&device->cdev->dev,
408 "DIAG call failed"); 408 "A 64-bit DIAG call failed\n");
409 rc = -EOPNOTSUPP; 409 rc = -EOPNOTSUPP;
410 goto out_label; 410 goto out_label;
411 } 411 }
@@ -414,8 +414,8 @@ dasd_diag_check_device(struct dasd_device *device)
414 break; 414 break;
415 } 415 }
416 if (bsize > PAGE_SIZE) { 416 if (bsize > PAGE_SIZE) {
417 DEV_MESSAGE(KERN_WARNING, device, "device access failed " 417 dev_warn(&device->cdev->dev, "Accessing the DASD failed because"
418 "(rc=%d)", rc); 418 " of an incorrect format (rc=%d)\n", rc);
419 rc = -EIO; 419 rc = -EIO;
420 goto out_label; 420 goto out_label;
421 } 421 }
@@ -433,15 +433,15 @@ dasd_diag_check_device(struct dasd_device *device)
433 block->s2b_shift++; 433 block->s2b_shift++;
434 rc = mdsk_init_io(device, block->bp_block, 0, NULL); 434 rc = mdsk_init_io(device, block->bp_block, 0, NULL);
435 if (rc) { 435 if (rc) {
436 DEV_MESSAGE(KERN_WARNING, device, "DIAG initialization " 436 dev_warn(&device->cdev->dev, "DIAG initialization "
437 "failed (rc=%d)", rc); 437 "failed with rc=%d\n", rc);
438 rc = -EIO; 438 rc = -EIO;
439 } else { 439 } else {
440 DEV_MESSAGE(KERN_INFO, device, 440 dev_info(&device->cdev->dev,
441 "(%ld B/blk): %ldkB", 441 "New DASD with %ld byte/block, total size %ld KB\n",
442 (unsigned long) block->bp_block, 442 (unsigned long) block->bp_block,
443 (unsigned long) (block->blocks << 443 (unsigned long) (block->blocks <<
444 block->s2b_shift) >> 1); 444 block->s2b_shift) >> 1);
445 } 445 }
446out_label: 446out_label:
447 free_page((long) label); 447 free_page((long) label);
@@ -595,7 +595,7 @@ static void
595dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, 595dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
596 struct irb *stat) 596 struct irb *stat)
597{ 597{
598 DEV_MESSAGE(KERN_ERR, device, "%s", 598 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
599 "dump sense not available for DIAG data"); 599 "dump sense not available for DIAG data");
600} 600}
601 601
@@ -621,10 +621,8 @@ static int __init
621dasd_diag_init(void) 621dasd_diag_init(void)
622{ 622{
623 if (!MACHINE_IS_VM) { 623 if (!MACHINE_IS_VM) {
624 MESSAGE_LOG(KERN_INFO, 624 pr_info("Discipline %s cannot be used without z/VM\n",
625 "Machine is not VM: %s " 625 dasd_diag_discipline.name);
626 "discipline not initializing",
627 dasd_diag_discipline.name);
628 return -ENODEV; 626 return -ENODEV;
629 } 627 }
630 ASCEBC(dasd_diag_discipline.ebcname, 4); 628 ASCEBC(dasd_diag_discipline.ebcname, 4);
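dasd_ext_handler() above recovers the request from the interrupt parameter and ignores it when the 4-byte magic does not match the discipline name (EBCDIC in the driver, plain ASCII here). A minimal stand-alone illustration of that validate-before-use step; fake_req and handle_interrupt are invented names for the sketch.

#include <stdio.h>
#include <string.h>

/* Illustrative request carrying a 4-byte magic, like dasd_ccw_req. */
struct fake_req {
        char magic[4];
        int  status;
};

/* The handler gets back an opaque pointer and validates the tag first. */
static int handle_interrupt(void *intparm, const char *expected_magic)
{
        struct fake_req *req = intparm;

        if (memcmp(req->magic, expected_magic, 4) != 0) {
                printf("magic mismatch - ignoring interrupt\n");
                return -1;
        }
        req->status = 1;        /* e.g. mark the request as done */
        return 0;
}

int main(void)
{
        struct fake_req req = { .magic = { 'D', 'I', 'A', 'G' }, .status = 0 };

        handle_interrupt(&req, "DIAG");
        printf("status after interrupt: %d\n", req.status);
        return 0;
}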
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bdb87998f364..21254793c604 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -11,6 +11,8 @@
11 * 11 *
12 */ 12 */
13 13
14#define KMSG_COMPONENT "dasd"
15
14#include <linux/stddef.h> 16#include <linux/stddef.h>
15#include <linux/kernel.h> 17#include <linux/kernel.h>
16#include <linux/slab.h> 18#include <linux/slab.h>
@@ -27,9 +29,12 @@
27#include <asm/uaccess.h> 29#include <asm/uaccess.h>
28#include <asm/cio.h> 30#include <asm/cio.h>
29#include <asm/ccwdev.h> 31#include <asm/ccwdev.h>
32#include <asm/itcw.h>
30 33
31#include "dasd_int.h" 34#include "dasd_int.h"
32#include "dasd_eckd.h" 35#include "dasd_eckd.h"
36#include "../cio/chsc.h"
37
33 38
34#ifdef PRINTK_HEADER 39#ifdef PRINTK_HEADER
35#undef PRINTK_HEADER 40#undef PRINTK_HEADER
@@ -84,7 +89,7 @@ dasd_eckd_probe (struct ccw_device *cdev)
84 /* set ECKD specific ccw-device options */ 89 /* set ECKD specific ccw-device options */
85 ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE); 90 ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
86 if (ret) { 91 if (ret) {
87 printk(KERN_WARNING 92 DBF_EVENT(DBF_WARNING,
88 "dasd_eckd_probe: could not set ccw-device options " 93 "dasd_eckd_probe: could not set ccw-device options "
89 "for %s\n", dev_name(&cdev->dev)); 94 "for %s\n", dev_name(&cdev->dev));
90 return ret; 95 return ret;
@@ -159,6 +164,14 @@ recs_per_track(struct dasd_eckd_characteristics * rdc,
159 return 0; 164 return 0;
160} 165}
161 166
167static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
168{
169 geo->cyl = (__u16) cyl;
170 geo->head = cyl >> 16;
171 geo->head <<= 4;
172 geo->head |= head;
173}
174
162static int 175static int
163check_XRC (struct ccw1 *de_ccw, 176check_XRC (struct ccw1 *de_ccw,
164 struct DE_eckd_data *data, 177 struct DE_eckd_data *data,
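The new set_ch_t() helper packs cylinder numbers that no longer fit into 16 bits: the low 16 cylinder bits stay in the cyl halfword, the remaining bits move into the upper 12 bits of the head halfword, and the physical head (0..14 on ECKD) keeps the low 4 bits. A small user-space sketch of the same packing and the corresponding decode; struct chs is a local stand-in for the driver's ch_t.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the cylinder/head pair used by the channel programs. */
struct chs {
        uint16_t cyl;
        uint16_t head;
};

/* Same packing as the set_ch_t() helper added above. */
static void set_chs(struct chs *geo, uint32_t cyl, uint8_t head)
{
        geo->cyl = (uint16_t)cyl;
        geo->head = (uint16_t)((cyl >> 16) << 4);
        geo->head |= head;
}

int main(void)
{
        struct chs geo;

        set_chs(&geo, 70000, 7);        /* a cylinder beyond the 16-bit range */
        printf("packed:  cyl=0x%04x head=0x%04x\n", geo.cyl, geo.head);
        /* decode: full cylinder = ((head >> 4) << 16) | cyl, head = head & 0xf */
        printf("decoded: cyl=%u head=%u\n",
               (unsigned)(((geo.head >> 4) << 16) | geo.cyl),
               (unsigned)(geo.head & 0xf));
        return 0;
}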
@@ -186,11 +199,12 @@ check_XRC (struct ccw1 *de_ccw,
186} 199}
187 200
188static int 201static int
189define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, 202define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
190 int totrk, int cmd, struct dasd_device * device) 203 unsigned int totrk, int cmd, struct dasd_device *device)
191{ 204{
192 struct dasd_eckd_private *private; 205 struct dasd_eckd_private *private;
193 struct ch_t geo, beg, end; 206 u32 begcyl, endcyl;
207 u16 heads, beghead, endhead;
194 int rc = 0; 208 int rc = 0;
195 209
196 private = (struct dasd_eckd_private *) device->private; 210 private = (struct dasd_eckd_private *) device->private;
@@ -236,7 +250,8 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
236 rc = check_XRC (ccw, data, device); 250 rc = check_XRC (ccw, data, device);
237 break; 251 break;
238 default: 252 default:
239 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd); 253 dev_err(&device->cdev->dev,
254 "0x%x is not a known command\n", cmd);
240 break; 255 break;
241 } 256 }
242 257
@@ -248,27 +263,24 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
248 && !(private->uses_cdl && trk < 2)) 263 && !(private->uses_cdl && trk < 2))
249 data->ga_extended |= 0x40; /* Regular Data Format Mode */ 264 data->ga_extended |= 0x40; /* Regular Data Format Mode */
250 265
251 geo.cyl = private->rdc_data.no_cyl; 266 heads = private->rdc_data.trk_per_cyl;
252 geo.head = private->rdc_data.trk_per_cyl; 267 begcyl = trk / heads;
253 beg.cyl = trk / geo.head; 268 beghead = trk % heads;
254 beg.head = trk % geo.head; 269 endcyl = totrk / heads;
255 end.cyl = totrk / geo.head; 270 endhead = totrk % heads;
256 end.head = totrk % geo.head;
257 271
258 /* check for sequential prestage - enhance cylinder range */ 272 /* check for sequential prestage - enhance cylinder range */
259 if (data->attributes.operation == DASD_SEQ_PRESTAGE || 273 if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
260 data->attributes.operation == DASD_SEQ_ACCESS) { 274 data->attributes.operation == DASD_SEQ_ACCESS) {
261 275
262 if (end.cyl + private->attrib.nr_cyl < geo.cyl) 276 if (endcyl + private->attrib.nr_cyl < private->real_cyl)
263 end.cyl += private->attrib.nr_cyl; 277 endcyl += private->attrib.nr_cyl;
264 else 278 else
265 end.cyl = (geo.cyl - 1); 279 endcyl = (private->real_cyl - 1);
266 } 280 }
267 281
268 data->beg_ext.cyl = beg.cyl; 282 set_ch_t(&data->beg_ext, begcyl, beghead);
269 data->beg_ext.head = beg.head; 283 set_ch_t(&data->end_ext, endcyl, endhead);
270 data->end_ext.cyl = end.cyl;
271 data->end_ext.head = end.head;
272 return rc; 284 return rc;
273} 285}
274 286
@@ -283,29 +295,145 @@ static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
283 return 0; 295 return 0;
284 296
285 /* switch on System Time Stamp - needed for XRC Support */ 297 /* switch on System Time Stamp - needed for XRC Support */
286 pfxdata->define_extend.ga_extended |= 0x08; /* 'Time Stamp Valid' */ 298 pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid' */
287 pfxdata->define_extend.ga_extended |= 0x02; /* 'Extended Parameter' */ 299 pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
288 pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */ 300 pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
289 301
290 rc = get_sync_clock(&pfxdata->define_extend.ep_sys_time); 302 rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
291 /* Ignore return code if sync clock is switched off. */ 303 /* Ignore return code if sync clock is switched off. */
292 if (rc == -ENOSYS || rc == -EACCES) 304 if (rc == -ENOSYS || rc == -EACCES)
293 rc = 0; 305 rc = 0;
294 return rc; 306 return rc;
295} 307}
296 308
297static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, int trk, 309static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
298 int totrk, int cmd, struct dasd_device *basedev, 310 unsigned int rec_on_trk, int count, int cmd,
299 struct dasd_device *startdev) 311 struct dasd_device *device, unsigned int reclen,
312 unsigned int tlf)
313{
314 struct dasd_eckd_private *private;
315 int sector;
316 int dn, d;
317
318 private = (struct dasd_eckd_private *) device->private;
319
320 memset(data, 0, sizeof(*data));
321 sector = 0;
322 if (rec_on_trk) {
323 switch (private->rdc_data.dev_type) {
324 case 0x3390:
325 dn = ceil_quot(reclen + 6, 232);
326 d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
327 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
328 break;
329 case 0x3380:
330 d = 7 + ceil_quot(reclen + 12, 32);
331 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
332 break;
333 }
334 }
335 data->sector = sector;
336 /* note: meaning of count depends on the operation
337 * for record based I/O it's the number of records, but for
338 * track based I/O it's the number of tracks
339 */
340 data->count = count;
341 switch (cmd) {
342 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
343 data->operation.orientation = 0x3;
344 data->operation.operation = 0x03;
345 break;
346 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
347 data->operation.orientation = 0x3;
348 data->operation.operation = 0x16;
349 break;
350 case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
351 data->operation.orientation = 0x1;
352 data->operation.operation = 0x03;
353 data->count++;
354 break;
355 case DASD_ECKD_CCW_READ_RECORD_ZERO:
356 data->operation.orientation = 0x3;
357 data->operation.operation = 0x16;
358 data->count++;
359 break;
360 case DASD_ECKD_CCW_WRITE:
361 case DASD_ECKD_CCW_WRITE_MT:
362 case DASD_ECKD_CCW_WRITE_KD:
363 case DASD_ECKD_CCW_WRITE_KD_MT:
364 data->auxiliary.length_valid = 0x1;
365 data->length = reclen;
366 data->operation.operation = 0x01;
367 break;
368 case DASD_ECKD_CCW_WRITE_CKD:
369 case DASD_ECKD_CCW_WRITE_CKD_MT:
370 data->auxiliary.length_valid = 0x1;
371 data->length = reclen;
372 data->operation.operation = 0x03;
373 break;
374 case DASD_ECKD_CCW_WRITE_TRACK_DATA:
375 data->auxiliary.length_valid = 0x1;
376 data->length = reclen; /* not tlf, as one might think */
377 data->operation.operation = 0x3F;
378 data->extended_operation = 0x23;
379 break;
380 case DASD_ECKD_CCW_READ:
381 case DASD_ECKD_CCW_READ_MT:
382 case DASD_ECKD_CCW_READ_KD:
383 case DASD_ECKD_CCW_READ_KD_MT:
384 data->auxiliary.length_valid = 0x1;
385 data->length = reclen;
386 data->operation.operation = 0x06;
387 break;
388 case DASD_ECKD_CCW_READ_CKD:
389 case DASD_ECKD_CCW_READ_CKD_MT:
390 data->auxiliary.length_valid = 0x1;
391 data->length = reclen;
392 data->operation.operation = 0x16;
393 break;
394 case DASD_ECKD_CCW_READ_COUNT:
395 data->operation.operation = 0x06;
396 break;
397 case DASD_ECKD_CCW_READ_TRACK_DATA:
398 data->auxiliary.length_valid = 0x1;
399 data->length = tlf;
400 data->operation.operation = 0x0C;
401 break;
402 case DASD_ECKD_CCW_ERASE:
403 data->length = reclen;
404 data->auxiliary.length_valid = 0x1;
405 data->operation.operation = 0x0b;
406 break;
407 default:
408 DBF_DEV_EVENT(DBF_ERR, device,
409 "fill LRE unknown opcode 0x%x", cmd);
410 BUG();
411 }
412 set_ch_t(&data->seek_addr,
413 trk / private->rdc_data.trk_per_cyl,
414 trk % private->rdc_data.trk_per_cyl);
415 data->search_arg.cyl = data->seek_addr.cyl;
416 data->search_arg.head = data->seek_addr.head;
417 data->search_arg.record = rec_on_trk;
418}
419
420static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
421 unsigned int trk, unsigned int totrk, int cmd,
422 struct dasd_device *basedev, struct dasd_device *startdev,
423 unsigned char format, unsigned int rec_on_trk, int count,
424 unsigned int blksize, unsigned int tlf)
300{ 425{
301 struct dasd_eckd_private *basepriv, *startpriv; 426 struct dasd_eckd_private *basepriv, *startpriv;
302 struct DE_eckd_data *data; 427 struct DE_eckd_data *dedata;
303 struct ch_t geo, beg, end; 428 struct LRE_eckd_data *lredata;
429 u32 begcyl, endcyl;
430 u16 heads, beghead, endhead;
304 int rc = 0; 431 int rc = 0;
305 432
306 basepriv = (struct dasd_eckd_private *) basedev->private; 433 basepriv = (struct dasd_eckd_private *) basedev->private;
307 startpriv = (struct dasd_eckd_private *) startdev->private; 434 startpriv = (struct dasd_eckd_private *) startdev->private;
308 data = &pfxdata->define_extend; 435 dedata = &pfxdata->define_extent;
436 lredata = &pfxdata->locate_record;
309 437
310 ccw->cmd_code = DASD_ECKD_CCW_PFX; 438 ccw->cmd_code = DASD_ECKD_CCW_PFX;
311 ccw->flags = 0; 439 ccw->flags = 0;
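fill_LRE_data() above estimates a starting sector for a given record number from the device type and record length. The same formulas, lifted from the hunk, as a runnable user-space sketch; ceil_quot() is written out here as plain ceiling division, which is assumed to match the driver's helper.

#include <stdio.h>

/* ceiling division, assumed equivalent to the driver's ceil_quot() */
static unsigned int ceil_quot(unsigned int d1, unsigned int d2)
{
        return (d1 + (d2 - 1)) / d2;
}

/* Sector estimate for locating record N on a track, per device type. */
static int sector_for_record(unsigned int dev_type, unsigned int rec_on_trk,
                             unsigned int reclen)
{
        int dn, d, sector = 0;

        if (!rec_on_trk)
                return 0;
        switch (dev_type) {
        case 0x3390:
                dn = ceil_quot(reclen + 6, 232);
                d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
                sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
                break;
        case 0x3380:
                d = 7 + ceil_quot(reclen + 12, 32);
                sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
                break;
        }
        return sector;
}

int main(void)
{
        printf("3390, record 5, 4096 byte blocks -> sector %d\n",
               sector_for_record(0x3390, 5, 4096));
        printf("3380, record 5, 4096 byte blocks -> sector %d\n",
               sector_for_record(0x3380, 5, 4096));
        return 0;
}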
@@ -314,10 +442,16 @@ static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, int trk,
314 442
315 memset(pfxdata, 0, sizeof(*pfxdata)); 443 memset(pfxdata, 0, sizeof(*pfxdata));
316 /* prefix data */ 444 /* prefix data */
317 pfxdata->format = 0; 445 if (format > 1) {
446 DBF_DEV_EVENT(DBF_ERR, basedev,
447 "PFX LRE unknown format 0x%x", format);
448 BUG();
449 return -EINVAL;
450 }
451 pfxdata->format = format;
318 pfxdata->base_address = basepriv->ned->unit_addr; 452 pfxdata->base_address = basepriv->ned->unit_addr;
319 pfxdata->base_lss = basepriv->ned->ID; 453 pfxdata->base_lss = basepriv->ned->ID;
320 pfxdata->validity.define_extend = 1; 454 pfxdata->validity.define_extent = 1;
321 455
322 /* private uid is kept up to date, conf_data may be outdated */ 456 /* private uid is kept up to date, conf_data may be outdated */
323 if (startpriv->uid.type != UA_BASE_DEVICE) { 457 if (startpriv->uid.type != UA_BASE_DEVICE) {
@@ -337,70 +471,94 @@ static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, int trk,
337 case DASD_ECKD_CCW_READ_KD: 471 case DASD_ECKD_CCW_READ_KD:
338 case DASD_ECKD_CCW_READ_KD_MT: 472 case DASD_ECKD_CCW_READ_KD_MT:
339 case DASD_ECKD_CCW_READ_COUNT: 473 case DASD_ECKD_CCW_READ_COUNT:
340 data->mask.perm = 0x1; 474 dedata->mask.perm = 0x1;
341 data->attributes.operation = basepriv->attrib.operation; 475 dedata->attributes.operation = basepriv->attrib.operation;
476 break;
477 case DASD_ECKD_CCW_READ_TRACK_DATA:
478 dedata->mask.perm = 0x1;
479 dedata->attributes.operation = basepriv->attrib.operation;
480 dedata->blk_size = 0;
342 break; 481 break;
343 case DASD_ECKD_CCW_WRITE: 482 case DASD_ECKD_CCW_WRITE:
344 case DASD_ECKD_CCW_WRITE_MT: 483 case DASD_ECKD_CCW_WRITE_MT:
345 case DASD_ECKD_CCW_WRITE_KD: 484 case DASD_ECKD_CCW_WRITE_KD:
346 case DASD_ECKD_CCW_WRITE_KD_MT: 485 case DASD_ECKD_CCW_WRITE_KD_MT:
347 data->mask.perm = 0x02; 486 dedata->mask.perm = 0x02;
348 data->attributes.operation = basepriv->attrib.operation; 487 dedata->attributes.operation = basepriv->attrib.operation;
349 rc = check_XRC_on_prefix(pfxdata, basedev); 488 rc = check_XRC_on_prefix(pfxdata, basedev);
350 break; 489 break;
351 case DASD_ECKD_CCW_WRITE_CKD: 490 case DASD_ECKD_CCW_WRITE_CKD:
352 case DASD_ECKD_CCW_WRITE_CKD_MT: 491 case DASD_ECKD_CCW_WRITE_CKD_MT:
353 data->attributes.operation = DASD_BYPASS_CACHE; 492 dedata->attributes.operation = DASD_BYPASS_CACHE;
354 rc = check_XRC_on_prefix(pfxdata, basedev); 493 rc = check_XRC_on_prefix(pfxdata, basedev);
355 break; 494 break;
356 case DASD_ECKD_CCW_ERASE: 495 case DASD_ECKD_CCW_ERASE:
357 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: 496 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
358 case DASD_ECKD_CCW_WRITE_RECORD_ZERO: 497 case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
359 data->mask.perm = 0x3; 498 dedata->mask.perm = 0x3;
360 data->mask.auth = 0x1; 499 dedata->mask.auth = 0x1;
361 data->attributes.operation = DASD_BYPASS_CACHE; 500 dedata->attributes.operation = DASD_BYPASS_CACHE;
362 rc = check_XRC_on_prefix(pfxdata, basedev); 501 rc = check_XRC_on_prefix(pfxdata, basedev);
363 break; 502 break;
364 default: 503 case DASD_ECKD_CCW_WRITE_TRACK_DATA:
365 DEV_MESSAGE(KERN_ERR, basedev, "unknown opcode 0x%x", cmd); 504 dedata->mask.perm = 0x02;
505 dedata->attributes.operation = basepriv->attrib.operation;
506 dedata->blk_size = blksize;
507 rc = check_XRC_on_prefix(pfxdata, basedev);
366 break; 508 break;
509 default:
510 DBF_DEV_EVENT(DBF_ERR, basedev,
511 "PFX LRE unknown opcode 0x%x", cmd);
512 BUG();
513 return -EINVAL;
367 } 514 }
368 515
369 data->attributes.mode = 0x3; /* ECKD */ 516 dedata->attributes.mode = 0x3; /* ECKD */
370 517
371 if ((basepriv->rdc_data.cu_type == 0x2105 || 518 if ((basepriv->rdc_data.cu_type == 0x2105 ||
372 basepriv->rdc_data.cu_type == 0x2107 || 519 basepriv->rdc_data.cu_type == 0x2107 ||
373 basepriv->rdc_data.cu_type == 0x1750) 520 basepriv->rdc_data.cu_type == 0x1750)
374 && !(basepriv->uses_cdl && trk < 2)) 521 && !(basepriv->uses_cdl && trk < 2))
375 data->ga_extended |= 0x40; /* Regular Data Format Mode */ 522 dedata->ga_extended |= 0x40; /* Regular Data Format Mode */
376 523
377 geo.cyl = basepriv->rdc_data.no_cyl; 524 heads = basepriv->rdc_data.trk_per_cyl;
378 geo.head = basepriv->rdc_data.trk_per_cyl; 525 begcyl = trk / heads;
379 beg.cyl = trk / geo.head; 526 beghead = trk % heads;
380 beg.head = trk % geo.head; 527 endcyl = totrk / heads;
381 end.cyl = totrk / geo.head; 528 endhead = totrk % heads;
382 end.head = totrk % geo.head;
383 529
384 /* check for sequential prestage - enhance cylinder range */ 530 /* check for sequential prestage - enhance cylinder range */
385 if (data->attributes.operation == DASD_SEQ_PRESTAGE || 531 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
386 data->attributes.operation == DASD_SEQ_ACCESS) { 532 dedata->attributes.operation == DASD_SEQ_ACCESS) {
387 533
388 if (end.cyl + basepriv->attrib.nr_cyl < geo.cyl) 534 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
389 end.cyl += basepriv->attrib.nr_cyl; 535 endcyl += basepriv->attrib.nr_cyl;
390 else 536 else
391 end.cyl = (geo.cyl - 1); 537 endcyl = (basepriv->real_cyl - 1);
538 }
539
540 set_ch_t(&dedata->beg_ext, begcyl, beghead);
541 set_ch_t(&dedata->end_ext, endcyl, endhead);
542
543 if (format == 1) {
544 fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
545 basedev, blksize, tlf);
392 } 546 }
393 547
394 data->beg_ext.cyl = beg.cyl;
395 data->beg_ext.head = beg.head;
396 data->end_ext.cyl = end.cyl;
397 data->end_ext.head = end.head;
398 return rc; 548 return rc;
399} 549}
400 550
551static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
552 unsigned int trk, unsigned int totrk, int cmd,
553 struct dasd_device *basedev, struct dasd_device *startdev)
554{
555 return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
556 0, 0, 0, 0, 0);
557}
558
401static void 559static void
402locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, 560locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
403 int rec_on_trk, int no_rec, int cmd, 561 unsigned int rec_on_trk, int no_rec, int cmd,
404 struct dasd_device * device, int reclen) 562 struct dasd_device * device, int reclen)
405{ 563{
406 struct dasd_eckd_private *private; 564 struct dasd_eckd_private *private;
@@ -491,12 +649,14 @@ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
491 data->operation.operation = 0x0b; 649 data->operation.operation = 0x0b;
492 break; 650 break;
493 default: 651 default:
494 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd); 652 DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
495 } 653 "opcode 0x%x", cmd);
496 data->seek_addr.cyl = data->search_arg.cyl = 654 }
497 trk / private->rdc_data.trk_per_cyl; 655 set_ch_t(&data->seek_addr,
498 data->seek_addr.head = data->search_arg.head = 656 trk / private->rdc_data.trk_per_cyl,
499 trk % private->rdc_data.trk_per_cyl; 657 trk % private->rdc_data.trk_per_cyl);
658 data->search_arg.cyl = data->seek_addr.cyl;
659 data->search_arg.head = data->seek_addr.head;
500 data->search_arg.record = rec_on_trk; 660 data->search_arg.record = rec_on_trk;
501} 661}
502 662
@@ -585,8 +745,8 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
585 cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device); 745 cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device);
586 746
587 if (IS_ERR(cqr)) { 747 if (IS_ERR(cqr)) {
588 DEV_MESSAGE(KERN_WARNING, device, "%s", 748 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
589 "Could not allocate RCD request"); 749 "Could not allocate RCD request");
590 return cqr; 750 return cqr;
591 } 751 }
592 752
@@ -736,14 +896,16 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
736 rc = dasd_eckd_read_conf_lpm(device, &conf_data, 896 rc = dasd_eckd_read_conf_lpm(device, &conf_data,
737 &conf_len, lpm); 897 &conf_len, lpm);
738 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */ 898 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
739 MESSAGE(KERN_WARNING, 899 DBF_EVENT(DBF_WARNING,
740 "Read configuration data returned " 900 "Read configuration data returned "
741 "error %d", rc); 901 "error %d for device: %s", rc,
902 dev_name(&device->cdev->dev));
742 return rc; 903 return rc;
743 } 904 }
744 if (conf_data == NULL) { 905 if (conf_data == NULL) {
745 MESSAGE(KERN_WARNING, "%s", "No configuration " 906 DBF_EVENT(DBF_WARNING, "No configuration "
746 "data retrieved"); 907 "data retrieved for device: %s",
908 dev_name(&device->cdev->dev));
747 continue; /* no error */ 909 continue; /* no error */
748 } 910 }
749 /* save first valid configuration data */ 911 /* save first valid configuration data */
@@ -790,8 +952,9 @@ static int dasd_eckd_read_features(struct dasd_device *device)
790 sizeof(struct dasd_rssd_features)), 952 sizeof(struct dasd_rssd_features)),
791 device); 953 device);
792 if (IS_ERR(cqr)) { 954 if (IS_ERR(cqr)) {
793 DEV_MESSAGE(KERN_WARNING, device, "%s", 955 DBF_EVENT(DBF_WARNING, "Could not allocate initialization "
794 "Could not allocate initialization request"); 956 "request for device: %s",
957 dev_name(&device->cdev->dev));
795 return PTR_ERR(cqr); 958 return PTR_ERR(cqr);
796 } 959 }
797 cqr->startdev = device; 960 cqr->startdev = device;
@@ -840,7 +1003,8 @@ static int dasd_eckd_read_features(struct dasd_device *device)
840/* 1003/*
841 * Build CP for Perform Subsystem Function - SSC. 1004 * Build CP for Perform Subsystem Function - SSC.
842 */ 1005 */
843static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device) 1006static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1007 int enable_pav)
844{ 1008{
845 struct dasd_ccw_req *cqr; 1009 struct dasd_ccw_req *cqr;
846 struct dasd_psf_ssc_data *psf_ssc_data; 1010 struct dasd_psf_ssc_data *psf_ssc_data;
@@ -851,15 +1015,17 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device)
851 device); 1015 device);
852 1016
853 if (IS_ERR(cqr)) { 1017 if (IS_ERR(cqr)) {
854 DEV_MESSAGE(KERN_WARNING, device, "%s", 1018 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
855 "Could not allocate PSF-SSC request"); 1019 "Could not allocate PSF-SSC request");
856 return cqr; 1020 return cqr;
857 } 1021 }
858 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data; 1022 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
859 psf_ssc_data->order = PSF_ORDER_SSC; 1023 psf_ssc_data->order = PSF_ORDER_SSC;
860 psf_ssc_data->suborder = 0x88; 1024 psf_ssc_data->suborder = 0x40;
861 psf_ssc_data->reserved[0] = 0x88; 1025 if (enable_pav) {
862 1026 psf_ssc_data->suborder |= 0x88;
1027 psf_ssc_data->reserved[0] = 0x88;
1028 }
863 ccw = cqr->cpaddr; 1029 ccw = cqr->cpaddr;
864 ccw->cmd_code = DASD_ECKD_CCW_PSF; 1030 ccw->cmd_code = DASD_ECKD_CCW_PSF;
865 ccw->cda = (__u32)(addr_t)psf_ssc_data; 1031 ccw->cda = (__u32)(addr_t)psf_ssc_data;
@@ -880,12 +1046,12 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device)
880 * call might change behaviour of DASD devices. 1046 * call might change behaviour of DASD devices.
881 */ 1047 */
882static int 1048static int
883dasd_eckd_psf_ssc(struct dasd_device *device) 1049dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
884{ 1050{
885 struct dasd_ccw_req *cqr; 1051 struct dasd_ccw_req *cqr;
886 int rc; 1052 int rc;
887 1053
888 cqr = dasd_eckd_build_psf_ssc(device); 1054 cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
889 if (IS_ERR(cqr)) 1055 if (IS_ERR(cqr))
890 return PTR_ERR(cqr); 1056 return PTR_ERR(cqr);
891 1057
@@ -904,19 +1070,20 @@ static int dasd_eckd_validate_server(struct dasd_device *device)
904{ 1070{
905 int rc; 1071 int rc;
906 struct dasd_eckd_private *private; 1072 struct dasd_eckd_private *private;
1073 int enable_pav;
907 1074
908 /* Currently PAV is the only reason to 'validate' server on LPAR */
909 if (dasd_nopav || MACHINE_IS_VM) 1075 if (dasd_nopav || MACHINE_IS_VM)
910 return 0; 1076 enable_pav = 0;
911 1077 else
912 rc = dasd_eckd_psf_ssc(device); 1078 enable_pav = 1;
1079 rc = dasd_eckd_psf_ssc(device, enable_pav);
913 /* may be requested feature is not available on server, 1080 /* may be requested feature is not available on server,
914 * therefore just report error and go ahead */ 1081 * therefore just report error and go ahead */
915 private = (struct dasd_eckd_private *) device->private; 1082 private = (struct dasd_eckd_private *) device->private;
916 DEV_MESSAGE(KERN_INFO, device, 1083 DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x "
917 "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d", 1084 "returned rc=%d for device: %s",
918 private->uid.vendor, private->uid.serial, 1085 private->uid.vendor, private->uid.serial,
919 private->uid.ssid, rc); 1086 private->uid.ssid, rc, dev_name(&device->cdev->dev));
920 /* RE-Read Configuration Data */ 1087 /* RE-Read Configuration Data */
921 return dasd_eckd_read_conf(device); 1088 return dasd_eckd_read_conf(device);
922} 1089}
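dasd_eckd_build_psf_ssc() and dasd_eckd_validate_server() above now always set suborder 0x40 and only add the 0x88 PAV bits when PAV may be enabled (neither on z/VM nor with nopav set). The decision in isolation, as a small sketch:

#include <stdio.h>

/* Build the PSF-SSC suborder byte as the patch now does: 0x40 always,
 * plus the PAV bits only when PAV may be enabled on this system. */
static unsigned char build_ssc_suborder(int nopav, int is_vm)
{
        int enable_pav = !(nopav || is_vm);
        unsigned char suborder = 0x40;

        if (enable_pav)
                suborder |= 0x88;
        return suborder;
}

int main(void)
{
        printf("LPAR, PAV allowed : suborder 0x%02x\n", build_ssc_suborder(0, 0));
        printf("z/VM or nopav set : suborder 0x%02x\n", build_ssc_suborder(1, 0));
        return 0;
}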
@@ -938,9 +1105,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
938 private = kzalloc(sizeof(struct dasd_eckd_private), 1105 private = kzalloc(sizeof(struct dasd_eckd_private),
939 GFP_KERNEL | GFP_DMA); 1106 GFP_KERNEL | GFP_DMA);
940 if (private == NULL) { 1107 if (private == NULL) {
941 DEV_MESSAGE(KERN_WARNING, device, "%s", 1108 dev_warn(&device->cdev->dev,
942 "memory allocation failed for private " 1109 "Allocating memory for private DASD data "
943 "data"); 1110 "failed\n");
944 return -ENOMEM; 1111 return -ENOMEM;
945 } 1112 }
946 device->private = (void *) private; 1113 device->private = (void *) private;
@@ -965,8 +1132,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
965 if (private->uid.type == UA_BASE_DEVICE) { 1132 if (private->uid.type == UA_BASE_DEVICE) {
966 block = dasd_alloc_block(); 1133 block = dasd_alloc_block();
967 if (IS_ERR(block)) { 1134 if (IS_ERR(block)) {
968 DEV_MESSAGE(KERN_WARNING, device, "%s", 1135 DBF_EVENT(DBF_WARNING, "could not allocate dasd "
969 "could not allocate dasd block structure"); 1136 "block structure for device: %s",
1137 dev_name(&device->cdev->dev));
970 rc = PTR_ERR(block); 1138 rc = PTR_ERR(block);
971 goto out_err1; 1139 goto out_err1;
972 } 1140 }
@@ -997,20 +1165,27 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
997 memset(rdc_data, 0, sizeof(rdc_data)); 1165 memset(rdc_data, 0, sizeof(rdc_data));
998 rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64); 1166 rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64);
999 if (rc) { 1167 if (rc) {
1000 DEV_MESSAGE(KERN_WARNING, device, 1168 DBF_EVENT(DBF_WARNING,
1001 "Read device characteristics returned " 1169 "Read device characteristics failed, rc=%d for "
1002 "rc=%d", rc); 1170 "device: %s", rc, dev_name(&device->cdev->dev));
1003 goto out_err3; 1171 goto out_err3;
1004 } 1172 }
 1005 DEV_MESSAGE(KERN_INFO, device, 1173 /* find the valid cylinder size */
1006 "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d", 1174 if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
1007 private->rdc_data.dev_type, 1175 private->rdc_data.long_no_cyl)
1008 private->rdc_data.dev_model, 1176 private->real_cyl = private->rdc_data.long_no_cyl;
1009 private->rdc_data.cu_type, 1177 else
1010 private->rdc_data.cu_model.model, 1178 private->real_cyl = private->rdc_data.no_cyl;
1011 private->rdc_data.no_cyl, 1179
1012 private->rdc_data.trk_per_cyl, 1180 dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
1013 private->rdc_data.sec_per_trk); 1181 "with %d cylinders, %d heads, %d sectors\n",
1182 private->rdc_data.dev_type,
1183 private->rdc_data.dev_model,
1184 private->rdc_data.cu_type,
1185 private->rdc_data.cu_model.model,
1186 private->real_cyl,
1187 private->rdc_data.trk_per_cyl,
1188 private->rdc_data.sec_per_trk);
1014 return 0; 1189 return 0;
1015 1190
1016out_err3: 1191out_err3:
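The check_characteristics hunk above picks the usable cylinder count: when the 16-bit no_cyl field holds the LV_COMPAT_CYL sentinel and long_no_cyl is set, the long value wins. In the sketch below the sentinel value 0xFFFE is only an assumption; the real definition lives in dasd_eckd.h and is not shown in this diff.

#include <stdio.h>
#include <stdint.h>

/* Sentinel meaning "cylinder count does not fit the 16-bit field" (value assumed). */
#define LV_COMPAT_CYL 0xFFFE

/* Pick the usable cylinder count as the check_characteristics hunk does. */
static uint32_t real_cylinders(uint16_t no_cyl, uint32_t long_no_cyl)
{
        if (no_cyl == LV_COMPAT_CYL && long_no_cyl)
                return long_no_cyl;
        return no_cyl;
}

int main(void)
{
        printf("small volume : %u cylinders\n", real_cylinders(3339, 0));
        printf("large volume : %u cylinders\n",
               real_cylinders(LV_COMPAT_CYL, 262668));
        return 0;
}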
@@ -1151,14 +1326,12 @@ dasd_eckd_end_analysis(struct dasd_block *block)
1151 status = private->init_cqr_status; 1326 status = private->init_cqr_status;
1152 private->init_cqr_status = -1; 1327 private->init_cqr_status = -1;
1153 if (status != DASD_CQR_DONE) { 1328 if (status != DASD_CQR_DONE) {
1154 DEV_MESSAGE(KERN_WARNING, device, "%s", 1329 dev_warn(&device->cdev->dev,
1155 "volume analysis returned unformatted disk"); 1330 "The DASD is not formatted\n");
1156 return -EMEDIUMTYPE; 1331 return -EMEDIUMTYPE;
1157 } 1332 }
1158 1333
1159 private->uses_cdl = 1; 1334 private->uses_cdl = 1;
1160 /* Calculate number of blocks/records per track. */
1161 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
1162 /* Check Track 0 for Compatible Disk Layout */ 1335 /* Check Track 0 for Compatible Disk Layout */
1163 count_area = NULL; 1336 count_area = NULL;
1164 for (i = 0; i < 3; i++) { 1337 for (i = 0; i < 3; i++) {
@@ -1182,8 +1355,8 @@ dasd_eckd_end_analysis(struct dasd_block *block)
1182 count_area = &private->count_area[0]; 1355 count_area = &private->count_area[0];
1183 } else { 1356 } else {
1184 if (private->count_area[3].record == 1) 1357 if (private->count_area[3].record == 1)
1185 DEV_MESSAGE(KERN_WARNING, device, "%s", 1358 dev_warn(&device->cdev->dev,
1186 "Trk 0: no records after VTOC!"); 1359 "Track 0 has no records following the VTOC\n");
1187 } 1360 }
1188 if (count_area != NULL && count_area->kl == 0) { 1361 if (count_area != NULL && count_area->kl == 0) {
1189 /* we found notthing violating our disk layout */ 1362 /* we found notthing violating our disk layout */
@@ -1191,8 +1364,8 @@ dasd_eckd_end_analysis(struct dasd_block *block)
1191 block->bp_block = count_area->dl; 1364 block->bp_block = count_area->dl;
1192 } 1365 }
1193 if (block->bp_block == 0) { 1366 if (block->bp_block == 0) {
1194 DEV_MESSAGE(KERN_WARNING, device, "%s", 1367 dev_warn(&device->cdev->dev,
1195 "Volume has incompatible disk layout"); 1368 "The disk layout of the DASD is not supported\n");
1196 return -EMEDIUMTYPE; 1369 return -EMEDIUMTYPE;
1197 } 1370 }
1198 block->s2b_shift = 0; /* bits to shift 512 to get a block */ 1371 block->s2b_shift = 0; /* bits to shift 512 to get a block */
@@ -1200,19 +1373,19 @@ dasd_eckd_end_analysis(struct dasd_block *block)
1200 block->s2b_shift++; 1373 block->s2b_shift++;
1201 1374
1202 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); 1375 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
1203 block->blocks = (private->rdc_data.no_cyl * 1376 block->blocks = (private->real_cyl *
1204 private->rdc_data.trk_per_cyl * 1377 private->rdc_data.trk_per_cyl *
1205 blk_per_trk); 1378 blk_per_trk);
1206 1379
1207 DEV_MESSAGE(KERN_INFO, device, 1380 dev_info(&device->cdev->dev,
1208 "(%dkB blks): %dkB at %dkB/trk %s", 1381 "DASD with %d KB/block, %d KB total size, %d KB/track, "
1209 (block->bp_block >> 10), 1382 "%s\n", (block->bp_block >> 10),
1210 ((private->rdc_data.no_cyl * 1383 ((private->real_cyl *
1211 private->rdc_data.trk_per_cyl * 1384 private->rdc_data.trk_per_cyl *
1212 blk_per_trk * (block->bp_block >> 9)) >> 1), 1385 blk_per_trk * (block->bp_block >> 9)) >> 1),
1213 ((blk_per_trk * block->bp_block) >> 10), 1386 ((blk_per_trk * block->bp_block) >> 10),
1214 private->uses_cdl ? 1387 private->uses_cdl ?
1215 "compatible disk layout" : "linux disk layout"); 1388 "compatible disk layout" : "linux disk layout");
1216 1389
1217 return 0; 1390 return 0;
1218} 1391}
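The end-of-analysis hunk above computes the block count from real_cyl, tracks per cylinder and blocks per track, and reports KB totals from it. The same arithmetic with illustrative 3390-style numbers (the driver derives all of these from the device):

#include <stdio.h>

int main(void)
{
        /* Illustrative geometry; the real values come from RDC data. */
        unsigned int real_cyl = 10017, trk_per_cyl = 15;
        unsigned int bp_block = 4096, blk_per_trk = 12;
        unsigned int s2b_shift;
        unsigned long blocks, total_kb, per_track_kb;

        /* bits needed to turn 512-byte sectors into blocks */
        for (s2b_shift = 0; 512U << s2b_shift < bp_block; s2b_shift++)
                ;

        blocks = (unsigned long)real_cyl * trk_per_cyl * blk_per_trk;
        /* same arithmetic as the dev_info() in the analysis hunk:
         * blocks times sectors per block, halved to get KB */
        total_kb = (blocks * (bp_block >> 9)) >> 1;
        per_track_kb = (blk_per_trk * bp_block) >> 10;

        printf("%u KB/block, %lu KB total, %lu KB/track (s2b_shift=%u)\n",
               bp_block >> 10, total_kb, per_track_kb, s2b_shift);
        return 0;
}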
@@ -1262,31 +1435,35 @@ dasd_eckd_format_device(struct dasd_device * device,
1262 struct eckd_count *ect; 1435 struct eckd_count *ect;
1263 struct ccw1 *ccw; 1436 struct ccw1 *ccw;
1264 void *data; 1437 void *data;
1265 int rpt, cyl, head; 1438 int rpt;
1439 struct ch_t address;
1266 int cplength, datasize; 1440 int cplength, datasize;
1267 int i; 1441 int i;
1442 int intensity = 0;
1443 int r0_perm;
1268 1444
1269 private = (struct dasd_eckd_private *) device->private; 1445 private = (struct dasd_eckd_private *) device->private;
1270 rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize); 1446 rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
1271 cyl = fdata->start_unit / private->rdc_data.trk_per_cyl; 1447 set_ch_t(&address,
1272 head = fdata->start_unit % private->rdc_data.trk_per_cyl; 1448 fdata->start_unit / private->rdc_data.trk_per_cyl,
1449 fdata->start_unit % private->rdc_data.trk_per_cyl);
1273 1450
1274 /* Sanity checks. */ 1451 /* Sanity checks. */
1275 if (fdata->start_unit >= 1452 if (fdata->start_unit >=
1276 (private->rdc_data.no_cyl * private->rdc_data.trk_per_cyl)) { 1453 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
1277 DEV_MESSAGE(KERN_INFO, device, "Track no %d too big!", 1454 dev_warn(&device->cdev->dev, "Start track number %d used in "
1278 fdata->start_unit); 1455 "formatting is too big\n", fdata->start_unit);
1279 return ERR_PTR(-EINVAL); 1456 return ERR_PTR(-EINVAL);
1280 } 1457 }
1281 if (fdata->start_unit > fdata->stop_unit) { 1458 if (fdata->start_unit > fdata->stop_unit) {
1282 DEV_MESSAGE(KERN_INFO, device, "Track %d reached! ending.", 1459 dev_warn(&device->cdev->dev, "Start track %d used in "
1283 fdata->start_unit); 1460 "formatting exceeds end track\n", fdata->start_unit);
1284 return ERR_PTR(-EINVAL); 1461 return ERR_PTR(-EINVAL);
1285 } 1462 }
1286 if (dasd_check_blocksize(fdata->blksize) != 0) { 1463 if (dasd_check_blocksize(fdata->blksize) != 0) {
1287 DEV_MESSAGE(KERN_WARNING, device, 1464 dev_warn(&device->cdev->dev,
1288 "Invalid blocksize %d...terminating!", 1465 "The DASD cannot be formatted with block size %d\n",
1289 fdata->blksize); 1466 fdata->blksize);
1290 return ERR_PTR(-EINVAL); 1467 return ERR_PTR(-EINVAL);
1291 } 1468 }
1292 1469
@@ -1296,9 +1473,17 @@ dasd_eckd_format_device(struct dasd_device * device,
1296 * Bit 1: write home address, currently not supported 1473 * Bit 1: write home address, currently not supported
1297 * Bit 2: invalidate tracks 1474 * Bit 2: invalidate tracks
1298 * Bit 3: use OS/390 compatible disk layout (cdl) 1475 * Bit 3: use OS/390 compatible disk layout (cdl)
1476 * Bit 4: do not allow storage subsystem to modify record zero
1299 * Only some bit combinations do make sense. 1477 * Only some bit combinations do make sense.
1300 */ 1478 */
1301 switch (fdata->intensity) { 1479 if (fdata->intensity & 0x10) {
1480 r0_perm = 0;
1481 intensity = fdata->intensity & ~0x10;
1482 } else {
1483 r0_perm = 1;
1484 intensity = fdata->intensity;
1485 }
1486 switch (intensity) {
1302 case 0x00: /* Normal format */ 1487 case 0x00: /* Normal format */
1303 case 0x08: /* Normal format, use cdl. */ 1488 case 0x08: /* Normal format, use cdl. */
1304 cplength = 2 + rpt; 1489 cplength = 2 + rpt;
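dasd_eckd_format_device() above starts treating intensity bit 4 (0x10) as "do not let the storage subsystem modify record zero": the bit is remembered as r0_perm and stripped before the switch on the remaining flags. The flag handling on its own:

#include <stdio.h>

int main(void)
{
        /* Format "intensity" flags from the ioctl, as documented above:
         * bit 0 write record zero, bit 2 invalidate tracks,
         * bit 3 CDL layout, bit 4 keep the subsystem from touching R0. */
        int fdata_intensity = 0x18;     /* example: CDL + protect record zero */
        int intensity, r0_perm;

        if (fdata_intensity & 0x10) {
                r0_perm = 0;                            /* do not let R0 be modified */
                intensity = fdata_intensity & ~0x10;    /* strip bit 4 before the switch */
        } else {
                r0_perm = 1;
                intensity = fdata_intensity;
        }

        printf("intensity=0x%02x r0_perm=%d\n", intensity, r0_perm);
        return 0;
}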
@@ -1322,8 +1507,8 @@ dasd_eckd_format_device(struct dasd_device * device,
1322 sizeof(struct eckd_count); 1507 sizeof(struct eckd_count);
1323 break; 1508 break;
1324 default: 1509 default:
1325 DEV_MESSAGE(KERN_WARNING, device, "Invalid flags 0x%x.", 1510 dev_warn(&device->cdev->dev, "An I/O control call used "
1326 fdata->intensity); 1511 "incorrect flags 0x%x\n", fdata->intensity);
1327 return ERR_PTR(-EINVAL); 1512 return ERR_PTR(-EINVAL);
1328 } 1513 }
1329 /* Allocate the format ccw request. */ 1514 /* Allocate the format ccw request. */
@@ -1335,11 +1520,14 @@ dasd_eckd_format_device(struct dasd_device * device,
1335 data = fcp->data; 1520 data = fcp->data;
1336 ccw = fcp->cpaddr; 1521 ccw = fcp->cpaddr;
1337 1522
1338 switch (fdata->intensity & ~0x08) { 1523 switch (intensity & ~0x08) {
1339 case 0x00: /* Normal format. */ 1524 case 0x00: /* Normal format. */
1340 define_extent(ccw++, (struct DE_eckd_data *) data, 1525 define_extent(ccw++, (struct DE_eckd_data *) data,
1341 fdata->start_unit, fdata->start_unit, 1526 fdata->start_unit, fdata->start_unit,
1342 DASD_ECKD_CCW_WRITE_CKD, device); 1527 DASD_ECKD_CCW_WRITE_CKD, device);
1528 /* grant subsystem permission to format R0 */
1529 if (r0_perm)
1530 ((struct DE_eckd_data *)data)->ga_extended |= 0x04;
1343 data += sizeof(struct DE_eckd_data); 1531 data += sizeof(struct DE_eckd_data);
1344 ccw[-1].flags |= CCW_FLAG_CC; 1532 ccw[-1].flags |= CCW_FLAG_CC;
1345 locate_record(ccw++, (struct LO_eckd_data *) data, 1533 locate_record(ccw++, (struct LO_eckd_data *) data,
@@ -1373,11 +1561,11 @@ dasd_eckd_format_device(struct dasd_device * device,
1373 data += sizeof(struct LO_eckd_data); 1561 data += sizeof(struct LO_eckd_data);
1374 break; 1562 break;
1375 } 1563 }
1376 if (fdata->intensity & 0x01) { /* write record zero */ 1564 if (intensity & 0x01) { /* write record zero */
1377 ect = (struct eckd_count *) data; 1565 ect = (struct eckd_count *) data;
1378 data += sizeof(struct eckd_count); 1566 data += sizeof(struct eckd_count);
1379 ect->cyl = cyl; 1567 ect->cyl = address.cyl;
1380 ect->head = head; 1568 ect->head = address.head;
1381 ect->record = 0; 1569 ect->record = 0;
1382 ect->kl = 0; 1570 ect->kl = 0;
1383 ect->dl = 8; 1571 ect->dl = 8;
@@ -1388,11 +1576,11 @@ dasd_eckd_format_device(struct dasd_device * device,
1388 ccw->cda = (__u32)(addr_t) ect; 1576 ccw->cda = (__u32)(addr_t) ect;
1389 ccw++; 1577 ccw++;
1390 } 1578 }
1391 if ((fdata->intensity & ~0x08) & 0x04) { /* erase track */ 1579 if ((intensity & ~0x08) & 0x04) { /* erase track */
1392 ect = (struct eckd_count *) data; 1580 ect = (struct eckd_count *) data;
1393 data += sizeof(struct eckd_count); 1581 data += sizeof(struct eckd_count);
1394 ect->cyl = cyl; 1582 ect->cyl = address.cyl;
1395 ect->head = head; 1583 ect->head = address.head;
1396 ect->record = 1; 1584 ect->record = 1;
1397 ect->kl = 0; 1585 ect->kl = 0;
1398 ect->dl = 0; 1586 ect->dl = 0;
@@ -1405,20 +1593,20 @@ dasd_eckd_format_device(struct dasd_device * device,
1405 for (i = 0; i < rpt; i++) { 1593 for (i = 0; i < rpt; i++) {
1406 ect = (struct eckd_count *) data; 1594 ect = (struct eckd_count *) data;
1407 data += sizeof(struct eckd_count); 1595 data += sizeof(struct eckd_count);
1408 ect->cyl = cyl; 1596 ect->cyl = address.cyl;
1409 ect->head = head; 1597 ect->head = address.head;
1410 ect->record = i + 1; 1598 ect->record = i + 1;
1411 ect->kl = 0; 1599 ect->kl = 0;
1412 ect->dl = fdata->blksize; 1600 ect->dl = fdata->blksize;
1413 /* Check for special tracks 0-1 when formatting CDL */ 1601 /* Check for special tracks 0-1 when formatting CDL */
1414 if ((fdata->intensity & 0x08) && 1602 if ((intensity & 0x08) &&
1415 fdata->start_unit == 0) { 1603 fdata->start_unit == 0) {
1416 if (i < 3) { 1604 if (i < 3) {
1417 ect->kl = 4; 1605 ect->kl = 4;
1418 ect->dl = sizes_trk0[i] - 4; 1606 ect->dl = sizes_trk0[i] - 4;
1419 } 1607 }
1420 } 1608 }
1421 if ((fdata->intensity & 0x08) && 1609 if ((intensity & 0x08) &&
1422 fdata->start_unit == 1) { 1610 fdata->start_unit == 1) {
1423 ect->kl = 44; 1611 ect->kl = 44;
1424 ect->dl = LABEL_SIZE - 44; 1612 ect->dl = LABEL_SIZE - 44;
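The CDL special cases above give the first three records of track 0 a 4-byte key with shortened data lengths, and every record of track 1 a 44-byte key with LABEL_SIZE - 44 data bytes. A small sketch of that per-record geometry; sizes_trk0[] and LABEL_SIZE are placeholders here, since their real values are defined elsewhere in the driver.

/* Placeholder values; the driver defines the real sizes_trk0[] and
 * LABEL_SIZE, which are not part of this hunk. */
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 144

/* Key length / data length for record 'rec' (1-based) on 'track' when
 * formatting with the compatible disk layout (intensity bit 3). */
static void cdl_record_geometry(unsigned int track, unsigned int rec,
                                unsigned int blksize, int *kl, int *dl)
{
        *kl = 0;
        *dl = blksize;
        if (track == 0 && rec >= 1 && rec <= 3) {
                *kl = 4;                        /* short key on track 0 */
                *dl = sizes_trk0[rec - 1] - 4;
        } else if (track == 1) {
                *kl = 44;                       /* volume label records */
                *dl = LABEL_SIZE - 44;
        }
}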
@@ -1479,57 +1667,69 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1479 struct irb *irb) 1667 struct irb *irb)
1480{ 1668{
1481 char mask; 1669 char mask;
1670 char *sense = NULL;
1482 1671
1483 /* first of all check for state change pending interrupt */ 1672 /* first of all check for state change pending interrupt */
1484 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 1673 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
1485 if ((irb->scsw.cmd.dstat & mask) == mask) { 1674 if ((scsw_dstat(&irb->scsw) & mask) == mask) {
1486 dasd_generic_handle_state_change(device); 1675 dasd_generic_handle_state_change(device);
1487 return; 1676 return;
1488 } 1677 }
1489 1678
1490 /* summary unit check */ 1679 /* summary unit check */
1491 if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && 1680 if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
1492 (irb->ecw[7] == 0x0D)) { 1681 (irb->ecw[7] == 0x0D)) {
1493 dasd_alias_handle_summary_unit_check(device, irb); 1682 dasd_alias_handle_summary_unit_check(device, irb);
1494 return; 1683 return;
1495 } 1684 }
1496 1685
1497 1686 sense = dasd_get_sense(irb);
1498 /* service information message SIM */ 1687 /* service information message SIM */
1499 if (irb->esw.esw0.erw.cons && !(irb->ecw[27] & DASD_SENSE_BIT_0) && 1688 if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
1500 ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { 1689 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
1501 dasd_3990_erp_handle_sim(device, irb->ecw); 1690 dasd_3990_erp_handle_sim(device, sense);
1502 dasd_schedule_device_bh(device); 1691 dasd_schedule_device_bh(device);
1503 return; 1692 return;
1504 } 1693 }
1505 1694
1506 if ((irb->scsw.cmd.cc == 1) && 1695 if ((scsw_cc(&irb->scsw) == 1) &&
1507 (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && 1696 (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
1508 (irb->scsw.cmd.actl & SCSW_ACTL_START_PEND) && 1697 (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) &&
1509 (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND)) { 1698 (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) {
1510 /* fake irb, do nothing; they are handled elsewhere */ 1699 /* fake irb, do nothing; they are handled elsewhere */
1511 dasd_schedule_device_bh(device); 1700 dasd_schedule_device_bh(device);
1512 return; 1701 return;
1513 } 1702 }
1514 1703
1515 if (!(irb->esw.esw0.erw.cons)) { 1704 if (!sense) {
1516 /* just report other unsolicited interrupts */ 1705 /* just report other unsolicited interrupts */
1517 DEV_MESSAGE(KERN_ERR, device, "%s", 1706 DBF_DEV_EVENT(DBF_ERR, device, "%s",
1518 "unsolicited interrupt received"); 1707 "unsolicited interrupt received");
1519 } else { 1708 } else {
1520 DEV_MESSAGE(KERN_ERR, device, "%s", 1709 DBF_DEV_EVENT(DBF_ERR, device, "%s",
1521 "unsolicited interrupt received " 1710 "unsolicited interrupt received "
1522 "(sense available)"); 1711 "(sense available)");
1523 device->discipline->dump_sense(device, NULL, irb); 1712 device->discipline->dump_sense_dbf(device, NULL, irb,
1713 "unsolicited");
1524 } 1714 }
1525 1715
1526 dasd_schedule_device_bh(device); 1716 dasd_schedule_device_bh(device);
1527 return; 1717 return;
1528}; 1718};
1529 1719
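The handler above classifies an unsolicited interrupt by looking at the concurrent sense bytes returned by dasd_get_sense(). A stand-alone sketch of the SIM test; the two mask values are assumptions standing in for the driver's DASD_SENSE_BIT_0 and DASD_SIM_SENSE definitions.

#include <stdint.h>
#include <stdbool.h>

/* Assumed mask values; the driver's headers define the real ones. */
#define SENSE_BIT_0_MASK  0x80  /* 24-byte sense format indicator */
#define SIM_SENSE_MASK    0x0F  /* service information message bits */

/* True if a 32-byte sense buffer carries a service information message
 * (SIM): 32-byte format (bit 0 of byte 27 clear) and all SIM bits set
 * in byte 6, mirroring the test in the handler above. */
static bool sense_is_sim(const uint8_t *sense)
{
        if (!sense)
                return false;   /* no concurrent sense available */
        return !(sense[27] & SENSE_BIT_0_MASK) &&
               (sense[6] & SIM_SENSE_MASK) == SIM_SENSE_MASK;
}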
1530static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, 1720
1721static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
1722 struct dasd_device *startdev,
1531 struct dasd_block *block, 1723 struct dasd_block *block,
1532 struct request *req) 1724 struct request *req,
1725 sector_t first_rec,
1726 sector_t last_rec,
1727 sector_t first_trk,
1728 sector_t last_trk,
1729 unsigned int first_offs,
1730 unsigned int last_offs,
1731 unsigned int blk_per_trk,
1732 unsigned int blksize)
1533{ 1733{
1534 struct dasd_eckd_private *private; 1734 struct dasd_eckd_private *private;
1535 unsigned long *idaws; 1735 unsigned long *idaws;
@@ -1539,11 +1739,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
1539 struct req_iterator iter; 1739 struct req_iterator iter;
1540 struct bio_vec *bv; 1740 struct bio_vec *bv;
1541 char *dst; 1741 char *dst;
1542 unsigned int blksize, blk_per_trk, off; 1742 unsigned int off;
1543 int count, cidaw, cplength, datasize; 1743 int count, cidaw, cplength, datasize;
1544 sector_t recid, first_rec, last_rec; 1744 sector_t recid;
1545 sector_t first_trk, last_trk;
1546 unsigned int first_offs, last_offs;
1547 unsigned char cmd, rcmd; 1745 unsigned char cmd, rcmd;
1548 int use_prefix; 1746 int use_prefix;
1549 struct dasd_device *basedev; 1747 struct dasd_device *basedev;
@@ -1556,15 +1754,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
1556 cmd = DASD_ECKD_CCW_WRITE_MT; 1754 cmd = DASD_ECKD_CCW_WRITE_MT;
1557 else 1755 else
1558 return ERR_PTR(-EINVAL); 1756 return ERR_PTR(-EINVAL);
1559 /* Calculate number of blocks/records per track. */ 1757
1560 blksize = block->bp_block;
1561 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1562 /* Calculate record id of first and last block. */
1563 first_rec = first_trk = req->sector >> block->s2b_shift;
1564 first_offs = sector_div(first_trk, blk_per_trk);
1565 last_rec = last_trk =
1566 (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
1567 last_offs = sector_div(last_trk, blk_per_trk);
1568 /* Check struct bio and count the number of blocks for the request. */ 1758 /* Check struct bio and count the number of blocks for the request. */
1569 count = 0; 1759 count = 0;
1570 cidaw = 0; 1760 cidaw = 0;
@@ -1714,6 +1904,497 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
1714 return cqr; 1904 return cqr;
1715} 1905}
1716 1906
1907static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
1908 struct dasd_device *startdev,
1909 struct dasd_block *block,
1910 struct request *req,
1911 sector_t first_rec,
1912 sector_t last_rec,
1913 sector_t first_trk,
1914 sector_t last_trk,
1915 unsigned int first_offs,
1916 unsigned int last_offs,
1917 unsigned int blk_per_trk,
1918 unsigned int blksize)
1919{
1920 struct dasd_eckd_private *private;
1921 unsigned long *idaws;
1922 struct dasd_ccw_req *cqr;
1923 struct ccw1 *ccw;
1924 struct req_iterator iter;
1925 struct bio_vec *bv;
1926 char *dst, *idaw_dst;
1927 unsigned int cidaw, cplength, datasize;
1928 unsigned int tlf;
1929 sector_t recid;
1930 unsigned char cmd;
1931 struct dasd_device *basedev;
1932 unsigned int trkcount, count, count_to_trk_end;
1933 unsigned int idaw_len, seg_len, part_len, len_to_track_end;
1934 unsigned char new_track, end_idaw;
1935 sector_t trkid;
1936 unsigned int recoffs;
1937
1938 basedev = block->base;
1939 private = (struct dasd_eckd_private *) basedev->private;
1940 if (rq_data_dir(req) == READ)
1941 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
1942 else if (rq_data_dir(req) == WRITE)
1943 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
1944 else
1945 return ERR_PTR(-EINVAL);
1946
1947 /* Track based I/O needs IDAWs for each page, and not just for
1948 * 64 bit addresses. We need additional idals for pages
1949 * that get filled from two tracks, so we use the number
1950 * of records as upper limit.
1951 */
1952 cidaw = last_rec - first_rec + 1;
1953 trkcount = last_trk - first_trk + 1;
1954
1955 /* 1x prefix + one read/write ccw per track */
1956 cplength = 1 + trkcount;
1957
1958 /* on 31-bit we need space for two 32 bit addresses per page
1959 * on 64-bit one 64 bit address
1960 */
1961 datasize = sizeof(struct PFX_eckd_data) +
1962 cidaw * sizeof(unsigned long long);
1963
1964 /* Allocate the ccw request. */
1965 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1966 cplength, datasize, startdev);
1967 if (IS_ERR(cqr))
1968 return cqr;
1969 ccw = cqr->cpaddr;
1970 /* transfer length factor: how many bytes to read from the last track */
1971 if (first_trk == last_trk)
1972 tlf = last_offs - first_offs + 1;
1973 else
1974 tlf = last_offs + 1;
1975 tlf *= blksize;
1976
1977 if (prefix_LRE(ccw++, cqr->data, first_trk,
1978 last_trk, cmd, basedev, startdev,
1979 1 /* format */, first_offs + 1,
1980 trkcount, blksize,
1981 tlf) == -EAGAIN) {
1982 /* Clock not in sync and XRC is enabled.
1983 * Try again later.
1984 */
1985 dasd_sfree_request(cqr, startdev);
1986 return ERR_PTR(-EAGAIN);
1987 }
1988
1989 /*
1990 * The translation of request into ccw programs must meet the
1991 * following conditions:
1992 * - all idaws but the first and the last must address full pages
1993 * (or 2K blocks on 31-bit)
1994 * - the scope of a ccw and its idal ends with the track boundaries
1995 */
1996 idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
1997 recid = first_rec;
1998 new_track = 1;
1999 end_idaw = 0;
2000 len_to_track_end = 0;
2001 idaw_dst = 0;
2002 idaw_len = 0;
2003 rq_for_each_segment(bv, req, iter) {
2004 dst = page_address(bv->bv_page) + bv->bv_offset;
2005 seg_len = bv->bv_len;
2006 while (seg_len) {
2007 if (new_track) {
2008 trkid = recid;
2009 recoffs = sector_div(trkid, blk_per_trk);
2010 count_to_trk_end = blk_per_trk - recoffs;
2011 count = min((last_rec - recid + 1),
2012 (sector_t)count_to_trk_end);
2013 len_to_track_end = count * blksize;
2014 ccw[-1].flags |= CCW_FLAG_CC;
2015 ccw->cmd_code = cmd;
2016 ccw->count = len_to_track_end;
2017 ccw->cda = (__u32)(addr_t)idaws;
2018 ccw->flags = CCW_FLAG_IDA;
2019 ccw++;
2020 recid += count;
2021 new_track = 0;
2022 }
2023 /* If we start a new idaw, everything is fine and the
2024 * start of the new idaw is the start of this segment.
2025 * If we continue an idaw, we must make sure that the
2026 * current segment begins where the so far accumulated
2027 * idaw ends
2028 */
2029 if (!idaw_dst)
2030 idaw_dst = dst;
2031 if ((idaw_dst + idaw_len) != dst) {
2032 dasd_sfree_request(cqr, startdev);
2033 return ERR_PTR(-ERANGE);
2034 }
2035 part_len = min(seg_len, len_to_track_end);
2036 seg_len -= part_len;
2037 dst += part_len;
2038 idaw_len += part_len;
2039 len_to_track_end -= part_len;
2040 /* collected memory area ends on an IDA_BLOCK border,
2041 * -> create an idaw
2042 * idal_create_words will handle cases where idaw_len
2043 * is larger than IDA_BLOCK_SIZE
2044 */
2045 if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
2046 end_idaw = 1;
2047 /* We also need to end the idaw at track end */
2048 if (!len_to_track_end) {
2049 new_track = 1;
2050 end_idaw = 1;
2051 }
2052 if (end_idaw) {
2053 idaws = idal_create_words(idaws, idaw_dst,
2054 idaw_len);
2055 idaw_dst = 0;
2056 idaw_len = 0;
2057 end_idaw = 0;
2058 }
2059 }
2060 }
2061
2062 if (blk_noretry_request(req) ||
2063 block->base->features & DASD_FEATURE_FAILFAST)
2064 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2065 cqr->startdev = startdev;
2066 cqr->memdev = startdev;
2067 cqr->block = block;
2068 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
2069 cqr->lpm = private->path_data.ppm;
2070 cqr->retries = 256;
2071 cqr->buildclk = get_clock();
2072 cqr->status = DASD_CQR_FILLED;
2073 return cqr;
2074}
2075
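The inner loop above sizes each read/write CCW so that it ends exactly at a track boundary: it takes the remaining records of the request, caps them at the records left on the current track, and converts that count into len_to_track_end. A compact sketch of that calculation with illustrative parameter names; the driver's sector_div() is replaced by plain division here.

#include <stdint.h>

/* How many blocks the next track-sized CCW covers, given the current
 * record id, the last record of the request and the blocks per track. */
static unsigned int blocks_until_track_end(uint64_t recid, uint64_t last_rec,
                                           unsigned int blk_per_trk,
                                           unsigned int blksize,
                                           unsigned int *len_to_track_end)
{
        unsigned int recoffs = recid % blk_per_trk;     /* offset in track */
        unsigned int to_trk_end = blk_per_trk - recoffs;
        uint64_t remaining = last_rec - recid + 1;
        unsigned int count = remaining < to_trk_end ?
                             (unsigned int)remaining : to_trk_end;

        *len_to_track_end = count * blksize;
        return count;
}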
2076static int prepare_itcw(struct itcw *itcw,
2077 unsigned int trk, unsigned int totrk, int cmd,
2078 struct dasd_device *basedev,
2079 struct dasd_device *startdev,
2080 unsigned int rec_on_trk, int count,
2081 unsigned int blksize,
2082 unsigned int total_data_size,
2083 unsigned int tlf,
2084 unsigned int blk_per_trk)
2085{
2086 struct PFX_eckd_data pfxdata;
2087 struct dasd_eckd_private *basepriv, *startpriv;
2088 struct DE_eckd_data *dedata;
2089 struct LRE_eckd_data *lredata;
2090 struct dcw *dcw;
2091
2092 u32 begcyl, endcyl;
2093 u16 heads, beghead, endhead;
2094 u8 pfx_cmd;
2095
2096 int rc = 0;
2097 int sector = 0;
2098 int dn, d;
2099
2100
2101 /* setup prefix data */
2102 basepriv = (struct dasd_eckd_private *) basedev->private;
2103 startpriv = (struct dasd_eckd_private *) startdev->private;
2104 dedata = &pfxdata.define_extent;
2105 lredata = &pfxdata.locate_record;
2106
2107 memset(&pfxdata, 0, sizeof(pfxdata));
2108 pfxdata.format = 1; /* PFX with LRE */
2109 pfxdata.base_address = basepriv->ned->unit_addr;
2110 pfxdata.base_lss = basepriv->ned->ID;
2111 pfxdata.validity.define_extent = 1;
2112
2113 /* private uid is kept up to date, conf_data may be outdated */
2114 if (startpriv->uid.type != UA_BASE_DEVICE) {
2115 pfxdata.validity.verify_base = 1;
2116 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
2117 pfxdata.validity.hyper_pav = 1;
2118 }
2119
2120 switch (cmd) {
2121 case DASD_ECKD_CCW_READ_TRACK_DATA:
2122 dedata->mask.perm = 0x1;
2123 dedata->attributes.operation = basepriv->attrib.operation;
2124 dedata->blk_size = blksize;
2125 dedata->ga_extended |= 0x42;
2126 lredata->operation.orientation = 0x0;
2127 lredata->operation.operation = 0x0C;
2128 lredata->auxiliary.check_bytes = 0x01;
2129 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
2130 break;
2131 case DASD_ECKD_CCW_WRITE_TRACK_DATA:
2132 dedata->mask.perm = 0x02;
2133 dedata->attributes.operation = basepriv->attrib.operation;
2134 dedata->blk_size = blksize;
2135 rc = check_XRC_on_prefix(&pfxdata, basedev);
2136 dedata->ga_extended |= 0x42;
2137 lredata->operation.orientation = 0x0;
2138 lredata->operation.operation = 0x3F;
2139 lredata->extended_operation = 0x23;
2140 lredata->auxiliary.check_bytes = 0x2;
2141 pfx_cmd = DASD_ECKD_CCW_PFX;
2142 break;
2143 default:
2144 DBF_DEV_EVENT(DBF_ERR, basedev,
2145 "prepare itcw, unknown opcode 0x%x", cmd);
2146 BUG();
2147 break;
2148 }
2149 if (rc)
2150 return rc;
2151
2152 dedata->attributes.mode = 0x3; /* ECKD */
2153
2154 heads = basepriv->rdc_data.trk_per_cyl;
2155 begcyl = trk / heads;
2156 beghead = trk % heads;
2157 endcyl = totrk / heads;
2158 endhead = totrk % heads;
2159
2160 /* check for sequential prestage - enhance cylinder range */
2161 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
2162 dedata->attributes.operation == DASD_SEQ_ACCESS) {
2163
2164 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
2165 endcyl += basepriv->attrib.nr_cyl;
2166 else
2167 endcyl = (basepriv->real_cyl - 1);
2168 }
2169
2170 set_ch_t(&dedata->beg_ext, begcyl, beghead);
2171 set_ch_t(&dedata->end_ext, endcyl, endhead);
2172
2173 dedata->ep_format = 0x20; /* records per track is valid */
2174 dedata->ep_rec_per_track = blk_per_trk;
2175
2176 if (rec_on_trk) {
2177 switch (basepriv->rdc_data.dev_type) {
2178 case 0x3390:
2179 dn = ceil_quot(blksize + 6, 232);
2180 d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
2181 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
2182 break;
2183 case 0x3380:
2184 d = 7 + ceil_quot(blksize + 12, 32);
2185 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
2186 break;
2187 }
2188 }
2189
2190 lredata->auxiliary.length_valid = 1;
2191 lredata->auxiliary.length_scope = 1;
2192 lredata->auxiliary.imbedded_ccw_valid = 1;
2193 lredata->length = tlf;
2194 lredata->imbedded_ccw = cmd;
2195 lredata->count = count;
2196 lredata->sector = sector;
2197 set_ch_t(&lredata->seek_addr, begcyl, beghead);
2198 lredata->search_arg.cyl = lredata->seek_addr.cyl;
2199 lredata->search_arg.head = lredata->seek_addr.head;
2200 lredata->search_arg.record = rec_on_trk;
2201
2202 dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
2203 &pfxdata, sizeof(pfxdata), total_data_size);
2204
2205 return rc;
2206}
2207
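prepare_itcw() above estimates the angular sector of a record from the device geometry; the 3390 and 3380 formulas are reproduced below as a stand-alone helper. ceil_div() is assumed to behave like the driver's ceil_quot(), i.e. integer division rounded up.

/* Assumed to match the driver's ceil_quot(): ceiling integer division. */
static unsigned int ceil_div(unsigned int a, unsigned int b)
{
        return (a + b - 1) / b;
}

/* Sector estimate for record 'rec_on_trk' on a 3390 or 3380 track, as
 * computed in prepare_itcw() above; dev_type selects the geometry. */
static int record_sector(unsigned int dev_type, unsigned int blksize,
                         unsigned int rec_on_trk)
{
        unsigned int dn, d;

        switch (dev_type) {
        case 0x3390:
                dn = ceil_div(blksize + 6, 232);
                d = 9 + ceil_div(blksize + 6 * (dn + 1), 34);
                return (49 + (rec_on_trk - 1) * (10 + d)) / 8;
        case 0x3380:
                d = 7 + ceil_div(blksize + 12, 32);
                return (39 + (rec_on_trk - 1) * (8 + d)) / 7;
        default:
                return 0;       /* unknown geometry */
        }
}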
2208static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2209 struct dasd_device *startdev,
2210 struct dasd_block *block,
2211 struct request *req,
2212 sector_t first_rec,
2213 sector_t last_rec,
2214 sector_t first_trk,
2215 sector_t last_trk,
2216 unsigned int first_offs,
2217 unsigned int last_offs,
2218 unsigned int blk_per_trk,
2219 unsigned int blksize)
2220{
2221 struct dasd_eckd_private *private;
2222 struct dasd_ccw_req *cqr;
2223 struct req_iterator iter;
2224 struct bio_vec *bv;
2225 char *dst;
2226 unsigned int trkcount, ctidaw;
2227 unsigned char cmd;
2228 struct dasd_device *basedev;
2229 unsigned int tlf;
2230 struct itcw *itcw;
2231 struct tidaw *last_tidaw = NULL;
2232 int itcw_op;
2233 size_t itcw_size;
2234
2235 basedev = block->base;
2236 private = (struct dasd_eckd_private *) basedev->private;
2237 if (rq_data_dir(req) == READ) {
2238 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
2239 itcw_op = ITCW_OP_READ;
2240 } else if (rq_data_dir(req) == WRITE) {
2241 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
2242 itcw_op = ITCW_OP_WRITE;
2243 } else
2244 return ERR_PTR(-EINVAL);
2245
2246 /* track-based I/O needs to address all memory via TIDAWs,
2247 * not just for 64 bit addresses. This allows us to map
2248 * each segment directly to one tidaw.
2249 */
2250 trkcount = last_trk - first_trk + 1;
2251 ctidaw = 0;
2252 rq_for_each_segment(bv, req, iter) {
2253 ++ctidaw;
2254 }
2255
2256 /* Allocate the ccw request. */
2257 itcw_size = itcw_calc_size(0, ctidaw, 0);
2258 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
2259 0, itcw_size, startdev);
2260 if (IS_ERR(cqr))
2261 return cqr;
2262
2263 cqr->cpmode = 1;
2264 cqr->startdev = startdev;
2265 cqr->memdev = startdev;
2266 cqr->block = block;
2267 cqr->expires = 100*HZ;
2268 cqr->buildclk = get_clock();
2269 cqr->status = DASD_CQR_FILLED;
2270 cqr->retries = 10;
2271
2272 /* transfer length factor: how many bytes to read from the last track */
2273 if (first_trk == last_trk)
2274 tlf = last_offs - first_offs + 1;
2275 else
2276 tlf = last_offs + 1;
2277 tlf *= blksize;
2278
2279 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
2280 cqr->cpaddr = itcw_get_tcw(itcw);
2281
2282 if (prepare_itcw(itcw, first_trk, last_trk,
2283 cmd, basedev, startdev,
2284 first_offs + 1,
2285 trkcount, blksize,
2286 (last_rec - first_rec + 1) * blksize,
2287 tlf, blk_per_trk) == -EAGAIN) {
2288 /* Clock not in sync and XRC is enabled.
2289 * Try again later.
2290 */
2291 dasd_sfree_request(cqr, startdev);
2292 return ERR_PTR(-EAGAIN);
2293 }
2294
2295 /*
2296 * A tidaw can address 4k of memory, but must not cross page boundaries
2297 * We can let the block layer handle this by setting
2298 * blk_queue_segment_boundary to page boundaries and
2299 * blk_max_segment_size to page size when setting up the request queue.
2300 */
2301 rq_for_each_segment(bv, req, iter) {
2302 dst = page_address(bv->bv_page) + bv->bv_offset;
2303 last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len);
2304 if (IS_ERR(last_tidaw))
2305 return (struct dasd_ccw_req *)last_tidaw;
2306 }
2307
2308 last_tidaw->flags |= 0x80;
2309 itcw_finalize(itcw);
2310
2311 if (blk_noretry_request(req) ||
2312 block->base->features & DASD_FEATURE_FAILFAST)
2313 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2314 cqr->startdev = startdev;
2315 cqr->memdev = startdev;
2316 cqr->block = block;
2317 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
2318 cqr->lpm = private->path_data.ppm;
2319 cqr->retries = 256;
2320 cqr->buildclk = get_clock();
2321 cqr->status = DASD_CQR_FILLED;
2322 return cqr;
2323}
2324
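Because every memory segment of the request maps to exactly one TIDAW and the last TIDAW must be flagged, the construction above reduces to a simple 1:1 list build. A simplified model with an illustrative tidaw layout; the real struct tidaw and the itcw_add_tidaw() interface differ.

#include <stdint.h>
#include <stddef.h>

/* Simplified stand-in for a TIDAW entry: flags, length, address.
 * 0x80 marks the final entry of the list, as in the code above. */
struct tidaw_sketch {
        uint8_t  flags;
        uint32_t count;
        uint64_t addr;
};

/* Map each memory segment 1:1 to a tidaw and flag the last one.
 * Returns the number of tidaws written, or 0 if the array is too small. */
static size_t build_tidaw_list(struct tidaw_sketch *out, size_t max,
                               void *const *seg_addr, const uint32_t *seg_len,
                               size_t nsegs)
{
        size_t i;

        if (nsegs == 0 || nsegs > max)
                return 0;
        for (i = 0; i < nsegs; i++) {
                out[i].flags = 0;
                out[i].count = seg_len[i];
                out[i].addr  = (uint64_t)(uintptr_t)seg_addr[i];
        }
        out[nsegs - 1].flags |= 0x80;   /* last tidaw in the transfer */
        return nsegs;
}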
2325static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2326 struct dasd_block *block,
2327 struct request *req)
2328{
2329 int tpm, cmdrtd, cmdwtd;
2330 int use_prefix;
2331
2332 struct dasd_eckd_private *private;
2333 int fcx_in_css, fcx_in_gneq, fcx_in_features;
2334 struct dasd_device *basedev;
2335 sector_t first_rec, last_rec;
2336 sector_t first_trk, last_trk;
2337 unsigned int first_offs, last_offs;
2338 unsigned int blk_per_trk, blksize;
2339 int cdlspecial;
2340 struct dasd_ccw_req *cqr;
2341
2342 basedev = block->base;
2343 private = (struct dasd_eckd_private *) basedev->private;
2344
2345 /* Calculate number of blocks/records per track. */
2346 blksize = block->bp_block;
2347 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2348 /* Calculate record id of first and last block. */
2349 first_rec = first_trk = req->sector >> block->s2b_shift;
2350 first_offs = sector_div(first_trk, blk_per_trk);
2351 last_rec = last_trk =
2352 (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
2353 last_offs = sector_div(last_trk, blk_per_trk);
2354 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
2355
2356 /* is transport mode supported ? */
2357 fcx_in_css = css_general_characteristics.fcx;
2358 fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
2359 fcx_in_features = private->features.feature[40] & 0x80;
2360 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
2361
2362 /* is read track data and write track data in command mode supported? */
2363 cmdrtd = private->features.feature[9] & 0x20;
2364 cmdwtd = private->features.feature[12] & 0x40;
2365 use_prefix = private->features.feature[8] & 0x01;
2366
2367 cqr = NULL;
2368 if (cdlspecial || dasd_page_cache) {
2369 /* do nothing, just fall through to the cmd mode single case */
2370 } else if (!dasd_nofcx && tpm && (first_trk == last_trk)) {
2371 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
2372 first_rec, last_rec,
2373 first_trk, last_trk,
2374 first_offs, last_offs,
2375 blk_per_trk, blksize);
2376 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2377 cqr = NULL;
2378 } else if (use_prefix &&
2379 (((rq_data_dir(req) == READ) && cmdrtd) ||
2380 ((rq_data_dir(req) == WRITE) && cmdwtd))) {
2381 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
2382 first_rec, last_rec,
2383 first_trk, last_trk,
2384 first_offs, last_offs,
2385 blk_per_trk, blksize);
2386 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2387 cqr = NULL;
2388 }
2389 if (!cqr)
2390 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
2391 first_rec, last_rec,
2392 first_trk, last_trk,
2393 first_offs, last_offs,
2394 blk_per_trk, blksize);
2395 return cqr;
2396}
2397
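dasd_eckd_build_cp() above first converts the block-layer request into ECKD geometry (records, tracks, offsets) and then picks a channel-program builder based on the fcx and feature bits. A self-contained sketch of the geometry step, assuming s2b_shift converts 512-byte sectors to device blocks and using plain division in place of sector_div():

#include <stdint.h>

/* Geometry of a block-layer request in ECKD terms, mirroring the
 * calculation at the top of dasd_eckd_build_cp(). */
struct req_geometry {
        uint64_t first_rec, last_rec;           /* first/last device block */
        uint64_t first_trk, last_trk;           /* first/last track */
        unsigned int first_offs, last_offs;     /* offsets within the tracks */
};

static struct req_geometry request_geometry(uint64_t start_sector,
                                            uint64_t nr_sectors,
                                            unsigned int s2b_shift,
                                            unsigned int blk_per_trk)
{
        struct req_geometry g;

        g.first_rec = start_sector >> s2b_shift;
        g.last_rec  = (start_sector + nr_sectors - 1) >> s2b_shift;
        /* sector_div() in the driver divides in place and returns the
         * remainder; plain / and % do the same job here. */
        g.first_trk  = g.first_rec / blk_per_trk;
        g.first_offs = (unsigned int)(g.first_rec % blk_per_trk);
        g.last_trk   = g.last_rec / blk_per_trk;
        g.last_offs  = (unsigned int)(g.last_rec % blk_per_trk);
        return g;
}

The dispatch that follows prefers the transport-mode builder for single-track requests, falls back to the command-mode track builder when read/write-track-data and prefix support are present, and otherwise uses the single-record builder.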
1717static int 2398static int
1718dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) 2399dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
1719{ 2400{
@@ -1767,7 +2448,7 @@ out:
1767} 2448}
1768 2449
1769/* 2450/*
1770 * Modify ccw chain in cqr so it can be started on a base device. 2451 * Modify ccw/tcw in cqr so it can be started on a base device.
1771 * 2452 *
1772 * Note that this is not enough to restart the cqr! 2453 * Note that this is not enough to restart the cqr!
1773 * Either reset cqr->startdev as well (summary unit check handling) 2454 * Either reset cqr->startdev as well (summary unit check handling)
@@ -1777,13 +2458,24 @@ void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
1777{ 2458{
1778 struct ccw1 *ccw; 2459 struct ccw1 *ccw;
1779 struct PFX_eckd_data *pfxdata; 2460 struct PFX_eckd_data *pfxdata;
1780 2461 struct tcw *tcw;
1781 ccw = cqr->cpaddr; 2462 struct tccb *tccb;
1782 pfxdata = cqr->data; 2463 struct dcw *dcw;
1783 2464
1784 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) { 2465 if (cqr->cpmode == 1) {
2466 tcw = cqr->cpaddr;
2467 tccb = tcw_get_tccb(tcw);
2468 dcw = (struct dcw *)&tccb->tca[0];
2469 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
1785 pfxdata->validity.verify_base = 0; 2470 pfxdata->validity.verify_base = 0;
1786 pfxdata->validity.hyper_pav = 0; 2471 pfxdata->validity.hyper_pav = 0;
2472 } else {
2473 ccw = cqr->cpaddr;
2474 pfxdata = cqr->data;
2475 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
2476 pfxdata->validity.verify_base = 0;
2477 pfxdata->validity.hyper_pav = 0;
2478 }
1787 } 2479 }
1788} 2480}
1789 2481
@@ -1861,6 +2553,7 @@ dasd_eckd_release(struct dasd_device *device)
1861{ 2553{
1862 struct dasd_ccw_req *cqr; 2554 struct dasd_ccw_req *cqr;
1863 int rc; 2555 int rc;
2556 struct ccw1 *ccw;
1864 2557
1865 if (!capable(CAP_SYS_ADMIN)) 2558 if (!capable(CAP_SYS_ADMIN))
1866 return -EACCES; 2559 return -EACCES;
@@ -1868,14 +2561,15 @@ dasd_eckd_release(struct dasd_device *device)
1868 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2561 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1869 1, 32, device); 2562 1, 32, device);
1870 if (IS_ERR(cqr)) { 2563 if (IS_ERR(cqr)) {
1871 DEV_MESSAGE(KERN_WARNING, device, "%s", 2564 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1872 "Could not allocate initialization request"); 2565 "Could not allocate initialization request");
1873 return PTR_ERR(cqr); 2566 return PTR_ERR(cqr);
1874 } 2567 }
1875 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RELEASE; 2568 ccw = cqr->cpaddr;
1876 cqr->cpaddr->flags |= CCW_FLAG_SLI; 2569 ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
1877 cqr->cpaddr->count = 32; 2570 ccw->flags |= CCW_FLAG_SLI;
1878 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 2571 ccw->count = 32;
2572 ccw->cda = (__u32)(addr_t) cqr->data;
1879 cqr->startdev = device; 2573 cqr->startdev = device;
1880 cqr->memdev = device; 2574 cqr->memdev = device;
1881 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2575 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -1902,6 +2596,7 @@ dasd_eckd_reserve(struct dasd_device *device)
1902{ 2596{
1903 struct dasd_ccw_req *cqr; 2597 struct dasd_ccw_req *cqr;
1904 int rc; 2598 int rc;
2599 struct ccw1 *ccw;
1905 2600
1906 if (!capable(CAP_SYS_ADMIN)) 2601 if (!capable(CAP_SYS_ADMIN))
1907 return -EACCES; 2602 return -EACCES;
@@ -1909,14 +2604,15 @@ dasd_eckd_reserve(struct dasd_device *device)
1909 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2604 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1910 1, 32, device); 2605 1, 32, device);
1911 if (IS_ERR(cqr)) { 2606 if (IS_ERR(cqr)) {
1912 DEV_MESSAGE(KERN_WARNING, device, "%s", 2607 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1913 "Could not allocate initialization request"); 2608 "Could not allocate initialization request");
1914 return PTR_ERR(cqr); 2609 return PTR_ERR(cqr);
1915 } 2610 }
1916 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RESERVE; 2611 ccw = cqr->cpaddr;
1917 cqr->cpaddr->flags |= CCW_FLAG_SLI; 2612 ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
1918 cqr->cpaddr->count = 32; 2613 ccw->flags |= CCW_FLAG_SLI;
1919 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 2614 ccw->count = 32;
2615 ccw->cda = (__u32)(addr_t) cqr->data;
1920 cqr->startdev = device; 2616 cqr->startdev = device;
1921 cqr->memdev = device; 2617 cqr->memdev = device;
1922 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2618 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -1942,6 +2638,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
1942{ 2638{
1943 struct dasd_ccw_req *cqr; 2639 struct dasd_ccw_req *cqr;
1944 int rc; 2640 int rc;
2641 struct ccw1 *ccw;
1945 2642
1946 if (!capable(CAP_SYS_ADMIN)) 2643 if (!capable(CAP_SYS_ADMIN))
1947 return -EACCES; 2644 return -EACCES;
@@ -1949,14 +2646,15 @@ dasd_eckd_steal_lock(struct dasd_device *device)
1949 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2646 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1950 1, 32, device); 2647 1, 32, device);
1951 if (IS_ERR(cqr)) { 2648 if (IS_ERR(cqr)) {
1952 DEV_MESSAGE(KERN_WARNING, device, "%s", 2649 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1953 "Could not allocate initialization request"); 2650 "Could not allocate initialization request");
1954 return PTR_ERR(cqr); 2651 return PTR_ERR(cqr);
1955 } 2652 }
1956 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SLCK; 2653 ccw = cqr->cpaddr;
1957 cqr->cpaddr->flags |= CCW_FLAG_SLI; 2654 ccw->cmd_code = DASD_ECKD_CCW_SLCK;
1958 cqr->cpaddr->count = 32; 2655 ccw->flags |= CCW_FLAG_SLI;
1959 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 2656 ccw->count = 32;
2657 ccw->cda = (__u32)(addr_t) cqr->data;
1960 cqr->startdev = device; 2658 cqr->startdev = device;
1961 cqr->memdev = device; 2659 cqr->memdev = device;
1962 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2660 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -1990,7 +2688,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
1990 sizeof(struct dasd_rssd_perf_stats_t)), 2688 sizeof(struct dasd_rssd_perf_stats_t)),
1991 device); 2689 device);
1992 if (IS_ERR(cqr)) { 2690 if (IS_ERR(cqr)) {
1993 DEV_MESSAGE(KERN_WARNING, device, "%s", 2691 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1994 "Could not allocate initialization request"); 2692 "Could not allocate initialization request");
1995 return PTR_ERR(cqr); 2693 return PTR_ERR(cqr);
1996 } 2694 }
@@ -2080,9 +2778,9 @@ dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
2080 return -EFAULT; 2778 return -EFAULT;
2081 private->attrib = attrib; 2779 private->attrib = attrib;
2082 2780
2083 DEV_MESSAGE(KERN_INFO, device, 2781 dev_info(&device->cdev->dev,
2084 "cache operation mode set to %x (%i cylinder prestage)", 2782 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
2085 private->attrib.operation, private->attrib.nr_cyl); 2783 private->attrib.operation, private->attrib.nr_cyl);
2086 return 0; 2784 return 0;
2087} 2785}
2088 2786
@@ -2133,7 +2831,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
2133 /* setup CCWs for PSF + RSSD */ 2831 /* setup CCWs for PSF + RSSD */
2134 cqr = dasd_smalloc_request("ECKD", 2 , 0, device); 2832 cqr = dasd_smalloc_request("ECKD", 2 , 0, device);
2135 if (IS_ERR(cqr)) { 2833 if (IS_ERR(cqr)) {
2136 DEV_MESSAGE(KERN_WARNING, device, "%s", 2834 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2137 "Could not allocate initialization request"); 2835 "Could not allocate initialization request");
2138 rc = PTR_ERR(cqr); 2836 rc = PTR_ERR(cqr);
2139 goto out_free; 2837 goto out_free;
@@ -2242,11 +2940,54 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
2242 return len; 2940 return len;
2243} 2941}
2244 2942
2943static void
2944dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct dasd_ccw_req *req,
2945 struct irb *irb, char *reason)
2946{
2947 u64 *sense;
2948 int sl;
2949 struct tsb *tsb;
2950
2951 sense = NULL;
2952 tsb = NULL;
2953 if (req && scsw_is_tm(&req->irb.scsw)) {
2954 if (irb->scsw.tm.tcw)
2955 tsb = tcw_get_tsb(
2956 (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
2957 if (tsb && (irb->scsw.tm.fcxs == 0x01)) {
2958 switch (tsb->flags & 0x07) {
2959 case 1: /* tsa_iostat */
2960 sense = (u64 *)tsb->tsa.iostat.sense;
2961 break;
2962 case 2: /* ts_ddpc */
2963 sense = (u64 *)tsb->tsa.ddpc.sense;
2964 break;
2965 case 3: /* tsa_intrg */
2966 break;
2967 }
2968 }
2969 } else {
2970 if (irb->esw.esw0.erw.cons)
2971 sense = (u64 *)irb->ecw;
2972 }
2973 if (sense) {
2974 for (sl = 0; sl < 4; sl++) {
2975 DBF_DEV_EVENT(DBF_EMERG, device,
2976 "%s: %016llx %016llx %016llx %016llx",
2977 reason, sense[0], sense[1], sense[2],
2978 sense[3]);
2979 }
2980 } else {
2981 DBF_DEV_EVENT(DBF_EMERG, device, "%s",
2982 "SORRY - NO VALID SENSE AVAILABLE\n");
2983 }
2984}
2985
2245/* 2986/*
2246 * Print sense data and related channel program. 2987 * Print sense data and related channel program.
2247 * Parts are printed because printk buffer is only 1024 bytes. 2988 * Parts are printed because printk buffer is only 1024 bytes.
2248 */ 2989 */
2249static void dasd_eckd_dump_sense(struct dasd_device *device, 2990static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
2250 struct dasd_ccw_req *req, struct irb *irb) 2991 struct dasd_ccw_req *req, struct irb *irb)
2251{ 2992{
2252 char *page; 2993 char *page;
@@ -2255,8 +2996,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
2255 2996
2256 page = (char *) get_zeroed_page(GFP_ATOMIC); 2997 page = (char *) get_zeroed_page(GFP_ATOMIC);
2257 if (page == NULL) { 2998 if (page == NULL) {
2258 DEV_MESSAGE(KERN_ERR, device, " %s", 2999 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2259 "No memory to dump sense data"); 3000 "No memory to dump sense data\n");
2260 return; 3001 return;
2261 } 3002 }
2262 /* dump the sense data */ 3003 /* dump the sense data */
@@ -2265,7 +3006,7 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
2265 dev_name(&device->cdev->dev)); 3006 dev_name(&device->cdev->dev));
2266 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3007 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2267 " in req: %p CS: 0x%02X DS: 0x%02X\n", req, 3008 " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
2268 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); 3009 scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw));
2269 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3010 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2270 " device %s: Failing CCW: %p\n", 3011 " device %s: Failing CCW: %p\n",
2271 dev_name(&device->cdev->dev), 3012 dev_name(&device->cdev->dev),
@@ -2341,6 +3082,147 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
2341 free_page((unsigned long) page); 3082 free_page((unsigned long) page);
2342} 3083}
2343 3084
3085
3086/*
3087 * Print sense data from a tcw.
3088 */
3089static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3090 struct dasd_ccw_req *req, struct irb *irb)
3091{
3092 char *page;
3093 int len, sl, sct, residual;
3094
3095 struct tsb *tsb;
3096 u8 *sense;
3097
3098
3099 page = (char *) get_zeroed_page(GFP_ATOMIC);
3100 if (page == NULL) {
3101 DBF_DEV_EVENT(DBF_WARNING, device, " %s",
3102 "No memory to dump sense data");
3103 return;
3104 }
3105 /* dump the sense data */
3106 len = sprintf(page, KERN_ERR PRINTK_HEADER
3107 " I/O status report for device %s:\n",
3108 dev_name(&device->cdev->dev));
3109 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3110 " in req: %p CS: 0x%02X DS: 0x%02X "
3111 "fcxs: 0x%02X schxs: 0x%02X\n", req,
3112 scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3113 irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
3114 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3115 " device %s: Failing TCW: %p\n",
3116 dev_name(&device->cdev->dev),
3117 (void *) (addr_t) irb->scsw.tm.tcw);
3118
3119 tsb = NULL;
3120 sense = NULL;
3121 if (irb->scsw.tm.tcw)
3122 tsb = tcw_get_tsb(
3123 (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
3124
3125 if (tsb && (irb->scsw.tm.fcxs == 0x01)) {
3126 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3127 " tsb->length %d\n", tsb->length);
3128 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3129 " tsb->flags %x\n", tsb->flags);
3130 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3131 " tsb->dcw_offset %d\n", tsb->dcw_offset);
3132 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3133 " tsb->count %d\n", tsb->count);
3134 residual = tsb->count - 28;
3135 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3136 " residual %d\n", residual);
3137
3138 switch (tsb->flags & 0x07) {
3139 case 1: /* tsa_iostat */
3140 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3141 " tsb->tsa.iostat.dev_time %d\n",
3142 tsb->tsa.iostat.dev_time);
3143 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3144 " tsb->tsa.iostat.def_time %d\n",
3145 tsb->tsa.iostat.def_time);
3146 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3147 " tsb->tsa.iostat.queue_time %d\n",
3148 tsb->tsa.iostat.queue_time);
3149 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3150 " tsb->tsa.iostat.dev_busy_time %d\n",
3151 tsb->tsa.iostat.dev_busy_time);
3152 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3153 " tsb->tsa.iostat.dev_act_time %d\n",
3154 tsb->tsa.iostat.dev_act_time);
3155 sense = tsb->tsa.iostat.sense;
3156 break;
3157 case 2: /* ts_ddpc */
3158 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3159 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
3160 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3161 " tsb->tsa.ddpc.rcq: ");
3162 for (sl = 0; sl < 16; sl++) {
3163 for (sct = 0; sct < 8; sct++) {
3164 len += sprintf(page + len, " %02x",
3165 tsb->tsa.ddpc.rcq[sl]);
3166 }
3167 len += sprintf(page + len, "\n");
3168 }
3169 sense = tsb->tsa.ddpc.sense;
3170 break;
3171 case 3: /* tsa_intrg */
3172 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3173 " tsb->tsa.intrg.: not supported yet\n");
3174 break;
3175 }
3176
3177 if (sense) {
3178 for (sl = 0; sl < 4; sl++) {
3179 len += sprintf(page + len,
3180 KERN_ERR PRINTK_HEADER
3181 " Sense(hex) %2d-%2d:",
3182 (8 * sl), ((8 * sl) + 7));
3183 for (sct = 0; sct < 8; sct++) {
3184 len += sprintf(page + len, " %02x",
3185 sense[8 * sl + sct]);
3186 }
3187 len += sprintf(page + len, "\n");
3188 }
3189
3190 if (sense[27] & DASD_SENSE_BIT_0) {
3191 /* 24 Byte Sense Data */
3192 sprintf(page + len, KERN_ERR PRINTK_HEADER
3193 " 24 Byte: %x MSG %x, "
3194 "%s MSGb to SYSOP\n",
3195 sense[7] >> 4, sense[7] & 0x0f,
3196 sense[1] & 0x10 ? "" : "no");
3197 } else {
3198 /* 32 Byte Sense Data */
3199 sprintf(page + len, KERN_ERR PRINTK_HEADER
3200 " 32 Byte: Format: %x "
3201 "Exception class %x\n",
3202 sense[6] & 0x0f, sense[22] >> 4);
3203 }
3204 } else {
3205 sprintf(page + len, KERN_ERR PRINTK_HEADER
3206 " SORRY - NO VALID SENSE AVAILABLE\n");
3207 }
3208 } else {
3209 sprintf(page + len, KERN_ERR PRINTK_HEADER
3210 " SORRY - NO TSB DATA AVAILABLE\n");
3211 }
3212 printk("%s", page);
3213 free_page((unsigned long) page);
3214}
3215
3216static void dasd_eckd_dump_sense(struct dasd_device *device,
3217 struct dasd_ccw_req *req, struct irb *irb)
3218{
3219 if (req && scsw_is_tm(&req->irb.scsw))
3220 dasd_eckd_dump_sense_tcw(device, req, irb);
3221 else
3222 dasd_eckd_dump_sense_ccw(device, req, irb);
3223}
3224
3225
2344/* 3226/*
2345 * max_blocks is dependent on the amount of storage that is available 3227 * max_blocks is dependent on the amount of storage that is available
2346 * in the static io buffer for each device. Currently each device has 3228 * in the static io buffer for each device. Currently each device has
@@ -2375,6 +3257,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
2375 .build_cp = dasd_eckd_build_alias_cp, 3257 .build_cp = dasd_eckd_build_alias_cp,
2376 .free_cp = dasd_eckd_free_alias_cp, 3258 .free_cp = dasd_eckd_free_alias_cp,
2377 .dump_sense = dasd_eckd_dump_sense, 3259 .dump_sense = dasd_eckd_dump_sense,
3260 .dump_sense_dbf = dasd_eckd_dump_sense_dbf,
2378 .fill_info = dasd_eckd_fill_info, 3261 .fill_info = dasd_eckd_fill_info,
2379 .ioctl = dasd_eckd_ioctl, 3262 .ioctl = dasd_eckd_ioctl,
2380}; 3263};
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 2476f87d21d0..ad45bcac3ce4 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -38,8 +38,11 @@
38#define DASD_ECKD_CCW_RELEASE 0x94 38#define DASD_ECKD_CCW_RELEASE 0x94
39#define DASD_ECKD_CCW_READ_CKD_MT 0x9e 39#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
40#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d 40#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
41#define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5
42#define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6
41#define DASD_ECKD_CCW_RESERVE 0xB4 43#define DASD_ECKD_CCW_RESERVE 0xB4
42#define DASD_ECKD_CCW_PFX 0xE7 44#define DASD_ECKD_CCW_PFX 0xE7
45#define DASD_ECKD_CCW_PFX_READ 0xEA
43#define DASD_ECKD_CCW_RSCK 0xF9 46#define DASD_ECKD_CCW_RSCK 0xF9
44 47
45/* 48/*
@@ -48,6 +51,11 @@
48#define PSF_ORDER_PRSSD 0x18 51#define PSF_ORDER_PRSSD 0x18
49#define PSF_ORDER_SSC 0x1D 52#define PSF_ORDER_SSC 0x1D
50 53
54/*
55 * Size that is reported for large volumes in the old 16-bit no_cyl field
56 */
57#define LV_COMPAT_CYL 0xFFFE
58
51/***************************************************************************** 59/*****************************************************************************
52 * SECTION: Type Definitions 60 * SECTION: Type Definitions
53 ****************************************************************************/ 61 ****************************************************************************/
@@ -118,7 +126,9 @@ struct DE_eckd_data {
118 unsigned long long ep_sys_time; /* Ext Parameter - System Time Stamp */ 126 unsigned long long ep_sys_time; /* Ext Parameter - System Time Stamp */
119 __u8 ep_format; /* Extended Parameter format byte */ 127 __u8 ep_format; /* Extended Parameter format byte */
120 __u8 ep_prio; /* Extended Parameter priority I/O byte */ 128 __u8 ep_prio; /* Extended Parameter priority I/O byte */
121 __u8 ep_reserved[6]; /* Extended Parameter Reserved */ 129 __u8 ep_reserved1; /* Extended Parameter Reserved */
130 __u8 ep_rec_per_track; /* Number of records on a track */
131 __u8 ep_reserved[4]; /* Extended Parameter Reserved */
122} __attribute__ ((packed)); 132} __attribute__ ((packed));
123 133
124struct LO_eckd_data { 134struct LO_eckd_data {
@@ -139,11 +149,37 @@ struct LO_eckd_data {
139 __u16 length; 149 __u16 length;
140} __attribute__ ((packed)); 150} __attribute__ ((packed));
141 151
152struct LRE_eckd_data {
153 struct {
154 unsigned char orientation:2;
155 unsigned char operation:6;
156 } __attribute__ ((packed)) operation;
157 struct {
158 unsigned char length_valid:1;
159 unsigned char length_scope:1;
160 unsigned char imbedded_ccw_valid:1;
161 unsigned char check_bytes:2;
162 unsigned char imbedded_count_valid:1;
163 unsigned char reserved:1;
164 unsigned char read_count_suffix:1;
165 } __attribute__ ((packed)) auxiliary;
166 __u8 imbedded_ccw;
167 __u8 count;
168 struct ch_t seek_addr;
169 struct chr_t search_arg;
170 __u8 sector;
171 __u16 length;
172 __u8 imbedded_count;
173 __u8 extended_operation;
174 __u16 extended_parameter_length;
175 __u8 extended_parameter[0];
176} __attribute__ ((packed));
177
142/* Prefix data for format 0x00 and 0x01 */ 178/* Prefix data for format 0x00 and 0x01 */
143struct PFX_eckd_data { 179struct PFX_eckd_data {
144 unsigned char format; 180 unsigned char format;
145 struct { 181 struct {
146 unsigned char define_extend:1; 182 unsigned char define_extent:1;
147 unsigned char time_stamp:1; 183 unsigned char time_stamp:1;
148 unsigned char verify_base:1; 184 unsigned char verify_base:1;
149 unsigned char hyper_pav:1; 185 unsigned char hyper_pav:1;
@@ -153,9 +189,8 @@ struct PFX_eckd_data {
153 __u8 aux; 189 __u8 aux;
154 __u8 base_lss; 190 __u8 base_lss;
155 __u8 reserved[7]; 191 __u8 reserved[7];
156 struct DE_eckd_data define_extend; 192 struct DE_eckd_data define_extent;
157 struct LO_eckd_data locate_record; 193 struct LRE_eckd_data locate_record;
158 __u8 LO_extended_data[4];
159} __attribute__ ((packed)); 194} __attribute__ ((packed));
160 195
161struct dasd_eckd_characteristics { 196struct dasd_eckd_characteristics {
@@ -228,7 +263,8 @@ struct dasd_eckd_characteristics {
228 __u8 factor7; 263 __u8 factor7;
229 __u8 factor8; 264 __u8 factor8;
230 __u8 reserved2[3]; 265 __u8 reserved2[3];
231 __u8 reserved3[10]; 266 __u8 reserved3[6];
267 __u32 long_no_cyl;
232} __attribute__ ((packed)); 268} __attribute__ ((packed));
233 269
234/* elements of the configuration data */ 270/* elements of the configuration data */
@@ -406,6 +442,7 @@ struct dasd_eckd_private {
406 int uses_cdl; 442 int uses_cdl;
407 struct attrib_data_t attrib; /* e.g. cache operations */ 443 struct attrib_data_t attrib; /* e.g. cache operations */
408 struct dasd_rssd_features features; 444 struct dasd_rssd_features features;
445 u32 real_cyl;
409 446
410 /* alias management */ 447 /* alias management */
411 struct dasd_uid uid; 448 struct dasd_uid uid;
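LV_COMPAT_CYL and the new long_no_cyl field added above exist because the old 16-bit cylinder count cannot describe large volumes. A hedged sketch of how a caller could derive the usable cylinder count from the two fields; this is an assumed usage for illustration, not code taken from the driver.

#include <stdint.h>

#define LV_COMPAT_CYL 0xFFFE    /* value reported in the old 16-bit field */

/* Assumed usage: large volumes report LV_COMPAT_CYL in the legacy 16-bit
 * no_cyl field and carry the real count in the 32-bit long_no_cyl field. */
static uint32_t effective_cylinders(uint16_t no_cyl, uint32_t long_no_cyl)
{
        if (no_cyl == LV_COMPAT_CYL && long_no_cyl)
                return long_no_cyl;
        return no_cyl;
}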
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index f8e05ce98621..c24c8c30380d 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -6,6 +6,8 @@
6 * Author(s): Stefan Weinhuber <wein@de.ibm.com> 6 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
7 */ 7 */
8 8
9#define KMSG_COMPONENT "dasd"
10
9#include <linux/init.h> 11#include <linux/init.h>
10#include <linux/fs.h> 12#include <linux/fs.h>
11#include <linux/kernel.h> 13#include <linux/kernel.h>
@@ -297,11 +299,12 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
297 struct dasd_eer_header header; 299 struct dasd_eer_header header;
298 unsigned long flags; 300 unsigned long flags;
299 struct eerbuffer *eerb; 301 struct eerbuffer *eerb;
302 char *sense;
300 303
301 /* go through cqr chain and count the valid sense data sets */ 304 /* go through cqr chain and count the valid sense data sets */
302 data_size = 0; 305 data_size = 0;
303 for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) 306 for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
304 if (temp_cqr->irb.esw.esw0.erw.cons) 307 if (dasd_get_sense(&temp_cqr->irb))
305 data_size += 32; 308 data_size += 32;
306 309
307 header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ 310 header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
@@ -316,9 +319,11 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
316 list_for_each_entry(eerb, &bufferlist, list) { 319 list_for_each_entry(eerb, &bufferlist, list) {
317 dasd_eer_start_record(eerb, header.total_size); 320 dasd_eer_start_record(eerb, header.total_size);
318 dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header)); 321 dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
319 for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) 322 for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
320 if (temp_cqr->irb.esw.esw0.erw.cons) 323 sense = dasd_get_sense(&temp_cqr->irb);
321 dasd_eer_write_buffer(eerb, cqr->irb.ecw, 32); 324 if (sense)
325 dasd_eer_write_buffer(eerb, sense, 32);
326 }
322 dasd_eer_write_buffer(eerb, "EOR", 4); 327 dasd_eer_write_buffer(eerb, "EOR", 4);
323 } 328 }
324 spin_unlock_irqrestore(&bufferlock, flags); 329 spin_unlock_irqrestore(&bufferlock, flags);
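The trigger writer above sizes one EER record as header + 32 bytes of sense per request in the ERP chain that actually has sense data + a 4-byte "EOR" marker. A minimal model of that accounting with illustrative types:

#include <stddef.h>
#include <stdint.h>

/* Minimal model of an ERP request chain: each element may carry a
 * 32-byte sense buffer; 'refers' points to the request it retried. */
struct erp_req {
        const struct erp_req *refers;
        const uint8_t *sense;           /* NULL if no sense data */
};

/* Size of one EER trigger record: header + 32 bytes per chained request
 * with sense data + 4 bytes for the trailing "EOR" marker. */
static size_t eer_record_size(const struct erp_req *cqr, size_t header_size)
{
        size_t data_size = 0;
        const struct erp_req *tmp;

        for (tmp = cqr; tmp; tmp = tmp->refers)
                if (tmp->sense)
                        data_size += 32;
        return header_size + data_size + 4;
}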
@@ -451,6 +456,7 @@ int dasd_eer_enable(struct dasd_device *device)
451{ 456{
452 struct dasd_ccw_req *cqr; 457 struct dasd_ccw_req *cqr;
453 unsigned long flags; 458 unsigned long flags;
459 struct ccw1 *ccw;
454 460
455 if (device->eer_cqr) 461 if (device->eer_cqr)
456 return 0; 462 return 0;
@@ -468,10 +474,11 @@ int dasd_eer_enable(struct dasd_device *device)
468 cqr->expires = 10 * HZ; 474 cqr->expires = 10 * HZ;
469 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 475 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
470 476
471 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS; 477 ccw = cqr->cpaddr;
472 cqr->cpaddr->count = SNSS_DATA_SIZE; 478 ccw->cmd_code = DASD_ECKD_CCW_SNSS;
473 cqr->cpaddr->flags = 0; 479 ccw->count = SNSS_DATA_SIZE;
474 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 480 ccw->flags = 0;
481 ccw->cda = (__u32)(addr_t) cqr->data;
475 482
476 cqr->buildclk = get_clock(); 483 cqr->buildclk = get_clock();
477 cqr->status = DASD_CQR_FILLED; 484 cqr->status = DASD_CQR_FILLED;
@@ -534,7 +541,7 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
534 if (eerb->buffer_page_count < 1 || 541 if (eerb->buffer_page_count < 1 ||
535 eerb->buffer_page_count > INT_MAX / PAGE_SIZE) { 542 eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
536 kfree(eerb); 543 kfree(eerb);
537 MESSAGE(KERN_WARNING, "can't open device since module " 544 DBF_EVENT(DBF_WARNING, "can't open device since module "
538 "parameter eer_pages is smaller than 1 or" 545 "parameter eer_pages is smaller than 1 or"
539 " bigger than %d", (int)(INT_MAX / PAGE_SIZE)); 546 " bigger than %d", (int)(INT_MAX / PAGE_SIZE));
540 unlock_kernel(); 547 unlock_kernel();
@@ -687,7 +694,7 @@ int __init dasd_eer_init(void)
687 if (rc) { 694 if (rc) {
688 kfree(dasd_eer_dev); 695 kfree(dasd_eer_dev);
689 dasd_eer_dev = NULL; 696 dasd_eer_dev = NULL;
690 MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " 697 DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
691 "register misc device"); 698 "register misc device");
692 return rc; 699 return rc;
693 } 700 }
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 8f10000851a3..d970ce2814be 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define KMSG_COMPONENT "dasd"
13
12#include <linux/ctype.h> 14#include <linux/ctype.h>
13#include <linux/init.h> 15#include <linux/init.h>
14 16
@@ -91,14 +93,14 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
91 93
92 /* just retry - there is nothing to save ... I got no sense data.... */ 94 /* just retry - there is nothing to save ... I got no sense data.... */
93 if (cqr->retries > 0) { 95 if (cqr->retries > 0) {
94 DEV_MESSAGE (KERN_DEBUG, device, 96 DBF_DEV_EVENT(DBF_DEBUG, device,
95 "default ERP called (%i retries left)", 97 "default ERP called (%i retries left)",
96 cqr->retries); 98 cqr->retries);
97 cqr->lpm = LPM_ANYPATH; 99 cqr->lpm = LPM_ANYPATH;
98 cqr->status = DASD_CQR_FILLED; 100 cqr->status = DASD_CQR_FILLED;
99 } else { 101 } else {
100 DEV_MESSAGE (KERN_WARNING, device, "%s", 102 dev_err(&device->cdev->dev,
101 "default ERP called (NO retry left)"); 103 "default ERP has run out of retries and failed\n");
102 cqr->status = DASD_CQR_FAILED; 104 cqr->status = DASD_CQR_FAILED;
103 cqr->stopclk = get_clock(); 105 cqr->stopclk = get_clock();
104 } 106 }
@@ -162,8 +164,21 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
162 device->discipline->dump_sense(device, cqr, irb); 164 device->discipline->dump_sense(device, cqr, irb);
163} 165}
164 166
167void
168dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb)
169{
170 struct dasd_device *device;
171
172 device = cqr->startdev;
173 /* dump sense data to s390 debugfeature*/
174 if (device->discipline && device->discipline->dump_sense_dbf)
175 device->discipline->dump_sense_dbf(device, cqr, irb, "log");
176}
177EXPORT_SYMBOL(dasd_log_sense_dbf);
178
165EXPORT_SYMBOL(dasd_default_erp_action); 179EXPORT_SYMBOL(dasd_default_erp_action);
166EXPORT_SYMBOL(dasd_default_erp_postaction); 180EXPORT_SYMBOL(dasd_default_erp_postaction);
167EXPORT_SYMBOL(dasd_alloc_erp_request); 181EXPORT_SYMBOL(dasd_alloc_erp_request);
168EXPORT_SYMBOL(dasd_free_erp_request); 182EXPORT_SYMBOL(dasd_free_erp_request);
169EXPORT_SYMBOL(dasd_log_sense); 183EXPORT_SYMBOL(dasd_log_sense);
184
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index f1d176021694..a3eb6fd14673 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -6,6 +6,8 @@
6 * 6 *
7 */ 7 */
8 8
9#define KMSG_COMPONENT "dasd"
10
9#include <linux/stddef.h> 11#include <linux/stddef.h>
10#include <linux/kernel.h> 12#include <linux/kernel.h>
11#include <asm/debug.h> 13#include <asm/debug.h>
@@ -128,17 +130,18 @@ dasd_fba_check_characteristics(struct dasd_device *device)
128 private = kzalloc(sizeof(struct dasd_fba_private), 130 private = kzalloc(sizeof(struct dasd_fba_private),
129 GFP_KERNEL | GFP_DMA); 131 GFP_KERNEL | GFP_DMA);
130 if (private == NULL) { 132 if (private == NULL) {
131 DEV_MESSAGE(KERN_WARNING, device, "%s", 133 dev_warn(&device->cdev->dev,
132 "memory allocation failed for private " 134 "Allocating memory for private DASD "
133 "data"); 135 "data failed\n");
134 return -ENOMEM; 136 return -ENOMEM;
135 } 137 }
136 device->private = (void *) private; 138 device->private = (void *) private;
137 } 139 }
138 block = dasd_alloc_block(); 140 block = dasd_alloc_block();
139 if (IS_ERR(block)) { 141 if (IS_ERR(block)) {
140 DEV_MESSAGE(KERN_WARNING, device, "%s", 142 DBF_EVENT(DBF_WARNING, "could not allocate dasd block "
141 "could not allocate dasd block structure"); 143 "structure for device: %s",
144 dev_name(&device->cdev->dev));
142 device->private = NULL; 145 device->private = NULL;
143 kfree(private); 146 kfree(private);
144 return PTR_ERR(block); 147 return PTR_ERR(block);
@@ -150,9 +153,9 @@ dasd_fba_check_characteristics(struct dasd_device *device)
150 rdc_data = (void *) &(private->rdc_data); 153 rdc_data = (void *) &(private->rdc_data);
151 rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32); 154 rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32);
152 if (rc) { 155 if (rc) {
153 DEV_MESSAGE(KERN_WARNING, device, 156 DBF_EVENT(DBF_WARNING, "Read device characteristics returned "
154 "Read device characteristics returned error %d", 157 "error %d for device: %s",
155 rc); 158 rc, dev_name(&device->cdev->dev));
156 device->block = NULL; 159 device->block = NULL;
157 dasd_free_block(block); 160 dasd_free_block(block);
158 device->private = NULL; 161 device->private = NULL;
@@ -160,15 +163,16 @@ dasd_fba_check_characteristics(struct dasd_device *device)
160 return rc; 163 return rc;
161 } 164 }
162 165
163 DEV_MESSAGE(KERN_INFO, device, 166 dev_info(&device->cdev->dev,
164 "%04X/%02X(CU:%04X/%02X) %dMB at(%d B/blk)", 167 "New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB "
165 cdev->id.dev_type, 168 "and %d B/blk\n",
166 cdev->id.dev_model, 169 cdev->id.dev_type,
167 cdev->id.cu_type, 170 cdev->id.dev_model,
168 cdev->id.cu_model, 171 cdev->id.cu_type,
169 ((private->rdc_data.blk_bdsa * 172 cdev->id.cu_model,
170 (private->rdc_data.blk_size >> 9)) >> 11), 173 ((private->rdc_data.blk_bdsa *
171 private->rdc_data.blk_size); 174 (private->rdc_data.blk_size >> 9)) >> 11),
175 private->rdc_data.blk_size);
172 return 0; 176 return 0;
173} 177}
174 178
@@ -180,7 +184,7 @@ static int dasd_fba_do_analysis(struct dasd_block *block)
180 private = (struct dasd_fba_private *) block->base->private; 184 private = (struct dasd_fba_private *) block->base->private;
181 rc = dasd_check_blocksize(private->rdc_data.blk_size); 185 rc = dasd_check_blocksize(private->rdc_data.blk_size);
182 if (rc) { 186 if (rc) {
183 DEV_MESSAGE(KERN_INFO, block->base, "unknown blocksize %d", 187 DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d",
184 private->rdc_data.blk_size); 188 private->rdc_data.blk_size);
185 return rc; 189 return rc;
186 } 190 }
@@ -215,7 +219,7 @@ dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
215 if (cqr->function == dasd_default_erp_action) 219 if (cqr->function == dasd_default_erp_action)
216 return dasd_default_erp_postaction; 220 return dasd_default_erp_postaction;
217 221
218 DEV_MESSAGE(KERN_WARNING, cqr->startdev, "unknown ERP action %p", 222 DBF_DEV_EVENT(DBF_WARNING, cqr->startdev, "unknown ERP action %p",
219 cqr->function); 223 cqr->function);
220 return NULL; 224 return NULL;
221} 225}
@@ -233,9 +237,9 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
233 } 237 }
234 238
235 /* check for unsolicited interrupts */ 239 /* check for unsolicited interrupts */
236 DEV_MESSAGE(KERN_DEBUG, device, "%s", 240 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
237 "unsolicited interrupt received"); 241 "unsolicited interrupt received");
238 device->discipline->dump_sense(device, NULL, irb); 242 device->discipline->dump_sense_dbf(device, NULL, irb, "unsolicited");
239 dasd_schedule_device_bh(device); 243 dasd_schedule_device_bh(device);
240 return; 244 return;
241}; 245};
@@ -437,6 +441,25 @@ dasd_fba_fill_info(struct dasd_device * device,
437} 441}
438 442
439static void 443static void
444dasd_fba_dump_sense_dbf(struct dasd_device *device, struct dasd_ccw_req *req,
445 struct irb *irb, char *reason)
446{
447 int sl;
448 if (irb->esw.esw0.erw.cons) {
449 for (sl = 0; sl < 4; sl++) {
450 DBF_DEV_EVENT(DBF_EMERG, device,
451 "%s: %08x %08x %08x %08x",
452 reason, irb->ecw[8 * 0], irb->ecw[8 * 1],
453 irb->ecw[8 * 2], irb->ecw[8 * 3]);
454 }
455 } else {
456 DBF_DEV_EVENT(DBF_EMERG, device, "%s",
457 "SORRY - NO VALID SENSE AVAILABLE\n");
458 }
459}
460
461
462static void
440dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, 463dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
441 struct irb *irb) 464 struct irb *irb)
442{ 465{
@@ -446,7 +469,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
446 469
447 page = (char *) get_zeroed_page(GFP_ATOMIC); 470 page = (char *) get_zeroed_page(GFP_ATOMIC);
448 if (page == NULL) { 471 if (page == NULL) {
449 DEV_MESSAGE(KERN_ERR, device, " %s", 472 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
450 "No memory to dump sense data"); 473 "No memory to dump sense data");
451 return; 474 return;
452 } 475 }
@@ -476,8 +499,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
476 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 499 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
477 " SORRY - NO VALID SENSE AVAILABLE\n"); 500 " SORRY - NO VALID SENSE AVAILABLE\n");
478 } 501 }
479 MESSAGE_LOG(KERN_ERR, "%s", 502 printk(KERN_ERR "%s", page);
480 page + sizeof(KERN_ERR PRINTK_HEADER));
481 503
482 /* dump the Channel Program */ 504 /* dump the Channel Program */
483 /* print first CCWs (maximum 8) */ 505 /* print first CCWs (maximum 8) */
@@ -498,8 +520,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
498 len += sprintf(page + len, "\n"); 520 len += sprintf(page + len, "\n");
499 act++; 521 act++;
500 } 522 }
501 MESSAGE_LOG(KERN_ERR, "%s", 523 printk(KERN_ERR "%s", page);
502 page + sizeof(KERN_ERR PRINTK_HEADER));
503 524
504 525
505 /* print failing CCW area */ 526 /* print failing CCW area */
@@ -540,8 +561,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
540 act++; 561 act++;
541 } 562 }
542 if (len > 0) 563 if (len > 0)
543 MESSAGE_LOG(KERN_ERR, "%s", 564 printk(KERN_ERR "%s", page);
544 page + sizeof(KERN_ERR PRINTK_HEADER));
545 free_page((unsigned long) page); 565 free_page((unsigned long) page);
546} 566}
547 567
@@ -576,6 +596,7 @@ static struct dasd_discipline dasd_fba_discipline = {
576 .build_cp = dasd_fba_build_cp, 596 .build_cp = dasd_fba_build_cp,
577 .free_cp = dasd_fba_free_cp, 597 .free_cp = dasd_fba_free_cp,
578 .dump_sense = dasd_fba_dump_sense, 598 .dump_sense = dasd_fba_dump_sense,
599 .dump_sense_dbf = dasd_fba_dump_sense_dbf,
579 .fill_info = dasd_fba_fill_info, 600 .fill_info = dasd_fba_fill_info,
580}; 601};
581 602
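The dasd_fba hunks above add a dump_sense_dbf callback alongside dump_sense, so that sense data can be traced through the s390 debug feature instead of being printed to the console. A minimal caller-side sketch of that pattern, built only from what the hunks show (the callback signature and dasd_schedule_device_bh); the NULL guard for disciplines without the callback and the helper name my_handle_unsolicited are assumptions, not part of the patch:

	/* Sketch: trace the sense data of an unsolicited interrupt via the new
	 * discipline callback, then schedule the bottom half as before. */
	static void my_handle_unsolicited(struct dasd_device *device,
					  struct irb *irb)
	{
		if (device->discipline->dump_sense_dbf)	/* assumed guard */
			device->discipline->dump_sense_dbf(device, NULL, irb,
							   "unsolicited");
		dasd_schedule_device_bh(device);
	}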
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index e99d566b69cc..d3198303b93c 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -11,6 +11,8 @@
11 * 11 *
12 */ 12 */
13 13
14#define KMSG_COMPONENT "dasd"
15
14#include <linux/interrupt.h> 16#include <linux/interrupt.h>
15#include <linux/fs.h> 17#include <linux/fs.h>
16#include <linux/blkpg.h> 18#include <linux/blkpg.h>
@@ -163,9 +165,8 @@ int dasd_gendisk_init(void)
163 /* Register to static dasd major 94 */ 165 /* Register to static dasd major 94 */
164 rc = register_blkdev(DASD_MAJOR, "dasd"); 166 rc = register_blkdev(DASD_MAJOR, "dasd");
165 if (rc != 0) { 167 if (rc != 0) {
166 MESSAGE(KERN_WARNING, 168 pr_warning("Registering the device driver with major number "
167 "Couldn't register successfully to " 169 "%d failed\n", DASD_MAJOR);
168 "major no %d", DASD_MAJOR);
169 return rc; 170 return rc;
170 } 171 }
171 return 0; 172 return 0;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 4a39084d9c95..c1e487f774c6 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -112,6 +112,9 @@ do { \
112 d_data); \ 112 d_data); \
113} while(0) 113} while(0)
114 114
115/* limit size for an errorstring */
116#define ERRORLENGTH 30
117
115/* definition of dbf debug levels */ 118/* definition of dbf debug levels */
116#define DBF_EMERG 0 /* system is unusable */ 119#define DBF_EMERG 0 /* system is unusable */
117#define DBF_ALERT 1 /* action must be taken immediately */ 120#define DBF_ALERT 1 /* action must be taken immediately */
@@ -157,7 +160,8 @@ struct dasd_ccw_req {
157 struct dasd_block *block; /* the originating block device */ 160 struct dasd_block *block; /* the originating block device */
158 struct dasd_device *memdev; /* the device used to allocate this */ 161 struct dasd_device *memdev; /* the device used to allocate this */
159 struct dasd_device *startdev; /* device the request is started on */ 162 struct dasd_device *startdev; /* device the request is started on */
160 struct ccw1 *cpaddr; /* address of channel program */ 163 void *cpaddr; /* address of ccw or tcw */
164 unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */
161 char status; /* status of this request */ 165 char status; /* status of this request */
162 short retries; /* A retry counter */ 166 short retries; /* A retry counter */
163 unsigned long flags; /* flags of this request */ 167 unsigned long flags; /* flags of this request */
@@ -280,6 +284,8 @@ struct dasd_discipline {
280 dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *); 284 dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
281 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *, 285 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
282 struct irb *); 286 struct irb *);
287 void (*dump_sense_dbf) (struct dasd_device *, struct dasd_ccw_req *,
288 struct irb *, char *);
283 289
284 void (*handle_unsolicited_interrupt) (struct dasd_device *, 290 void (*handle_unsolicited_interrupt) (struct dasd_device *,
285 struct irb *); 291 struct irb *);
@@ -378,7 +384,7 @@ struct dasd_block {
378 struct block_device *bdev; 384 struct block_device *bdev;
379 atomic_t open_count; 385 atomic_t open_count;
380 386
381 unsigned long blocks; /* size of volume in blocks */ 387 unsigned long long blocks; /* size of volume in blocks */
382 unsigned int bp_block; /* bytes per block */ 388 unsigned int bp_block; /* bytes per block */
383 unsigned int s2b_shift; /* log2 (bp_block/512) */ 389 unsigned int s2b_shift; /* log2 (bp_block/512) */
384 390
@@ -573,12 +579,14 @@ int dasd_generic_notify(struct ccw_device *, int);
573void dasd_generic_handle_state_change(struct dasd_device *); 579void dasd_generic_handle_state_change(struct dasd_device *);
574 580
575int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int); 581int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int);
582char *dasd_get_sense(struct irb *);
576 583
577/* externals in dasd_devmap.c */ 584/* externals in dasd_devmap.c */
578extern int dasd_max_devindex; 585extern int dasd_max_devindex;
579extern int dasd_probeonly; 586extern int dasd_probeonly;
580extern int dasd_autodetect; 587extern int dasd_autodetect;
581extern int dasd_nopav; 588extern int dasd_nopav;
589extern int dasd_nofcx;
582 590
583int dasd_devmap_init(void); 591int dasd_devmap_init(void);
584void dasd_devmap_exit(void); 592void dasd_devmap_exit(void);
@@ -623,6 +631,7 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
623 struct dasd_device *); 631 struct dasd_device *);
624void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); 632void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
625void dasd_log_sense(struct dasd_ccw_req *, struct irb *); 633void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
634void dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb);
626 635
627/* externals in dasd_3990_erp.c */ 636/* externals in dasd_3990_erp.c */
628struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *); 637struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
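dasd_int.h gains the dump_sense_dbf discipline hook, the dasd_get_sense() helper, and a void *cpaddr plus cpmode pair so a request can carry either a ccw or a tcw channel program. A short caller sketch for dasd_get_sense(); the assumption that it returns NULL when the IRB carries no valid sense data is not spelled out by the declaration above:

	/* Sketch only: log the first sense byte through the debug feature.
	 * The NULL-when-no-sense convention is an assumption. */
	static void my_log_first_sense_byte(struct dasd_device *device,
					    struct irb *irb)
	{
		char *sense = dasd_get_sense(irb);

		if (sense)
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "sense byte 0: %02x", sense[0]);
	}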
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index b82d816d9ef7..4ce3f72ee1c1 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -9,6 +9,9 @@
9 * 9 *
10 * i/o controls for the dasd driver. 10 * i/o controls for the dasd driver.
11 */ 11 */
12
13#define KMSG_COMPONENT "dasd"
14
12#include <linux/interrupt.h> 15#include <linux/interrupt.h>
13#include <linux/major.h> 16#include <linux/major.h>
14#include <linux/fs.h> 17#include <linux/fs.h>
@@ -94,7 +97,8 @@ static int dasd_ioctl_quiesce(struct dasd_block *block)
94 if (!capable (CAP_SYS_ADMIN)) 97 if (!capable (CAP_SYS_ADMIN))
95 return -EACCES; 98 return -EACCES;
96 99
97 DEV_MESSAGE(KERN_DEBUG, base, "%s", "Quiesce IO on device"); 100 dev_info(&base->cdev->dev, "The DASD has been put in the quiesce "
101 "state\n");
98 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 102 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
99 base->stopped |= DASD_STOPPED_QUIESCE; 103 base->stopped |= DASD_STOPPED_QUIESCE;
100 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 104 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
@@ -103,7 +107,7 @@ static int dasd_ioctl_quiesce(struct dasd_block *block)
103 107
104 108
105/* 109/*
106 * Quiesce device. 110 * Resume device.
107 */ 111 */
108static int dasd_ioctl_resume(struct dasd_block *block) 112static int dasd_ioctl_resume(struct dasd_block *block)
109{ 113{
@@ -114,7 +118,8 @@ static int dasd_ioctl_resume(struct dasd_block *block)
114 if (!capable (CAP_SYS_ADMIN)) 118 if (!capable (CAP_SYS_ADMIN))
115 return -EACCES; 119 return -EACCES;
116 120
117 DEV_MESSAGE(KERN_DEBUG, base, "%s", "resume IO on device"); 121 dev_info(&base->cdev->dev, "I/O operations have been resumed "
122 "on the DASD\n");
118 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 123 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
119 base->stopped &= ~DASD_STOPPED_QUIESCE; 124 base->stopped &= ~DASD_STOPPED_QUIESCE;
120 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 125 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
@@ -140,13 +145,13 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
140 return -EPERM; 145 return -EPERM;
141 146
142 if (base->state != DASD_STATE_BASIC) { 147 if (base->state != DASD_STATE_BASIC) {
143 DEV_MESSAGE(KERN_WARNING, base, "%s", 148 dev_warn(&base->cdev->dev,
144 "dasd_format: device is not disabled! "); 149 "The DASD cannot be formatted while it is enabled\n");
145 return -EBUSY; 150 return -EBUSY;
146 } 151 }
147 152
148 DBF_DEV_EVENT(DBF_NOTICE, base, 153 DBF_DEV_EVENT(DBF_NOTICE, base,
149 "formatting units %d to %d (%d B blocks) flags %d", 154 "formatting units %u to %u (%u B blocks) flags %u",
150 fdata->start_unit, 155 fdata->start_unit,
151 fdata->stop_unit, fdata->blksize, fdata->intensity); 156 fdata->stop_unit, fdata->blksize, fdata->intensity);
152 157
@@ -169,10 +174,9 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
169 dasd_sfree_request(cqr, cqr->memdev); 174 dasd_sfree_request(cqr, cqr->memdev);
170 if (rc) { 175 if (rc) {
171 if (rc != -ERESTARTSYS) 176 if (rc != -ERESTARTSYS)
172 DEV_MESSAGE(KERN_ERR, base, 177 dev_err(&base->cdev->dev,
173 " Formatting of unit %d failed " 178 "Formatting unit %d failed with "
174 "with rc = %d", 179 "rc=%d\n", fdata->start_unit, rc);
175 fdata->start_unit, rc);
176 return rc; 180 return rc;
177 } 181 }
178 fdata->start_unit++; 182 fdata->start_unit++;
@@ -199,8 +203,9 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
199 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) 203 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t)))
200 return -EFAULT; 204 return -EFAULT;
201 if (bdev != bdev->bd_contains) { 205 if (bdev != bdev->bd_contains) {
202 DEV_MESSAGE(KERN_WARNING, block->base, "%s", 206 dev_warn(&block->base->cdev->dev,
203 "Cannot low-level format a partition"); 207 "The specified DASD is a partition and cannot be "
208 "formatted\n");
204 return -EINVAL; 209 return -EINVAL;
205 } 210 }
206 return dasd_format(block, &fdata); 211 return dasd_format(block, &fdata);
@@ -365,9 +370,9 @@ static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
365 return ret; 370 return ret;
366} 371}
367 372
368int 373static int
369dasd_ioctl(struct block_device *bdev, fmode_t mode, 374dasd_do_ioctl(struct block_device *bdev, fmode_t mode,
370 unsigned int cmd, unsigned long arg) 375 unsigned int cmd, unsigned long arg)
371{ 376{
372 struct dasd_block *block = bdev->bd_disk->private_data; 377 struct dasd_block *block = bdev->bd_disk->private_data;
373 void __user *argp = (void __user *)arg; 378 void __user *argp = (void __user *)arg;
@@ -420,3 +425,14 @@ dasd_ioctl(struct block_device *bdev, fmode_t mode,
420 return -EINVAL; 425 return -EINVAL;
421 } 426 }
422} 427}
428
429int dasd_ioctl(struct block_device *bdev, fmode_t mode,
430 unsigned int cmd, unsigned long arg)
431{
432 int rc;
433
434 lock_kernel();
435 rc = dasd_do_ioctl(bdev, mode, cmd, arg);
436 unlock_kernel();
437 return rc;
438}
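The final dasd_ioctl.c hunk pushes the big kernel lock down into the driver: the old entry point becomes dasd_do_ioctl() and the exported dasd_ioctl() now takes lock_kernel()/unlock_kernel() around it. A sketch of the same wrapper shape; the <linux/smp_lock.h> include is assumed (the hunk does not show it) and my_ioctl/my_do_ioctl are placeholders:

	#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel(); assumed include */

	static int my_do_ioctl(struct block_device *bdev, fmode_t mode,
			       unsigned int cmd, unsigned long arg)
	{
		return -ENOTTY;	/* stub: real command handling goes here */
	}

	/* Sketch of the BKL push-down: serialization now happens explicitly
	 * in the driver instead of implicitly in the block layer. */
	static int my_ioctl(struct block_device *bdev, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
	{
		int rc;

		lock_kernel();
		rc = my_do_ioctl(bdev, mode, cmd, arg);
		unlock_kernel();
		return rc;
	}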
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index bf6fd348f20e..2080ba6a69b0 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -11,6 +11,8 @@
11 * 11 *
12 */ 12 */
13 13
14#define KMSG_COMPONENT "dasd"
15
14#include <linux/ctype.h> 16#include <linux/ctype.h>
15#include <linux/seq_file.h> 17#include <linux/seq_file.h>
16#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
@@ -112,7 +114,7 @@ dasd_devices_show(struct seq_file *m, void *v)
112 seq_printf(m, "n/f "); 114 seq_printf(m, "n/f ");
113 else 115 else
114 seq_printf(m, 116 seq_printf(m,
115 "at blocksize: %d, %ld blocks, %ld MB", 117 "at blocksize: %d, %lld blocks, %lld MB",
116 block->bp_block, block->blocks, 118 block->bp_block, block->blocks,
117 ((block->bp_block >> 9) * 119 ((block->bp_block >> 9) *
118 block->blocks) >> 11); 120 block->blocks) >> 11);
@@ -267,7 +269,7 @@ dasd_statistics_write(struct file *file, const char __user *user_buf,
267 buffer = dasd_get_user_string(user_buf, user_len); 269 buffer = dasd_get_user_string(user_buf, user_len);
268 if (IS_ERR(buffer)) 270 if (IS_ERR(buffer))
269 return PTR_ERR(buffer); 271 return PTR_ERR(buffer);
270 MESSAGE_LOG(KERN_INFO, "/proc/dasd/statictics: '%s'", buffer); 272 DBF_EVENT(DBF_DEBUG, "/proc/dasd/statistics: '%s'\n", buffer);
271 273
272 /* check for valid verbs */ 274 /* check for valid verbs */
273 for (str = buffer; isspace(*str); str++); 275 for (str = buffer; isspace(*str); str++);
@@ -277,33 +279,33 @@ dasd_statistics_write(struct file *file, const char __user *user_buf,
277 if (strcmp(str, "on") == 0) { 279 if (strcmp(str, "on") == 0) {
278 /* switch on statistics profiling */ 280 /* switch on statistics profiling */
279 dasd_profile_level = DASD_PROFILE_ON; 281 dasd_profile_level = DASD_PROFILE_ON;
280 MESSAGE(KERN_INFO, "%s", "Statistics switched on"); 282 pr_info("The statistics feature has been switched "
283 "on\n");
281 } else if (strcmp(str, "off") == 0) { 284 } else if (strcmp(str, "off") == 0) {
282 /* switch off and reset statistics profiling */ 285 /* switch off and reset statistics profiling */
283 memset(&dasd_global_profile, 286 memset(&dasd_global_profile,
284 0, sizeof (struct dasd_profile_info_t)); 287 0, sizeof (struct dasd_profile_info_t));
285 dasd_profile_level = DASD_PROFILE_OFF; 288 dasd_profile_level = DASD_PROFILE_OFF;
286 MESSAGE(KERN_INFO, "%s", "Statistics switched off"); 289 pr_info("The statistics feature has been switched "
290 "off\n");
287 } else 291 } else
288 goto out_error; 292 goto out_error;
289 } else if (strncmp(str, "reset", 5) == 0) { 293 } else if (strncmp(str, "reset", 5) == 0) {
290 /* reset the statistics */ 294 /* reset the statistics */
291 memset(&dasd_global_profile, 0, 295 memset(&dasd_global_profile, 0,
292 sizeof (struct dasd_profile_info_t)); 296 sizeof (struct dasd_profile_info_t));
293 MESSAGE(KERN_INFO, "%s", "Statistics reset"); 297 pr_info("The statistics have been reset\n");
294 } else 298 } else
295 goto out_error; 299 goto out_error;
296 kfree(buffer); 300 kfree(buffer);
297 return user_len; 301 return user_len;
298out_error: 302out_error:
299 MESSAGE(KERN_WARNING, "%s", 303 pr_warning("%s is not a supported value for /proc/dasd/statistics\n",
300 "/proc/dasd/statistics: only 'set on', 'set off' " 304 str);
301 "and 'reset' are supported verbs");
302 kfree(buffer); 305 kfree(buffer);
303 return -EINVAL; 306 return -EINVAL;
304#else 307#else
305 MESSAGE(KERN_WARNING, "%s", 308 pr_warning("/proc/dasd/statistics: is not activated in this kernel\n");
306 "/proc/dasd/statistics: is not activated in this kernel");
307 return user_len; 309 return user_len;
308#endif /* CONFIG_DASD_PROFILE */ 310#endif /* CONFIG_DASD_PROFILE */
309} 311}
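dasd_proc.c now defines KMSG_COMPONENT and reports through pr_info()/pr_warning() instead of the MESSAGE macro. In other s390 drivers the component tag is usually paired with a pr_fmt() define so every pr_* line carries the "dasd: " prefix; that companion define is not part of the hunk above and appears here only as an assumption:

	#define KMSG_COMPONENT "dasd"
	#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt	/* assumed companion define */

	#include <linux/kernel.h>

	/* Sketch: with the defines above this prints
	 * "dasd: The statistics feature has been switched on". */
	static void my_report_statistics_on(void)
	{
		pr_info("The statistics feature has been switched on\n");
	}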
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index d0d565a05dfe..c07809c8016a 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -324,8 +324,6 @@ static inline void tape_proc_cleanup (void) {;}
324#endif 324#endif
325 325
326/* a function for dumping device sense info */ 326/* a function for dumping device sense info */
327extern void tape_dump_sense(struct tape_device *, struct tape_request *,
328 struct irb *);
329extern void tape_dump_sense_dbf(struct tape_device *, struct tape_request *, 327extern void tape_dump_sense_dbf(struct tape_device *, struct tape_request *,
330 struct irb *); 328 struct irb *);
331 329
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 22ca34361ed7..807ded5eb049 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -8,6 +8,8 @@
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */ 9 */
10 10
11#define KMSG_COMPONENT "tape"
12
11#include <linux/module.h> 13#include <linux/module.h>
12#include <linux/init.h> 14#include <linux/init.h>
13#include <linux/bio.h> 15#include <linux/bio.h>
@@ -18,8 +20,6 @@
18#include "tape.h" 20#include "tape.h"
19#include "tape_std.h" 21#include "tape_std.h"
20 22
21#define PRINTK_HEADER "TAPE_34XX: "
22
23/* 23/*
24 * Pointer to debug area. 24 * Pointer to debug area.
25 */ 25 */
@@ -203,8 +203,7 @@ tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
203 tape_34xx_schedule_work(device, TO_MSEN); 203 tape_34xx_schedule_work(device, TO_MSEN);
204 } else { 204 } else {
205 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id); 205 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
206 PRINT_WARN("Unsolicited IRQ (Device End) caught.\n"); 206 tape_dump_sense_dbf(device, NULL, irb);
207 tape_dump_sense(device, NULL, irb);
208 } 207 }
209 return TAPE_IO_SUCCESS; 208 return TAPE_IO_SUCCESS;
210} 209}
@@ -226,9 +225,7 @@ tape_34xx_erp_read_opposite(struct tape_device *device,
226 tape_std_read_backward(device, request); 225 tape_std_read_backward(device, request);
227 return tape_34xx_erp_retry(request); 226 return tape_34xx_erp_retry(request);
228 } 227 }
229 if (request->op != TO_RBA) 228
230 PRINT_ERR("read_opposite called with state:%s\n",
231 tape_op_verbose[request->op]);
232 /* 229 /*
233 * We tried to read forward and backward, but had no 230 * We tried to read forward and backward, but had no
234 * success -> failed. 231 * success -> failed.
@@ -241,13 +238,9 @@ tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request,
241 struct irb *irb, int no) 238 struct irb *irb, int no)
242{ 239{
243 if (request->op != TO_ASSIGN) { 240 if (request->op != TO_ASSIGN) {
244 PRINT_WARN("An unexpected condition #%d was caught in " 241 dev_err(&device->cdev->dev, "An unexpected condition %d "
245 "tape error recovery.\n", no); 242 "occurred in tape error recovery\n", no);
246 PRINT_WARN("Please report this incident.\n"); 243 tape_dump_sense_dbf(device, request, irb);
247 if (request)
248 PRINT_WARN("Operation of tape:%s\n",
249 tape_op_verbose[request->op]);
250 tape_dump_sense(device, request, irb);
251 } 244 }
252 return tape_34xx_erp_failed(request, -EIO); 245 return tape_34xx_erp_failed(request, -EIO);
253} 246}
@@ -261,9 +254,8 @@ tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request,
261 struct irb *irb) 254 struct irb *irb)
262{ 255{
263 if (irb->ecw[3] == 0x40) { 256 if (irb->ecw[3] == 0x40) {
264 PRINT_WARN ("Data overrun error between control-unit " 257 dev_warn (&device->cdev->dev, "A data overrun occurred between"
265 "and drive. Use a faster channel connection, " 258 " the control unit and tape unit\n");
266 "if possible! \n");
267 return tape_34xx_erp_failed(request, -EIO); 259 return tape_34xx_erp_failed(request, -EIO);
268 } 260 }
269 return tape_34xx_erp_bug(device, request, irb, -1); 261 return tape_34xx_erp_bug(device, request, irb, -1);
@@ -280,7 +272,8 @@ tape_34xx_erp_sequence(struct tape_device *device,
280 /* 272 /*
281 * cu detected incorrect block-id sequence on tape. 273 * cu detected incorrect block-id sequence on tape.
282 */ 274 */
283 PRINT_WARN("Illegal block-id sequence found!\n"); 275 dev_warn (&device->cdev->dev, "The block ID sequence on the "
276 "tape is incorrect\n");
284 return tape_34xx_erp_failed(request, -EIO); 277 return tape_34xx_erp_failed(request, -EIO);
285 } 278 }
286 /* 279 /*
@@ -393,8 +386,6 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
393 /* Writing at physical end of volume */ 386 /* Writing at physical end of volume */
394 return tape_34xx_erp_failed(request, -ENOSPC); 387 return tape_34xx_erp_failed(request, -ENOSPC);
395 default: 388 default:
396 PRINT_ERR("Invalid op in %s:%i\n",
397 __func__, __LINE__);
398 return tape_34xx_erp_failed(request, 0); 389 return tape_34xx_erp_failed(request, 0);
399 } 390 }
400 } 391 }
@@ -420,7 +411,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
420 irb, -4); 411 irb, -4);
421 412
422 /* data check is permanent, CU recovery has failed */ 413 /* data check is permanent, CU recovery has failed */
423 PRINT_WARN("Permanent read error\n"); 414 dev_warn (&device->cdev->dev, "A read error occurred "
415 "that cannot be recovered\n");
424 return tape_34xx_erp_failed(request, -EIO); 416 return tape_34xx_erp_failed(request, -EIO);
425 case 0x25: 417 case 0x25:
426 // a write data check occurred 418 // a write data check occurred
@@ -433,22 +425,26 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
433 irb, -5); 425 irb, -5);
434 426
435 // data check is permanent, cu-recovery has failed 427 // data check is permanent, cu-recovery has failed
436 PRINT_WARN("Permanent write error\n"); 428 dev_warn (&device->cdev->dev, "A write error on the "
429 "tape cannot be recovered\n");
437 return tape_34xx_erp_failed(request, -EIO); 430 return tape_34xx_erp_failed(request, -EIO);
438 case 0x26: 431 case 0x26:
439 /* Data Check (read opposite) occurred. */ 432 /* Data Check (read opposite) occurred. */
440 return tape_34xx_erp_read_opposite(device, request); 433 return tape_34xx_erp_read_opposite(device, request);
441 case 0x28: 434 case 0x28:
442 /* ID-Mark at tape start couldn't be written */ 435 /* ID-Mark at tape start couldn't be written */
443 PRINT_WARN("ID-Mark could not be written.\n"); 436 dev_warn (&device->cdev->dev, "Writing the ID-mark "
437 "failed\n");
444 return tape_34xx_erp_failed(request, -EIO); 438 return tape_34xx_erp_failed(request, -EIO);
445 case 0x31: 439 case 0x31:
446 /* Tape void. Tried to read beyond end of device. */ 440 /* Tape void. Tried to read beyond end of device. */
447 PRINT_WARN("Read beyond end of recorded area.\n"); 441 dev_warn (&device->cdev->dev, "Reading the tape beyond"
442 " the end of the recorded area failed\n");
448 return tape_34xx_erp_failed(request, -ENOSPC); 443 return tape_34xx_erp_failed(request, -ENOSPC);
449 case 0x41: 444 case 0x41:
450 /* Record sequence error. */ 445 /* Record sequence error. */
451 PRINT_WARN("Invalid block-id sequence found.\n"); 446 dev_warn (&device->cdev->dev, "The tape contains an "
447 "incorrect block ID sequence\n");
452 return tape_34xx_erp_failed(request, -EIO); 448 return tape_34xx_erp_failed(request, -EIO);
453 default: 449 default:
454 /* all data checks for 3480 should result in one of 450 /* all data checks for 3480 should result in one of
@@ -470,16 +466,12 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
470 switch (sense[3]) { 466 switch (sense[3]) {
471 case 0x00: 467 case 0x00:
472 /* Unit check with erpa code 0. Report and ignore. */ 468 /* Unit check with erpa code 0. Report and ignore. */
473 PRINT_WARN("Non-error sense was found. "
474 "Unit-check will be ignored.\n");
475 return TAPE_IO_SUCCESS; 469 return TAPE_IO_SUCCESS;
476 case 0x21: 470 case 0x21:
477 /* 471 /*
478 * Data streaming not operational. CU will switch to 472 * Data streaming not operational. CU will switch to
479 * interlock mode. Reissue the command. 473 * interlock mode. Reissue the command.
480 */ 474 */
481 PRINT_WARN("Data streaming not operational. "
482 "Switching to interlock-mode.\n");
483 return tape_34xx_erp_retry(request); 475 return tape_34xx_erp_retry(request);
484 case 0x22: 476 case 0x22:
485 /* 477 /*
@@ -487,11 +479,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
487 * error on the lower interface, internal path not usable, 479 * error on the lower interface, internal path not usable,
488 * or error during cartridge load. 480 * or error during cartridge load.
489 */ 481 */
490 PRINT_WARN("A path equipment check occurred. One of the " 482 dev_warn (&device->cdev->dev, "A path equipment check occurred"
491 "following conditions occurred:\n"); 483 " for the tape device\n");
492 PRINT_WARN("drive adapter error, buffer error on the lower "
493 "interface, internal path not usable, error "
494 "during cartridge load.\n");
495 return tape_34xx_erp_failed(request, -EIO); 484 return tape_34xx_erp_failed(request, -EIO);
496 case 0x24: 485 case 0x24:
497 /* 486 /*
@@ -514,7 +503,6 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
514 * but the hardware isn't capable to do idrc, or a perform 503 * but the hardware isn't capable to do idrc, or a perform
515 * subsystem func is issued and the CU is not on-line. 504 * subsystem func is issued and the CU is not on-line.
516 */ 505 */
517 PRINT_WARN ("Function incompatible. Try to switch off idrc\n");
518 return tape_34xx_erp_failed(request, -EIO); 506 return tape_34xx_erp_failed(request, -EIO);
519 case 0x2a: 507 case 0x2a:
520 /* 508 /*
@@ -552,23 +540,26 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
552 * reading the format id mark or that the format specified 540 * reading the format id mark or that the format specified
553 * is not supported by the drive. 541 * is not supported by the drive.
554 */ 542 */
555 PRINT_WARN("Drive not capable processing the tape format!\n"); 543 dev_warn (&device->cdev->dev, "The tape unit cannot process "
544 "the tape format\n");
556 return tape_34xx_erp_failed(request, -EMEDIUMTYPE); 545 return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
557 case 0x30: 546 case 0x30:
558 /* The medium is write protected. */ 547 /* The medium is write protected. */
559 PRINT_WARN("Medium is write protected!\n"); 548 dev_warn (&device->cdev->dev, "The tape medium is write-"
549 "protected\n");
560 return tape_34xx_erp_failed(request, -EACCES); 550 return tape_34xx_erp_failed(request, -EACCES);
561 case 0x32: 551 case 0x32:
562 // Tension loss. We cannot recover this, it's an I/O error. 552 // Tension loss. We cannot recover this, it's an I/O error.
563 PRINT_WARN("The drive lost tape tension.\n"); 553 dev_warn (&device->cdev->dev, "The tape does not have the "
554 "required tape tension\n");
564 return tape_34xx_erp_failed(request, -EIO); 555 return tape_34xx_erp_failed(request, -EIO);
565 case 0x33: 556 case 0x33:
566 /* 557 /*
567 * Load Failure. The cartridge was not inserted correctly or 558 * Load Failure. The cartridge was not inserted correctly or
568 * the tape is not threaded correctly. 559 * the tape is not threaded correctly.
569 */ 560 */
570 PRINT_WARN("Cartridge load failure. Reload the cartridge " 561 dev_warn (&device->cdev->dev, "The tape unit failed to load"
571 "and try again.\n"); 562 " the cartridge\n");
572 tape_34xx_delete_sbid_from(device, 0); 563 tape_34xx_delete_sbid_from(device, 0);
573 return tape_34xx_erp_failed(request, -EIO); 564 return tape_34xx_erp_failed(request, -EIO);
574 case 0x34: 565 case 0x34:
@@ -576,8 +567,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
576 * Unload failure. The drive cannot maintain tape tension 567 * Unload failure. The drive cannot maintain tape tension
577 * and control tape movement during an unload operation. 568 * and control tape movement during an unload operation.
578 */ 569 */
579 PRINT_WARN("Failure during cartridge unload. " 570 dev_warn (&device->cdev->dev, "Automatic unloading of the tape"
580 "Please try manually.\n"); 571 " cartridge failed\n");
581 if (request->op == TO_RUN) 572 if (request->op == TO_RUN)
582 return tape_34xx_erp_failed(request, -EIO); 573 return tape_34xx_erp_failed(request, -EIO);
583 return tape_34xx_erp_bug(device, request, irb, sense[3]); 574 return tape_34xx_erp_bug(device, request, irb, sense[3]);
@@ -589,8 +580,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
589 * - the cartridge loader does not respond correctly 580 * - the cartridge loader does not respond correctly
590 * - a failure occurs during an index, load, or unload cycle 581 * - a failure occurs during an index, load, or unload cycle
591 */ 582 */
592 PRINT_WARN("Equipment check! Please check the drive and " 583 dev_warn (&device->cdev->dev, "An equipment check has occurred"
593 "the cartridge loader.\n"); 584 " on the tape unit\n");
594 return tape_34xx_erp_failed(request, -EIO); 585 return tape_34xx_erp_failed(request, -EIO);
595 case 0x36: 586 case 0x36:
596 if (device->cdev->id.driver_info == tape_3490) 587 if (device->cdev->id.driver_info == tape_3490)
@@ -603,7 +594,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
603 * Tape length error. The tape is shorter than reported in 594 * Tape length error. The tape is shorter than reported in
604 * the beginning-of-tape data. 595 * the beginning-of-tape data.
605 */ 596 */
606 PRINT_WARN("Tape length error.\n"); 597 dev_warn (&device->cdev->dev, "The tape information states an"
598 " incorrect length\n");
607 return tape_34xx_erp_failed(request, -EIO); 599 return tape_34xx_erp_failed(request, -EIO);
608 case 0x38: 600 case 0x38:
609 /* 601 /*
@@ -620,12 +612,12 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
620 return tape_34xx_erp_failed(request, -EIO); 612 return tape_34xx_erp_failed(request, -EIO);
621 case 0x3a: 613 case 0x3a:
622 /* Drive switched to not ready. */ 614 /* Drive switched to not ready. */
623 PRINT_WARN("Drive not ready. Turn the ready/not ready switch " 615 dev_warn (&device->cdev->dev, "The tape unit is not ready\n");
624 "to ready position and try again.\n");
625 return tape_34xx_erp_failed(request, -EIO); 616 return tape_34xx_erp_failed(request, -EIO);
626 case 0x3b: 617 case 0x3b:
627 /* Manual rewind or unload. This causes an I/O error. */ 618 /* Manual rewind or unload. This causes an I/O error. */
628 PRINT_WARN("Medium was rewound or unloaded manually.\n"); 619 dev_warn (&device->cdev->dev, "The tape medium has been "
620 "rewound or unloaded manually\n");
629 tape_34xx_delete_sbid_from(device, 0); 621 tape_34xx_delete_sbid_from(device, 0);
630 return tape_34xx_erp_failed(request, -EIO); 622 return tape_34xx_erp_failed(request, -EIO);
631 case 0x42: 623 case 0x42:
@@ -633,7 +625,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
633 * Degraded mode. A condition that can cause degraded 625 * Degraded mode. A condition that can cause degraded
634 * performance is detected. 626 * performance is detected.
635 */ 627 */
636 PRINT_WARN("Subsystem is running in degraded mode.\n"); 628 dev_warn (&device->cdev->dev, "The tape subsystem is running "
629 "in degraded mode\n");
637 return tape_34xx_erp_retry(request); 630 return tape_34xx_erp_retry(request);
638 case 0x43: 631 case 0x43:
639 /* Drive not ready. */ 632 /* Drive not ready. */
@@ -652,7 +645,6 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
652 break; 645 break;
653 } 646 }
654 } 647 }
655 PRINT_WARN("The drive is not ready.\n");
656 return tape_34xx_erp_failed(request, -ENOMEDIUM); 648 return tape_34xx_erp_failed(request, -ENOMEDIUM);
657 case 0x44: 649 case 0x44:
658 /* Locate Block unsuccessful. */ 650 /* Locate Block unsuccessful. */
@@ -663,7 +655,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
663 return tape_34xx_erp_failed(request, -EIO); 655 return tape_34xx_erp_failed(request, -EIO);
664 case 0x45: 656 case 0x45:
665 /* The drive is assigned to a different channel path. */ 657 /* The drive is assigned to a different channel path. */
666 PRINT_WARN("The drive is assigned elsewhere.\n"); 658 dev_warn (&device->cdev->dev, "The tape unit is already "
659 "assigned\n");
667 return tape_34xx_erp_failed(request, -EIO); 660 return tape_34xx_erp_failed(request, -EIO);
668 case 0x46: 661 case 0x46:
669 /* 662 /*
@@ -671,11 +664,12 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
671 * the power supply may be switched off or 664 * the power supply may be switched off or
672 * the drive address may not be set correctly. 665 * the drive address may not be set correctly.
673 */ 666 */
674 PRINT_WARN("The drive is not on-line."); 667 dev_warn (&device->cdev->dev, "The tape unit is not online\n");
675 return tape_34xx_erp_failed(request, -EIO); 668 return tape_34xx_erp_failed(request, -EIO);
676 case 0x47: 669 case 0x47:
677 /* Volume fenced. CU reports volume integrity is lost. */ 670 /* Volume fenced. CU reports volume integrity is lost. */
678 PRINT_WARN("Volume fenced. The volume integrity is lost.\n"); 671 dev_warn (&device->cdev->dev, "The control unit has fenced "
672 "access to the tape volume\n");
679 tape_34xx_delete_sbid_from(device, 0); 673 tape_34xx_delete_sbid_from(device, 0);
680 return tape_34xx_erp_failed(request, -EIO); 674 return tape_34xx_erp_failed(request, -EIO);
681 case 0x48: 675 case 0x48:
@@ -683,20 +677,21 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
683 return tape_34xx_erp_retry(request); 677 return tape_34xx_erp_retry(request);
684 case 0x49: 678 case 0x49:
685 /* Bus out check. A parity check error on the bus was found. */ 679 /* Bus out check. A parity check error on the bus was found. */
686 PRINT_WARN("Bus out check. A data transfer over the bus " 680 dev_warn (&device->cdev->dev, "A parity error occurred on the "
687 "has been corrupted.\n"); 681 "tape bus\n");
688 return tape_34xx_erp_failed(request, -EIO); 682 return tape_34xx_erp_failed(request, -EIO);
689 case 0x4a: 683 case 0x4a:
690 /* Control unit erp failed. */ 684 /* Control unit erp failed. */
691 PRINT_WARN("The control unit I/O error recovery failed.\n"); 685 dev_warn (&device->cdev->dev, "I/O error recovery failed on "
686 "the tape control unit\n");
692 return tape_34xx_erp_failed(request, -EIO); 687 return tape_34xx_erp_failed(request, -EIO);
693 case 0x4b: 688 case 0x4b:
694 /* 689 /*
695 * CU and drive incompatible. The drive requests micro-program 690 * CU and drive incompatible. The drive requests micro-program
696 * patches, which are not available on the CU. 691 * patches, which are not available on the CU.
697 */ 692 */
698 PRINT_WARN("The drive needs microprogram patches from the " 693 dev_warn (&device->cdev->dev, "The tape unit requires a "
699 "control unit, which are not available.\n"); 694 "firmware update\n");
700 return tape_34xx_erp_failed(request, -EIO); 695 return tape_34xx_erp_failed(request, -EIO);
701 case 0x4c: 696 case 0x4c:
702 /* 697 /*
@@ -721,8 +716,8 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
721 * the block to be written is larger than allowed for 716 * the block to be written is larger than allowed for
722 * buffered mode. 717 * buffered mode.
723 */ 718 */
724 PRINT_WARN("Maximum block size for buffered " 719 dev_warn (&device->cdev->dev, "The maximum block size"
725 "mode exceeded.\n"); 720 " for buffered mode is exceeded\n");
726 return tape_34xx_erp_failed(request, -ENOBUFS); 721 return tape_34xx_erp_failed(request, -ENOBUFS);
727 } 722 }
728 /* This erpa is reserved for 3480. */ 723 /* This erpa is reserved for 3480. */
@@ -759,22 +754,20 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
759 return tape_34xx_erp_retry(request); 754 return tape_34xx_erp_retry(request);
760 case 0x55: 755 case 0x55:
761 /* Channel interface recovery (permanent). */ 756 /* Channel interface recovery (permanent). */
762 PRINT_WARN("A permanent channel interface error occurred.\n"); 757 dev_warn (&device->cdev->dev, "A channel interface error cannot be"
758 " recovered\n");
763 return tape_34xx_erp_failed(request, -EIO); 759 return tape_34xx_erp_failed(request, -EIO);
764 case 0x56: 760 case 0x56:
765 /* Channel protocol error. */ 761 /* Channel protocol error. */
766 PRINT_WARN("A channel protocol error occurred.\n"); 762 dev_warn (&device->cdev->dev, "A channel protocol error "
763 "occurred\n");
767 return tape_34xx_erp_failed(request, -EIO); 764 return tape_34xx_erp_failed(request, -EIO);
768 case 0x57: 765 case 0x57:
769 if (device->cdev->id.driver_info == tape_3480) { 766 if (device->cdev->id.driver_info == tape_3480) {
770 /* Attention intercept. */ 767 /* Attention intercept. */
771 PRINT_WARN("An attention intercept occurred, "
772 "which will be recovered.\n");
773 return tape_34xx_erp_retry(request); 768 return tape_34xx_erp_retry(request);
774 } else { 769 } else {
775 /* Global status intercept. */ 770 /* Global status intercept. */
776 PRINT_WARN("An global status intercept was received, "
777 "which will be recovered.\n");
778 return tape_34xx_erp_retry(request); 771 return tape_34xx_erp_retry(request);
779 } 772 }
780 case 0x5a: 773 case 0x5a:
@@ -782,42 +775,31 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
782 * Tape length incompatible. The tape inserted is too long, 775 * Tape length incompatible. The tape inserted is too long,
783 * which could cause damage to the tape or the drive. 776 * which could cause damage to the tape or the drive.
784 */ 777 */
785 PRINT_WARN("Tape Length Incompatible\n"); 778 dev_warn (&device->cdev->dev, "The tape unit does not support "
786 PRINT_WARN("Tape length exceeds IBM enhanced capacity " 779 "the tape length\n");
787 "cartdridge length or a medium\n");
788 PRINT_WARN("with EC-CST identification mark has been mounted "
789 "in a device that writes\n");
790 PRINT_WARN("3480 or 3480 XF format.\n");
791 return tape_34xx_erp_failed(request, -EIO); 780 return tape_34xx_erp_failed(request, -EIO);
792 case 0x5b: 781 case 0x5b:
793 /* Format 3480 XF incompatible */ 782 /* Format 3480 XF incompatible */
794 if (sense[1] & SENSE_BEGINNING_OF_TAPE) 783 if (sense[1] & SENSE_BEGINNING_OF_TAPE)
795 /* The tape will get overwritten. */ 784 /* The tape will get overwritten. */
796 return tape_34xx_erp_retry(request); 785 return tape_34xx_erp_retry(request);
797 PRINT_WARN("Format 3480 XF Incompatible\n"); 786 dev_warn (&device->cdev->dev, "The tape unit does not support"
798 PRINT_WARN("Medium has been created in 3480 format. " 787 " format 3480 XF\n");
799 "To change the format writes\n");
800 PRINT_WARN("must be issued at BOT.\n");
801 return tape_34xx_erp_failed(request, -EIO); 788 return tape_34xx_erp_failed(request, -EIO);
802 case 0x5c: 789 case 0x5c:
803 /* Format 3480-2 XF incompatible */ 790 /* Format 3480-2 XF incompatible */
804 PRINT_WARN("Format 3480-2 XF Incompatible\n"); 791 dev_warn (&device->cdev->dev, "The tape unit does not support tape "
805 PRINT_WARN("Device can only read 3480 or 3480 XF format.\n"); 792 "format 3480-2 XF\n");
806 return tape_34xx_erp_failed(request, -EIO); 793 return tape_34xx_erp_failed(request, -EIO);
807 case 0x5d: 794 case 0x5d:
808 /* Tape length violation. */ 795 /* Tape length violation. */
809 PRINT_WARN("Tape Length Violation\n"); 796 dev_warn (&device->cdev->dev, "The tape unit does not support"
810 PRINT_WARN("The mounted tape exceeds IBM Enhanced Capacity " 797 " the current tape length\n");
811 "Cartdridge System Tape length.\n");
812 PRINT_WARN("This may cause damage to the drive or tape when "
813 "processing to the EOV\n");
814 return tape_34xx_erp_failed(request, -EMEDIUMTYPE); 798 return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
815 case 0x5e: 799 case 0x5e:
816 /* Compaction algorithm incompatible. */ 800 /* Compaction algorithm incompatible. */
817 PRINT_WARN("Compaction Algorithm Incompatible\n"); 801 dev_warn (&device->cdev->dev, "The tape unit does not support"
818 PRINT_WARN("The volume is recorded using an incompatible " 802 " the compaction algorithm\n");
819 "compaction algorithm,\n");
820 PRINT_WARN("which is not supported by the device.\n");
821 return tape_34xx_erp_failed(request, -EMEDIUMTYPE); 803 return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
822 804
823 /* The following erpas should have been covered earlier. */ 805 /* The following erpas should have been covered earlier. */
@@ -848,7 +830,6 @@ tape_34xx_irq(struct tape_device *device, struct tape_request *request,
848 (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) && 830 (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
849 (request->op == TO_WRI)) { 831 (request->op == TO_WRI)) {
850 /* Write at end of volume */ 832 /* Write at end of volume */
851 PRINT_INFO("End of volume\n"); /* XXX */
852 return tape_34xx_erp_failed(request, -ENOSPC); 833 return tape_34xx_erp_failed(request, -ENOSPC);
853 } 834 }
854 835
@@ -869,9 +850,7 @@ tape_34xx_irq(struct tape_device *device, struct tape_request *request,
869 } 850 }
870 851
871 DBF_EVENT(6, "xunknownirq\n"); 852 DBF_EVENT(6, "xunknownirq\n");
872 PRINT_ERR("Unexpected interrupt.\n"); 853 tape_dump_sense_dbf(device, request, irb);
873 PRINT_ERR("Current op is: %s", tape_op_verbose[request->op]);
874 tape_dump_sense(device, request, irb);
875 return TAPE_IO_STOP; 854 return TAPE_IO_STOP;
876} 855}
877 856
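Throughout tape_34xx.c the multi-line PRINT_* narratives collapse into one dev_warn() per sense condition, with the raw sense data going to the debug feature through tape_dump_sense_dbf(). A condensed sketch of that reporting shape, reusing only calls that already appear in the hunks above; my_report_write_protect is a placeholder:

	/* Sketch: one concise, device-prefixed message plus a DBF trace of the
	 * sense data, instead of several unprefixed console lines. */
	static int my_report_write_protect(struct tape_device *device,
					   struct tape_request *request,
					   struct irb *irb)
	{
		dev_warn(&device->cdev->dev,
			 "The tape medium is write-protected\n");
		tape_dump_sense_dbf(device, request, irb);
		return tape_34xx_erp_failed(request, -EACCES);
	}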
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 71605a179d65..fc1d91294143 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -8,12 +8,15 @@
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */ 9 */
10 10
11#define KMSG_COMPONENT "tape"
12
11#include <linux/module.h> 13#include <linux/module.h>
12#include <linux/init.h> 14#include <linux/init.h>
13#include <linux/bio.h> 15#include <linux/bio.h>
14#include <asm/ebcdic.h> 16#include <asm/ebcdic.h>
15 17
16#define TAPE_DBF_AREA tape_3590_dbf 18#define TAPE_DBF_AREA tape_3590_dbf
19#define BUFSIZE 512 /* size of buffers for dynamic generated messages */
17 20
18#include "tape.h" 21#include "tape.h"
19#include "tape_std.h" 22#include "tape_std.h"
@@ -36,7 +39,7 @@ EXPORT_SYMBOL(TAPE_DBF_AREA);
36 * - Read Alternate: implemented 39 * - Read Alternate: implemented
37 *******************************************************************/ 40 *******************************************************************/
38 41
39#define PRINTK_HEADER "TAPE_3590: " 42#define KMSG_COMPONENT "tape"
40 43
41static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = { 44static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
42 [0x00] = "", 45 [0x00] = "",
@@ -661,8 +664,7 @@ tape_3590_bread(struct tape_device *device, struct request *req)
661 ccw++; 664 ccw++;
662 dst += TAPEBLOCK_HSEC_SIZE; 665 dst += TAPEBLOCK_HSEC_SIZE;
663 } 666 }
664 if (off > bv->bv_len) 667 BUG_ON(off > bv->bv_len);
665 BUG();
666 } 668 }
667 ccw = tape_ccw_end(ccw, NOP, 0, NULL); 669 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
668 DBF_EVENT(6, "xBREDccwg\n"); 670 DBF_EVENT(6, "xBREDccwg\n");
@@ -726,7 +728,7 @@ static void tape_3590_med_state_set(struct tape_device *device,
726 } 728 }
727 c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK; 729 c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
728 if (sense->flags & MSENSE_CRYPT_MASK) { 730 if (sense->flags & MSENSE_CRYPT_MASK) {
729 PRINT_INFO("Medium is encrypted (%04x)\n", sense->flags); 731 DBF_EVENT(6, "Medium is encrypted (%04x)\n", sense->flags);
730 c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK; 732 c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK;
731 } else { 733 } else {
732 DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags); 734 DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags);
@@ -847,8 +849,7 @@ tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb)
847 tape_3590_schedule_work(device, TO_READ_ATTMSG); 849 tape_3590_schedule_work(device, TO_READ_ATTMSG);
848 } else { 850 } else {
849 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id); 851 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
850 PRINT_WARN("Unsolicited IRQ (Device End) caught.\n"); 852 tape_dump_sense_dbf(device, NULL, irb);
851 tape_dump_sense(device, NULL, irb);
852 } 853 }
853 /* check medium state */ 854 /* check medium state */
854 tape_3590_schedule_work(device, TO_MSEN); 855 tape_3590_schedule_work(device, TO_MSEN);
@@ -876,8 +877,6 @@ tape_3590_erp_basic(struct tape_device *device, struct tape_request *request,
876 case SENSE_BRA_DRE: 877 case SENSE_BRA_DRE:
877 return tape_3590_erp_failed(device, request, irb, rc); 878 return tape_3590_erp_failed(device, request, irb, rc);
878 default: 879 default:
879 PRINT_ERR("Unknown BRA %x - This should not happen!\n",
880 sense->bra);
881 BUG(); 880 BUG();
882 return TAPE_IO_STOP; 881 return TAPE_IO_STOP;
883 } 882 }
@@ -910,7 +909,8 @@ tape_3590_erp_swap(struct tape_device *device, struct tape_request *request,
910 * should proceed with the new tape... this 909 * should proceed with the new tape... this
911 * should probably be done in user space! 910 * should probably be done in user space!
912 */ 911 */
913 PRINT_WARN("(%s): Swap Tape Device!\n", dev_name(&device->cdev->dev)); 912 dev_warn (&device->cdev->dev, "The tape medium must be loaded into a "
913 "different tape unit\n");
914 return tape_3590_erp_basic(device, request, irb, -EIO); 914 return tape_3590_erp_basic(device, request, irb, -EIO);
915} 915}
916 916
@@ -985,8 +985,6 @@ tape_3590_erp_read_opposite(struct tape_device *device,
985 return tape_3590_erp_failed(device, request, irb, -EIO); 985 return tape_3590_erp_failed(device, request, irb, -EIO);
986 break; 986 break;
987 default: 987 default:
988 PRINT_WARN("read_opposite_recovery_called_with_op: %s\n",
989 tape_op_verbose[request->op]);
990 return tape_3590_erp_failed(device, request, irb, -EIO); 988 return tape_3590_erp_failed(device, request, irb, -EIO);
991 } 989 }
992} 990}
@@ -998,50 +996,61 @@ static void
998tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb) 996tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb)
999{ 997{
1000 struct tape_3590_sense *sense; 998 struct tape_3590_sense *sense;
999 char *exception, *service;
1000
1001 exception = kmalloc(BUFSIZE, GFP_ATOMIC);
1002 service = kmalloc(BUFSIZE, GFP_ATOMIC);
1003
1004 if (!exception || !service)
1005 goto out_nomem;
1001 1006
1002 sense = (struct tape_3590_sense *) irb->ecw; 1007 sense = (struct tape_3590_sense *) irb->ecw;
1003 /* Exception Message */ 1008 /* Exception Message */
1004 switch (sense->fmt.f70.emc) { 1009 switch (sense->fmt.f70.emc) {
1005 case 0x02: 1010 case 0x02:
1006 PRINT_WARN("(%s): Data degraded\n", 1011 snprintf(exception, BUFSIZE, "Data degraded");
1007 dev_name(&device->cdev->dev));
1008 break; 1012 break;
1009 case 0x03: 1013 case 0x03:
1010 PRINT_WARN("(%s): Data degraded in partion %i\n", 1014 snprintf(exception, BUFSIZE, "Data degraded in partition %i",
1011 dev_name(&device->cdev->dev), sense->fmt.f70.mp); 1015 sense->fmt.f70.mp);
1012 break; 1016 break;
1013 case 0x04: 1017 case 0x04:
1014 PRINT_WARN("(%s): Medium degraded\n", 1018 snprintf(exception, BUFSIZE, "Medium degraded");
1015 dev_name(&device->cdev->dev));
1016 break; 1019 break;
1017 case 0x05: 1020 case 0x05:
1018 PRINT_WARN("(%s): Medium degraded in partition %i\n", 1021 snprintf(exception, BUFSIZE, "Medium degraded in partition %i",
1019 dev_name(&device->cdev->dev), sense->fmt.f70.mp); 1022 sense->fmt.f70.mp);
1020 break; 1023 break;
1021 case 0x06: 1024 case 0x06:
1022 PRINT_WARN("(%s): Block 0 Error\n", 1025 snprintf(exception, BUFSIZE, "Block 0 Error");
1023 dev_name(&device->cdev->dev));
1024 break; 1026 break;
1025 case 0x07: 1027 case 0x07:
1026 PRINT_WARN("(%s): Medium Exception 0x%02x\n", 1028 snprintf(exception, BUFSIZE, "Medium Exception 0x%02x",
1027 dev_name(&device->cdev->dev), sense->fmt.f70.md); 1029 sense->fmt.f70.md);
1028 break; 1030 break;
1029 default: 1031 default:
1030 PRINT_WARN("(%s): MIM ExMsg: 0x%02x\n", 1032 snprintf(exception, BUFSIZE, "0x%02x",
1031 dev_name(&device->cdev->dev), sense->fmt.f70.emc); 1033 sense->fmt.f70.emc);
1032 break; 1034 break;
1033 } 1035 }
1034 /* Service Message */ 1036 /* Service Message */
1035 switch (sense->fmt.f70.smc) { 1037 switch (sense->fmt.f70.smc) {
1036 case 0x02: 1038 case 0x02:
1037 PRINT_WARN("(%s): Reference Media maintenance procedure %i\n", 1039 snprintf(service, BUFSIZE, "Reference Media maintenance "
1038 dev_name(&device->cdev->dev), sense->fmt.f70.md); 1040 "procedure %i", sense->fmt.f70.md);
1039 break; 1041 break;
1040 default: 1042 default:
1041 PRINT_WARN("(%s): MIM ServiceMsg: 0x%02x\n", 1043 snprintf(service, BUFSIZE, "0x%02x",
1042 dev_name(&device->cdev->dev), sense->fmt.f70.smc); 1044 sense->fmt.f70.smc);
1043 break; 1045 break;
1044 } 1046 }
1047
1048 dev_warn (&device->cdev->dev, "Tape media information: exception %s, "
1049 "service %s\n", exception, service);
1050
1051out_nomem:
1052 kfree(exception);
1053 kfree(service);
1045} 1054}
1046 1055
1047/* 1056/*
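The hunk above reworks the MIM message handler: instead of one PRINT_WARN per case it formats the exception and service texts into GFP_ATOMIC buffers and emits a single dev_warn() at the end. A stripped-down sketch of that build-then-print pattern, using only calls from the hunk; my_print_mim is a placeholder and the fallback text is illustrative:

	/* Sketch of the buffer-and-single-message pattern; GFP_ATOMIC because
	 * the sense handlers may run in interrupt context. */
	static void my_print_mim(struct tape_device *device, unsigned int emc)
	{
		char *exception;

		exception = kmalloc(BUFSIZE, GFP_ATOMIC);
		if (!exception)
			return;
		snprintf(exception, BUFSIZE, "0x%02x", emc);
		dev_warn(&device->cdev->dev,
			 "Tape media information: exception %s\n", exception);
		kfree(exception);
	}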
@@ -1051,108 +1060,108 @@ static void
1051tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb) 1060tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb)
1052{ 1061{
1053 struct tape_3590_sense *sense; 1062 struct tape_3590_sense *sense;
1063 char *exception, *service;
1064
1065 exception = kmalloc(BUFSIZE, GFP_ATOMIC);
1066 service = kmalloc(BUFSIZE, GFP_ATOMIC);
1067
1068 if (!exception || !service)
1069 goto out_nomem;
1054 1070
1055 sense = (struct tape_3590_sense *) irb->ecw; 1071 sense = (struct tape_3590_sense *) irb->ecw;
1056 /* Exception Message */ 1072 /* Exception Message */
1057 switch (sense->fmt.f71.emc) { 1073 switch (sense->fmt.f71.emc) {
1058 case 0x01: 1074 case 0x01:
1059 PRINT_WARN("(%s): Effect of failure is unknown\n", 1075 snprintf(exception, BUFSIZE, "Effect of failure is unknown");
1060 dev_name(&device->cdev->dev));
1061 break; 1076 break;
1062 case 0x02: 1077 case 0x02:
1063 PRINT_WARN("(%s): CU Exception - no performance impact\n", 1078 snprintf(exception, BUFSIZE, "CU Exception - no performance "
1064 dev_name(&device->cdev->dev)); 1079 "impact");
1065 break; 1080 break;
1066 case 0x03: 1081 case 0x03:
1067 PRINT_WARN("(%s): CU Exception on channel interface 0x%02x\n", 1082 snprintf(exception, BUFSIZE, "CU Exception on channel "
1068 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]); 1083 "interface 0x%02x", sense->fmt.f71.md[0]);
1069 break; 1084 break;
1070 case 0x04: 1085 case 0x04:
1071 PRINT_WARN("(%s): CU Exception on device path 0x%02x\n", 1086 snprintf(exception, BUFSIZE, "CU Exception on device path "
1072 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]); 1087 "0x%02x", sense->fmt.f71.md[0]);
1073 break; 1088 break;
1074 case 0x05: 1089 case 0x05:
1075 PRINT_WARN("(%s): CU Exception on library path 0x%02x\n", 1090 snprintf(exception, BUFSIZE, "CU Exception on library path "
1076 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]); 1091 "0x%02x", sense->fmt.f71.md[0]);
1077 break; 1092 break;
1078 case 0x06: 1093 case 0x06:
1079 PRINT_WARN("(%s): CU Exception on node 0x%02x\n", 1094 snprintf(exception, BUFSIZE, "CU Exception on node 0x%02x",
1080 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]); 1095 sense->fmt.f71.md[0]);
1081 break; 1096 break;
1082 case 0x07: 1097 case 0x07:
1083 PRINT_WARN("(%s): CU Exception on partition 0x%02x\n", 1098 snprintf(exception, BUFSIZE, "CU Exception on partition "
1084 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]); 1099 "0x%02x", sense->fmt.f71.md[0]);
1085 break; 1100 break;
1086 default: 1101 default:
1087 PRINT_WARN("(%s): SIM ExMsg: 0x%02x\n", 1102 snprintf(exception, BUFSIZE, "0x%02x",
1088 dev_name(&device->cdev->dev), sense->fmt.f71.emc); 1103 sense->fmt.f71.emc);
1089 } 1104 }
1090 /* Service Message */ 1105 /* Service Message */
1091 switch (sense->fmt.f71.smc) { 1106 switch (sense->fmt.f71.smc) {
1092 case 0x01: 1107 case 0x01:
1093 PRINT_WARN("(%s): Repair impact is unknown\n", 1108 snprintf(service, BUFSIZE, "Repair impact is unknown");
1094 dev_name(&device->cdev->dev));
1095 break; 1109 break;
1096 case 0x02: 1110 case 0x02:
1097 PRINT_WARN("(%s): Repair will not impact cu performance\n", 1111 snprintf(service, BUFSIZE, "Repair will not impact cu "
1098 dev_name(&device->cdev->dev)); 1112 "performance");
1099 break; 1113 break;
1100 case 0x03: 1114 case 0x03:
1101 if (sense->fmt.f71.mdf == 0) 1115 if (sense->fmt.f71.mdf == 0)
1102 PRINT_WARN("(%s): Repair will disable node " 1116 snprintf(service, BUFSIZE, "Repair will disable node "
1103 "0x%x on CU\n", 1117 "0x%x on CU", sense->fmt.f71.md[1]);
1104 dev_name(&device->cdev->dev),
1105 sense->fmt.f71.md[1]);
1106 else 1118 else
1107 PRINT_WARN("(%s): Repair will disable nodes " 1119 snprintf(service, BUFSIZE, "Repair will disable "
1108 "(0x%x-0x%x) on CU\n", 1120 "nodes (0x%x-0x%x) on CU", sense->fmt.f71.md[1],
1109 dev_name(&device->cdev->dev), 1121 sense->fmt.f71.md[2]);
1110 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1111 break; 1122 break;
1112 case 0x04: 1123 case 0x04:
1113 if (sense->fmt.f71.mdf == 0) 1124 if (sense->fmt.f71.mdf == 0)
1114 PRINT_WARN("(%s): Repair will disable cannel path " 1125 snprintf(service, BUFSIZE, "Repair will disable "
1115 "0x%x on CU\n", 1126 "channel path 0x%x on CU",
1116 dev_name(&device->cdev->dev), 1127 sense->fmt.f71.md[1]);
1117 sense->fmt.f71.md[1]);
1118 else 1128 else
1119 PRINT_WARN("(%s): Repair will disable cannel paths " 1129 snprintf(service, BUFSIZE, "Repair will disable channel"
1120 "(0x%x-0x%x) on CU\n", 1130 " paths (0x%x-0x%x) on CU",
1121 dev_name(&device->cdev->dev), 1131 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1122 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1123 break; 1132 break;
1124 case 0x05: 1133 case 0x05:
1125 if (sense->fmt.f71.mdf == 0) 1134 if (sense->fmt.f71.mdf == 0)
1126 PRINT_WARN("(%s): Repair will disable device path " 1135 snprintf(service, BUFSIZE, "Repair will disable device"
1127 "0x%x on CU\n", 1136 " path 0x%x on CU", sense->fmt.f71.md[1]);
1128 dev_name(&device->cdev->dev),
1129 sense->fmt.f71.md[1]);
1130 else 1137 else
1131 PRINT_WARN("(%s): Repair will disable device paths " 1138 snprintf(service, BUFSIZE, "Repair will disable device"
1132 "(0x%x-0x%x) on CU\n", 1139 " paths (0x%x-0x%x) on CU",
1133 dev_name(&device->cdev->dev), 1140 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1134 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1135 break; 1141 break;
1136 case 0x06: 1142 case 0x06:
1137 if (sense->fmt.f71.mdf == 0) 1143 if (sense->fmt.f71.mdf == 0)
1138 PRINT_WARN("(%s): Repair will disable library path " 1144 snprintf(service, BUFSIZE, "Repair will disable "
1139 "0x%x on CU\n", 1145 "library path 0x%x on CU",
1140 dev_name(&device->cdev->dev), 1146 sense->fmt.f71.md[1]);
1141 sense->fmt.f71.md[1]);
1142 else 1147 else
1143 PRINT_WARN("(%s): Repair will disable library paths " 1148 snprintf(service, BUFSIZE, "Repair will disable "
1144 "(0x%x-0x%x) on CU\n", 1149 "library paths (0x%x-0x%x) on CU",
1145 dev_name(&device->cdev->dev), 1150 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1146 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1147 break; 1151 break;
1148 case 0x07: 1152 case 0x07:
1149 PRINT_WARN("(%s): Repair will disable access to CU\n", 1153 snprintf(service, BUFSIZE, "Repair will disable access to CU");
1150 dev_name(&device->cdev->dev));
1151 break; 1154 break;
1152 default: 1155 default:
1153 PRINT_WARN("(%s): SIM ServiceMsg: 0x%02x\n", 1156 snprintf(service, BUFSIZE, "0x%02x",
1154 dev_name(&device->cdev->dev), sense->fmt.f71.smc); 1157 sense->fmt.f71.smc);
1155 } 1158 }
1159
1160 dev_warn (&device->cdev->dev, "I/O subsystem information: exception"
1161 " %s, service %s\n", exception, service);
1162out_nomem:
1163 kfree(exception);
1164 kfree(service);
1156} 1165}
1157 1166
1158/* 1167/*
@@ -1162,111 +1171,109 @@ static void
1162tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb) 1171tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb)
1163{ 1172{
1164 struct tape_3590_sense *sense; 1173 struct tape_3590_sense *sense;
1174 char *exception, *service;
1175
1176 exception = kmalloc(BUFSIZE, GFP_ATOMIC);
1177 service = kmalloc(BUFSIZE, GFP_ATOMIC);
1178
1179 if (!exception || !service)
1180 goto out_nomem;
1165 1181
1166 sense = (struct tape_3590_sense *) irb->ecw; 1182 sense = (struct tape_3590_sense *) irb->ecw;
1167 /* Exception Message */ 1183 /* Exception Message */
1168 switch (sense->fmt.f71.emc) { 1184 switch (sense->fmt.f71.emc) {
1169 case 0x01: 1185 case 0x01:
1170 PRINT_WARN("(%s): Effect of failure is unknown\n", 1186 snprintf(exception, BUFSIZE, "Effect of failure is unknown");
1171 dev_name(&device->cdev->dev));
1172 break; 1187 break;
1173 case 0x02: 1188 case 0x02:
1174 PRINT_WARN("(%s): DV Exception - no performance impact\n", 1189 snprintf(exception, BUFSIZE, "DV Exception - no performance"
1175 dev_name(&device->cdev->dev)); 1190 " impact");
1176 break; 1191 break;
1177 case 0x03: 1192 case 0x03:
1178 PRINT_WARN("(%s): DV Exception on channel interface 0x%02x\n", 1193 snprintf(exception, BUFSIZE, "DV Exception on channel "
1179 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]); 1194 "interface 0x%02x", sense->fmt.f71.md[0]);
1180 break; 1195 break;
1181 case 0x04: 1196 case 0x04:
1182 PRINT_WARN("(%s): DV Exception on loader 0x%02x\n", 1197 snprintf(exception, BUFSIZE, "DV Exception on loader 0x%02x",
1183 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]); 1198 sense->fmt.f71.md[0]);
1184 break; 1199 break;
1185 case 0x05: 1200 case 0x05:
1186 PRINT_WARN("(%s): DV Exception on message display 0x%02x\n", 1201 snprintf(exception, BUFSIZE, "DV Exception on message display"
1187 dev_name(&device->cdev->dev), sense->fmt.f71.md[0]); 1202 " 0x%02x", sense->fmt.f71.md[0]);
1188 break; 1203 break;
1189 case 0x06: 1204 case 0x06:
1190 PRINT_WARN("(%s): DV Exception in tape path\n", 1205 snprintf(exception, BUFSIZE, "DV Exception in tape path");
1191 dev_name(&device->cdev->dev));
1192 break; 1206 break;
1193 case 0x07: 1207 case 0x07:
1194 PRINT_WARN("(%s): DV Exception in drive\n", 1208 snprintf(exception, BUFSIZE, "DV Exception in drive");
1195 dev_name(&device->cdev->dev));
1196 break; 1209 break;
1197 default: 1210 default:
1198 PRINT_WARN("(%s): DSIM ExMsg: 0x%02x\n", 1211 snprintf(exception, BUFSIZE, "0x%02x",
1199 dev_name(&device->cdev->dev), sense->fmt.f71.emc); 1212 sense->fmt.f71.emc);
1200 } 1213 }
1201 /* Service Message */ 1214 /* Service Message */
1202 switch (sense->fmt.f71.smc) { 1215 switch (sense->fmt.f71.smc) {
1203 case 0x01: 1216 case 0x01:
1204 PRINT_WARN("(%s): Repair impact is unknown\n", 1217 snprintf(service, BUFSIZE, "Repair impact is unknown");
1205 dev_name(&device->cdev->dev));
1206 break; 1218 break;
1207 case 0x02: 1219 case 0x02:
1208 PRINT_WARN("(%s): Repair will not impact device performance\n", 1220 snprintf(service, BUFSIZE, "Repair will not impact device "
1209 dev_name(&device->cdev->dev)); 1221 "performance");
1210 break; 1222 break;
1211 case 0x03: 1223 case 0x03:
1212 if (sense->fmt.f71.mdf == 0) 1224 if (sense->fmt.f71.mdf == 0)
1213 PRINT_WARN("(%s): Repair will disable channel path " 1225 snprintf(service, BUFSIZE, "Repair will disable "
1214 "0x%x on DV\n", 1226 "channel path 0x%x on DV",
1215 dev_name(&device->cdev->dev), 1227 sense->fmt.f71.md[1]);
1216 sense->fmt.f71.md[1]);
1217 else 1228 else
1218 PRINT_WARN("(%s): Repair will disable channel path " 1229 snprintf(service, BUFSIZE, "Repair will disable "
1219 "(0x%x-0x%x) on DV\n", 1230 "channel path (0x%x-0x%x) on DV",
1220 dev_name(&device->cdev->dev), 1231 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1221 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1222 break; 1232 break;
1223 case 0x04: 1233 case 0x04:
1224 if (sense->fmt.f71.mdf == 0) 1234 if (sense->fmt.f71.mdf == 0)
1225 PRINT_WARN("(%s): Repair will disable interface 0x%x " 1235 snprintf(service, BUFSIZE, "Repair will disable "
1226 "on DV\n", 1236 "interface 0x%x on DV", sense->fmt.f71.md[1]);
1227 dev_name(&device->cdev->dev),
1228 sense->fmt.f71.md[1]);
1229 else 1237 else
1230 PRINT_WARN("(%s): Repair will disable interfaces " 1238 snprintf(service, BUFSIZE, "Repair will disable "
1231 "(0x%x-0x%x) on DV\n", 1239 "interfaces (0x%x-0x%x) on DV",
1232 dev_name(&device->cdev->dev), 1240 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1233 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1234 break; 1241 break;
1235 case 0x05: 1242 case 0x05:
1236 if (sense->fmt.f71.mdf == 0) 1243 if (sense->fmt.f71.mdf == 0)
1237 PRINT_WARN("(%s): Repair will disable loader 0x%x " 1244 snprintf(service, BUFSIZE, "Repair will disable loader"
1238 "on DV\n", 1245 " 0x%x on DV", sense->fmt.f71.md[1]);
1239 dev_name(&device->cdev->dev),
1240 sense->fmt.f71.md[1]);
1241 else 1246 else
1242 PRINT_WARN("(%s): Repair will disable loader " 1247 snprintf(service, BUFSIZE, "Repair will disable loader"
1243 "(0x%x-0x%x) on DV\n", 1248 " (0x%x-0x%x) on DV",
1244 dev_name(&device->cdev->dev), 1249 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1245 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1246 break; 1250 break;
1247 case 0x07: 1251 case 0x07:
1248 PRINT_WARN("(%s): Repair will disable access to DV\n", 1252 snprintf(service, BUFSIZE, "Repair will disable access to DV");
1249 dev_name(&device->cdev->dev));
1250 break; 1253 break;
1251 case 0x08: 1254 case 0x08:
1252 if (sense->fmt.f71.mdf == 0) 1255 if (sense->fmt.f71.mdf == 0)
1253 PRINT_WARN("(%s): Repair will disable message " 1256 snprintf(service, BUFSIZE, "Repair will disable "
1254 "display 0x%x on DV\n", 1257 "message display 0x%x on DV",
1255 dev_name(&device->cdev->dev), 1258 sense->fmt.f71.md[1]);
1256 sense->fmt.f71.md[1]);
1257 else 1259 else
1258 PRINT_WARN("(%s): Repair will disable message " 1260 snprintf(service, BUFSIZE, "Repair will disable "
1259 "displays (0x%x-0x%x) on DV\n", 1261 "message displays (0x%x-0x%x) on DV",
1260 dev_name(&device->cdev->dev), 1262 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1261 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1262 break; 1263 break;
1263 case 0x09: 1264 case 0x09:
1264 PRINT_WARN("(%s): Clean DV\n", dev_name(&device->cdev->dev)); 1265 snprintf(service, BUFSIZE, "Clean DV");
1265 break; 1266 break;
1266 default: 1267 default:
1267 PRINT_WARN("(%s): DSIM ServiceMsg: 0x%02x\n", 1268 snprintf(service, BUFSIZE, "0x%02x",
1268 dev_name(&device->cdev->dev), sense->fmt.f71.smc); 1269 sense->fmt.f71.smc);
1269 } 1270 }
1271
1272 dev_warn (&device->cdev->dev, "Device subsystem information: exception"
1273 " %s, service %s\n", exception, service);
1274out_nomem:
1275 kfree(exception);
1276 kfree(service);
1270} 1277}
1271 1278
1272/* 1279/*
@@ -1282,46 +1289,44 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
1282 return; 1289 return;
1283 if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) { 1290 if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) {
1284 if (tape_3590_msg[sense->mc] != NULL) 1291 if (tape_3590_msg[sense->mc] != NULL)
1285 PRINT_WARN("(%s): %s\n", dev_name(&device->cdev->dev), 1292 dev_warn (&device->cdev->dev, "The tape unit has "
1286 tape_3590_msg[sense->mc]); 1293 "issued sense message %s\n",
1287 else { 1294 tape_3590_msg[sense->mc]);
1288 PRINT_WARN("(%s): Message Code 0x%x\n", 1295 else
1289 dev_name(&device->cdev->dev), sense->mc); 1296 dev_warn (&device->cdev->dev, "The tape unit has "
1290 } 1297 "issued an unknown sense message code 0x%x\n",
1298 sense->mc);
1291 return; 1299 return;
1292 } 1300 }
1293 if (sense->mc == 0xf0) { 1301 if (sense->mc == 0xf0) {
1294 /* Standard Media Information Message */ 1302 /* Standard Media Information Message */
1295 PRINT_WARN("(%s): MIM SEV=%i, MC=%02x, ES=%x/%x, " 1303 dev_warn (&device->cdev->dev, "MIM SEV=%i, MC=%02x, ES=%x/%x, "
1296 "RC=%02x-%04x-%02x\n", dev_name(&device->cdev->dev), 1304 "RC=%02x-%04x-%02x\n", sense->fmt.f70.sev, sense->mc,
1297 sense->fmt.f70.sev, sense->mc, 1305 sense->fmt.f70.emc, sense->fmt.f70.smc,
1298 sense->fmt.f70.emc, sense->fmt.f70.smc, 1306 sense->fmt.f70.refcode, sense->fmt.f70.mid,
1299 sense->fmt.f70.refcode, sense->fmt.f70.mid, 1307 sense->fmt.f70.fid);
1300 sense->fmt.f70.fid);
1301 tape_3590_print_mim_msg_f0(device, irb); 1308 tape_3590_print_mim_msg_f0(device, irb);
1302 return; 1309 return;
1303 } 1310 }
1304 if (sense->mc == 0xf1) { 1311 if (sense->mc == 0xf1) {
1305 /* Standard I/O Subsystem Service Information Message */ 1312 /* Standard I/O Subsystem Service Information Message */
1306 PRINT_WARN("(%s): IOSIM SEV=%i, DEVTYPE=3590/%02x, " 1313 dev_warn (&device->cdev->dev, "IOSIM SEV=%i, DEVTYPE=3590/%02x,"
1307 "MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n", 1314 " MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
1308 dev_name(&device->cdev->dev), sense->fmt.f71.sev, 1315 sense->fmt.f71.sev, device->cdev->id.dev_model,
1309 device->cdev->id.dev_model, 1316 sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc,
1310 sense->mc, sense->fmt.f71.emc, 1317 sense->fmt.f71.refcode1, sense->fmt.f71.refcode2,
1311 sense->fmt.f71.smc, sense->fmt.f71.refcode1, 1318 sense->fmt.f71.refcode3);
1312 sense->fmt.f71.refcode2, sense->fmt.f71.refcode3);
1313 tape_3590_print_io_sim_msg_f1(device, irb); 1319 tape_3590_print_io_sim_msg_f1(device, irb);
1314 return; 1320 return;
1315 } 1321 }
1316 if (sense->mc == 0xf2) { 1322 if (sense->mc == 0xf2) {
1317 /* Standard Device Service Information Message */ 1323 /* Standard Device Service Information Message */
1318 PRINT_WARN("(%s): DEVSIM SEV=%i, DEVTYPE=3590/%02x, " 1324 dev_warn (&device->cdev->dev, "DEVSIM SEV=%i, DEVTYPE=3590/%02x"
1319 "MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n", 1325 ", MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
1320 dev_name(&device->cdev->dev), sense->fmt.f71.sev, 1326 sense->fmt.f71.sev, device->cdev->id.dev_model,
1321 device->cdev->id.dev_model, 1327 sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc,
1322 sense->mc, sense->fmt.f71.emc, 1328 sense->fmt.f71.refcode1, sense->fmt.f71.refcode2,
1323 sense->fmt.f71.smc, sense->fmt.f71.refcode1, 1329 sense->fmt.f71.refcode3);
1324 sense->fmt.f71.refcode2, sense->fmt.f71.refcode3);
1325 tape_3590_print_dev_sim_msg_f2(device, irb); 1330 tape_3590_print_dev_sim_msg_f2(device, irb);
1326 return; 1331 return;
1327 } 1332 }
@@ -1329,8 +1334,8 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
1329 /* Standard Library Service Information Message */ 1334 /* Standard Library Service Information Message */
1330 return; 1335 return;
1331 } 1336 }
1332 PRINT_WARN("(%s): Device Message(%x)\n", 1337 dev_warn (&device->cdev->dev, "The tape unit has issued an unknown "
1333 dev_name(&device->cdev->dev), sense->mc); 1338 "sense message code %x\n", sense->mc);
1334} 1339}
1335 1340
1336static int tape_3590_crypt_error(struct tape_device *device, 1341static int tape_3590_crypt_error(struct tape_device *device,
@@ -1355,9 +1360,8 @@ static int tape_3590_crypt_error(struct tape_device *device,
1355 /* No connection to EKM */ 1360 /* No connection to EKM */
1356 return tape_3590_erp_basic(device, request, irb, -ENOTCONN); 1361 return tape_3590_erp_basic(device, request, irb, -ENOTCONN);
1357 1362
1358 PRINT_ERR("(%s): Unable to get encryption key from EKM\n", bus_id); 1363 dev_err (&device->cdev->dev, "The tape unit failed to obtain the "
1359 PRINT_ERR("(%s): CU=%02X DRIVE=%06X EKM=%02X:%04X\n", bus_id, cu_rc, 1364 "encryption key from EKM\n");
1360 drv_rc, ekm_rc1, ekm_rc2);
1361 1365
1362 return tape_3590_erp_basic(device, request, irb, -ENOKEY); 1366 return tape_3590_erp_basic(device, request, irb, -ENOKEY);
1363} 1367}
@@ -1443,8 +1447,6 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1443 * print additional msg since default msg 1447 * print additional msg since default msg
1444 * "device intervention" is not very meaningfull 1448 * "device intervention" is not very meaningfull
1445 */ 1449 */
1446 PRINT_WARN("(%s): Tape operation when medium not loaded\n",
1447 dev_name(&device->cdev->dev));
1448 tape_med_state_set(device, MS_UNLOADED); 1450 tape_med_state_set(device, MS_UNLOADED);
1449 tape_3590_schedule_work(device, TO_CRYPT_OFF); 1451 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1450 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); 1452 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
@@ -1490,19 +1492,13 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1490 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); 1492 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
1491 1493
1492 case 0x6020: 1494 case 0x6020:
1493 PRINT_WARN("(%s): Cartridge of wrong type ?\n",
1494 dev_name(&device->cdev->dev));
1495 return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE); 1495 return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE);
1496 1496
1497 case 0x8011: 1497 case 0x8011:
1498 PRINT_WARN("(%s): Another host has reserved the tape device\n",
1499 dev_name(&device->cdev->dev));
1500 return tape_3590_erp_basic(device, request, irb, -EPERM); 1498 return tape_3590_erp_basic(device, request, irb, -EPERM);
1501 case 0x8013: 1499 case 0x8013:
1502 PRINT_WARN("(%s): Another host has privileged access to the " 1500 dev_warn (&device->cdev->dev, "A different host has privileged"
1503 "tape device\n", dev_name(&device->cdev->dev)); 1501 " access to the tape unit\n");
1504 PRINT_WARN("(%s): To solve the problem unload the current "
1505 "cartridge!\n", dev_name(&device->cdev->dev));
1506 return tape_3590_erp_basic(device, request, irb, -EPERM); 1502 return tape_3590_erp_basic(device, request, irb, -EPERM);
1507 default: 1503 default:
1508 return tape_3590_erp_basic(device, request, irb, -EIO); 1504 return tape_3590_erp_basic(device, request, irb, -EIO);
@@ -1552,9 +1548,7 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request,
1552 } 1548 }
1553 1549
1554 DBF_EVENT(6, "xunknownirq\n"); 1550 DBF_EVENT(6, "xunknownirq\n");
1555 PRINT_ERR("Unexpected interrupt.\n"); 1551 tape_dump_sense_dbf(device, request, irb);
1556 PRINT_ERR("Current op is: %s", tape_op_verbose[request->op]);
1557 tape_dump_sense(device, request, irb);
1558 return TAPE_IO_STOP; 1552 return TAPE_IO_STOP;
1559} 1553}
1560 1554
@@ -1609,7 +1603,6 @@ tape_3590_setup_device(struct tape_device *device)
1609 if (rc) 1603 if (rc)
1610 goto fail_rdc_data; 1604 goto fail_rdc_data;
1611 if (rdc_data->data[31] == 0x13) { 1605 if (rdc_data->data[31] == 0x13) {
1612 PRINT_INFO("Device has crypto support\n");
1613 data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK; 1606 data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK;
1614 tape_3592_disable_crypt(device); 1607 tape_3592_disable_crypt(device);
1615 } else { 1608 } else {
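
Editorial sketch: the tape_3590.c hunks above all follow one conversion — the per-case PRINT_WARN calls become snprintf() into two scratch buffers, and a single dev_warn() is emitted once both strings are built. A minimal sketch of that pattern follows; DEMO_BUFSIZE and demo_print_sim() are illustrative names, not identifiers from the patch, and the error handling mirrors the out_nomem label in tape_3590_print_io_sim_msg_f1().

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#define DEMO_BUFSIZE 128

static void demo_print_sim(struct device *dev, u8 emc, u8 smc)
{
	char *exception, *service;

	exception = kmalloc(DEMO_BUFSIZE, GFP_ATOMIC);
	service = kmalloc(DEMO_BUFSIZE, GFP_ATOMIC);
	if (!exception || !service)
		goto out_nomem;

	/* The switch cases only format text; nothing is printed yet. */
	switch (emc) {
	case 0x01:
		snprintf(exception, DEMO_BUFSIZE, "Effect of failure is unknown");
		break;
	default:
		snprintf(exception, DEMO_BUFSIZE, "0x%02x", emc);
	}
	switch (smc) {
	case 0x01:
		snprintf(service, DEMO_BUFSIZE, "Repair impact is unknown");
		break;
	default:
		snprintf(service, DEMO_BUFSIZE, "0x%02x", smc);
	}
	/* One structured device message instead of many PRINT_WARN lines. */
	dev_warn(dev, "I/O subsystem information: exception %s, service %s\n",
		 exception, service);
out_nomem:
	kfree(exception);
	kfree(service);
}
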
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index ae18baf59f06..f32e89e7c4f2 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -10,6 +10,8 @@
10 * Stefan Bader <shbader@de.ibm.com> 10 * Stefan Bader <shbader@de.ibm.com>
11 */ 11 */
12 12
13#define KMSG_COMPONENT "tape"
14
13#include <linux/fs.h> 15#include <linux/fs.h>
14#include <linux/module.h> 16#include <linux/module.h>
15#include <linux/blkdev.h> 17#include <linux/blkdev.h>
@@ -23,8 +25,6 @@
23 25
24#include "tape.h" 26#include "tape.h"
25 27
26#define PRINTK_HEADER "TAPE_BLOCK: "
27
28#define TAPEBLOCK_MAX_SEC 100 28#define TAPEBLOCK_MAX_SEC 100
29#define TAPEBLOCK_MIN_REQUEUE 3 29#define TAPEBLOCK_MIN_REQUEUE 3
30 30
@@ -279,8 +279,6 @@ tapeblock_cleanup_device(struct tape_device *device)
279 tape_put_device(device); 279 tape_put_device(device);
280 280
281 if (!device->blk_data.disk) { 281 if (!device->blk_data.disk) {
282 PRINT_ERR("(%s): No gendisk to clean up!\n",
283 dev_name(&device->cdev->dev));
284 goto cleanup_queue; 282 goto cleanup_queue;
285 } 283 }
286 284
@@ -314,7 +312,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
314 if (!device->blk_data.medium_changed) 312 if (!device->blk_data.medium_changed)
315 return 0; 313 return 0;
316 314
317 PRINT_INFO("Detecting media size...\n"); 315 dev_info(&device->cdev->dev, "Determining the size of the recorded "
316 "area...\n");
318 rc = tape_mtop(device, MTFSFM, 1); 317 rc = tape_mtop(device, MTFSFM, 1);
319 if (rc) 318 if (rc)
320 return rc; 319 return rc;
@@ -341,7 +340,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
341 device->bof = rc; 340 device->bof = rc;
342 nr_of_blks -= rc; 341 nr_of_blks -= rc;
343 342
344 PRINT_INFO("Found %i blocks on media\n", nr_of_blks); 343 dev_info(&device->cdev->dev, "The size of the recorded area is %i "
344 "blocks\n", nr_of_blks);
345 set_capacity(device->blk_data.disk, 345 set_capacity(device->blk_data.disk,
346 nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512)); 346 nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));
347 347
@@ -376,8 +376,8 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
376 376
377 if (device->required_tapemarks) { 377 if (device->required_tapemarks) {
378 DBF_EVENT(2, "TBLOCK: missing tapemarks\n"); 378 DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
379 PRINT_ERR("TBLOCK: Refusing to open tape with missing" 379 dev_warn(&device->cdev->dev, "Opening the tape failed because"
380 " end of file marks.\n"); 380 " of missing end-of-file marks\n");
381 rc = -EPERM; 381 rc = -EPERM;
382 goto put_device; 382 goto put_device;
383 } 383 }
@@ -452,7 +452,6 @@ tapeblock_ioctl(
452 rc = -EINVAL; 452 rc = -EINVAL;
453 break; 453 break;
454 default: 454 default:
455 PRINT_WARN("invalid ioctl 0x%x\n", command);
456 rc = -EINVAL; 455 rc = -EINVAL;
457 } 456 }
458 457
@@ -474,7 +473,6 @@ tapeblock_init(void)
474 473
475 if (tapeblock_major == 0) 474 if (tapeblock_major == 0)
476 tapeblock_major = rc; 475 tapeblock_major = rc;
477 PRINT_INFO("tape gets major %d for block device\n", tapeblock_major);
478 return 0; 476 return 0;
479} 477}
480 478
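
Editorial sketch: tape_block.c swaps the driver-local PRINT_* macros (and their PRINTK_HEADER prefix) for KMSG_COMPONENT plus the generic dev_info()/dev_warn() helpers. As a hedged illustration of the idiom, many s390 drivers pair KMSG_COMPONENT with a pr_fmt() definition so plain pr_*() calls carry the component prefix, while per-device messages go through dev_*() and pick up the bus id automatically. The fragment below is standalone example code, not part of this patch.

#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/device.h>

static void demo_report_capacity(struct device *dev, int nr_of_blks)
{
	/* Appears as "tape: block device support loaded" in the log. */
	pr_info("block device support loaded\n");
	/* Appears prefixed with the bus id of this particular tape unit. */
	dev_info(dev, "The size of the recorded area is %i blocks\n",
		 nr_of_blks);
}
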
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index be0ce2215c8d..31566c55adfe 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -24,8 +24,6 @@
24#include "tape_std.h" 24#include "tape_std.h"
25#include "tape_class.h" 25#include "tape_class.h"
26 26
27#define PRINTK_HEADER "TAPE_CHAR: "
28
29#define TAPECHAR_MAJOR 0 /* get dynamic major */ 27#define TAPECHAR_MAJOR 0 /* get dynamic major */
30 28
31/* 29/*
@@ -102,8 +100,6 @@ tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
102 if (block_size > MAX_BLOCKSIZE) { 100 if (block_size > MAX_BLOCKSIZE) {
103 DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n", 101 DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n",
104 block_size, MAX_BLOCKSIZE); 102 block_size, MAX_BLOCKSIZE);
105 PRINT_ERR("Invalid blocksize (%zd> %d)\n",
106 block_size, MAX_BLOCKSIZE);
107 return -EINVAL; 103 return -EINVAL;
108 } 104 }
109 105
@@ -485,7 +481,6 @@ tapechar_init (void)
485 return -1; 481 return -1;
486 482
487 tapechar_major = MAJOR(dev); 483 tapechar_major = MAJOR(dev);
488 PRINT_INFO("tape gets major %d for character devices\n", MAJOR(dev));
489 484
490 return 0; 485 return 0;
491} 486}
@@ -496,7 +491,5 @@ tapechar_init (void)
496void 491void
497tapechar_exit(void) 492tapechar_exit(void)
498{ 493{
499 PRINT_INFO("tape releases major %d for character devices\n",
500 tapechar_major);
501 unregister_chrdev_region(MKDEV(tapechar_major, 0), 256); 494 unregister_chrdev_region(MKDEV(tapechar_major, 0), 256);
502} 495}
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index f9bb51fa7f5b..08c09d3503cf 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -11,6 +11,7 @@
11 * Stefan Bader <shbader@de.ibm.com> 11 * Stefan Bader <shbader@de.ibm.com>
12 */ 12 */
13 13
14#define KMSG_COMPONENT "tape"
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/init.h> // for kernel parameters 16#include <linux/init.h> // for kernel parameters
16#include <linux/kmod.h> // for requesting modules 17#include <linux/kmod.h> // for requesting modules
@@ -25,7 +26,6 @@
25#include "tape.h" 26#include "tape.h"
26#include "tape_std.h" 27#include "tape_std.h"
27 28
28#define PRINTK_HEADER "TAPE_CORE: "
29#define LONG_BUSY_TIMEOUT 180 /* seconds */ 29#define LONG_BUSY_TIMEOUT 180 /* seconds */
30 30
31static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); 31static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
@@ -214,13 +214,13 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
214 switch(newstate){ 214 switch(newstate){
215 case MS_UNLOADED: 215 case MS_UNLOADED:
216 device->tape_generic_status |= GMT_DR_OPEN(~0); 216 device->tape_generic_status |= GMT_DR_OPEN(~0);
217 PRINT_INFO("(%s): Tape is unloaded\n", 217 dev_info(&device->cdev->dev, "The tape cartridge has been "
218 dev_name(&device->cdev->dev)); 218 "successfully unloaded\n");
219 break; 219 break;
220 case MS_LOADED: 220 case MS_LOADED:
221 device->tape_generic_status &= ~GMT_DR_OPEN(~0); 221 device->tape_generic_status &= ~GMT_DR_OPEN(~0);
222 PRINT_INFO("(%s): Tape has been mounted\n", 222 dev_info(&device->cdev->dev, "A tape cartridge has been "
223 dev_name(&device->cdev->dev)); 223 "mounted\n");
224 break; 224 break;
225 default: 225 default:
226 // print nothing 226 // print nothing
@@ -333,7 +333,6 @@ tape_generic_online(struct tape_device *device,
333 /* Let the discipline have a go at the device. */ 333 /* Let the discipline have a go at the device. */
334 device->discipline = discipline; 334 device->discipline = discipline;
335 if (!try_module_get(discipline->owner)) { 335 if (!try_module_get(discipline->owner)) {
336 PRINT_ERR("Cannot get module. Module gone.\n");
337 return -EINVAL; 336 return -EINVAL;
338 } 337 }
339 338
@@ -391,7 +390,6 @@ int
391tape_generic_offline(struct tape_device *device) 390tape_generic_offline(struct tape_device *device)
392{ 391{
393 if (!device) { 392 if (!device) {
394 PRINT_ERR("tape_generic_offline: no such device\n");
395 return -ENODEV; 393 return -ENODEV;
396 } 394 }
397 395
@@ -413,9 +411,6 @@ tape_generic_offline(struct tape_device *device)
413 DBF_EVENT(3, "(%08x): Set offline failed " 411 DBF_EVENT(3, "(%08x): Set offline failed "
414 "- drive in use.\n", 412 "- drive in use.\n",
415 device->cdev_id); 413 device->cdev_id);
416 PRINT_WARN("(%s): Set offline failed "
417 "- drive in use.\n",
418 dev_name(&device->cdev->dev));
419 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 414 spin_unlock_irq(get_ccwdev_lock(device->cdev));
420 return -EBUSY; 415 return -EBUSY;
421 } 416 }
@@ -435,14 +430,11 @@ tape_alloc_device(void)
435 device = kzalloc(sizeof(struct tape_device), GFP_KERNEL); 430 device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
436 if (device == NULL) { 431 if (device == NULL) {
437 DBF_EXCEPTION(2, "ti:no mem\n"); 432 DBF_EXCEPTION(2, "ti:no mem\n");
438 PRINT_INFO ("can't allocate memory for "
439 "tape info structure\n");
440 return ERR_PTR(-ENOMEM); 433 return ERR_PTR(-ENOMEM);
441 } 434 }
442 device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA); 435 device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
443 if (device->modeset_byte == NULL) { 436 if (device->modeset_byte == NULL) {
444 DBF_EXCEPTION(2, "ti:no mem\n"); 437 DBF_EXCEPTION(2, "ti:no mem\n");
445 PRINT_INFO("can't allocate memory for modeset byte\n");
446 kfree(device); 438 kfree(device);
447 return ERR_PTR(-ENOMEM); 439 return ERR_PTR(-ENOMEM);
448 } 440 }
@@ -490,7 +482,6 @@ tape_put_device(struct tape_device *device)
490 } else { 482 } else {
491 if (remain < 0) { 483 if (remain < 0) {
492 DBF_EVENT(4, "put device without reference\n"); 484 DBF_EVENT(4, "put device without reference\n");
493 PRINT_ERR("put device without reference\n");
494 } else { 485 } else {
495 DBF_EVENT(4, "tape_free_device(%p)\n", device); 486 DBF_EVENT(4, "tape_free_device(%p)\n", device);
496 kfree(device->modeset_byte); 487 kfree(device->modeset_byte);
@@ -538,8 +529,6 @@ tape_generic_probe(struct ccw_device *cdev)
538 ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group); 529 ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
539 if (ret) { 530 if (ret) {
540 tape_put_device(device); 531 tape_put_device(device);
541 PRINT_ERR("probe failed for tape device %s\n",
542 dev_name(&cdev->dev));
543 return ret; 532 return ret;
544 } 533 }
545 cdev->dev.driver_data = device; 534 cdev->dev.driver_data = device;
@@ -547,7 +536,6 @@ tape_generic_probe(struct ccw_device *cdev)
547 device->cdev = cdev; 536 device->cdev = cdev;
548 ccw_device_get_id(cdev, &dev_id); 537 ccw_device_get_id(cdev, &dev_id);
549 device->cdev_id = devid_to_int(&dev_id); 538 device->cdev_id = devid_to_int(&dev_id);
550 PRINT_INFO("tape device %s found\n", dev_name(&cdev->dev));
551 return ret; 539 return ret;
552} 540}
553 541
@@ -584,7 +572,6 @@ tape_generic_remove(struct ccw_device *cdev)
584 572
585 device = cdev->dev.driver_data; 573 device = cdev->dev.driver_data;
586 if (!device) { 574 if (!device) {
587 PRINT_ERR("No device pointer in tape_generic_remove!\n");
588 return; 575 return;
589 } 576 }
590 DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev); 577 DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);
@@ -615,10 +602,8 @@ tape_generic_remove(struct ccw_device *cdev)
615 */ 602 */
616 DBF_EVENT(3, "(%08x): Drive in use vanished!\n", 603 DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
617 device->cdev_id); 604 device->cdev_id);
618 PRINT_WARN("(%s): Drive in use vanished - " 605 dev_warn(&device->cdev->dev, "A tape unit was detached"
619 "expect trouble!\n", 606 " while in use\n");
620 dev_name(&device->cdev->dev));
621 PRINT_WARN("State was %i\n", device->tape_state);
622 tape_state_set(device, TS_NOT_OPER); 607 tape_state_set(device, TS_NOT_OPER);
623 __tape_discard_requests(device); 608 __tape_discard_requests(device);
624 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 609 spin_unlock_irq(get_ccwdev_lock(device->cdev));
@@ -639,8 +624,7 @@ tape_alloc_request(int cplength, int datasize)
639{ 624{
640 struct tape_request *request; 625 struct tape_request *request;
641 626
642 if (datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE) 627 BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
643 BUG();
644 628
645 DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize); 629 DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);
646 630
@@ -797,8 +781,7 @@ static void tape_long_busy_timeout(unsigned long data)
797 device = (struct tape_device *) data; 781 device = (struct tape_device *) data;
798 spin_lock_irq(get_ccwdev_lock(device->cdev)); 782 spin_lock_irq(get_ccwdev_lock(device->cdev));
799 request = list_entry(device->req_queue.next, struct tape_request, list); 783 request = list_entry(device->req_queue.next, struct tape_request, list);
800 if (request->status != TAPE_REQUEST_LONG_BUSY) 784 BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
801 BUG();
802 DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id); 785 DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
803 __tape_start_next_request(device); 786 __tape_start_next_request(device);
804 device->lb_timeout.data = (unsigned long) tape_put_device(device); 787 device->lb_timeout.data = (unsigned long) tape_put_device(device);
@@ -830,30 +813,6 @@ __tape_end_request(
830} 813}
831 814
832/* 815/*
833 * Write sense data to console/dbf
834 */
835void
836tape_dump_sense(struct tape_device* device, struct tape_request *request,
837 struct irb *irb)
838{
839 unsigned int *sptr;
840
841 PRINT_INFO("-------------------------------------------------\n");
842 PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n",
843 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat, irb->scsw.cmd.cpa);
844 PRINT_INFO("DEVICE: %s\n", dev_name(&device->cdev->dev));
845 if (request != NULL)
846 PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]);
847
848 sptr = (unsigned int *) irb->ecw;
849 PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
850 sptr[0], sptr[1], sptr[2], sptr[3]);
851 PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
852 sptr[4], sptr[5], sptr[6], sptr[7]);
853 PRINT_INFO("--------------------------------------------------\n");
854}
855
856/*
857 * Write sense data to dbf 816 * Write sense data to dbf
858 */ 817 */
859void 818void
@@ -1051,8 +1010,6 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1051 1010
1052 device = (struct tape_device *) cdev->dev.driver_data; 1011 device = (struct tape_device *) cdev->dev.driver_data;
1053 if (device == NULL) { 1012 if (device == NULL) {
1054 PRINT_ERR("could not get device structure for %s "
1055 "in interrupt\n", dev_name(&cdev->dev));
1056 return; 1013 return;
1057 } 1014 }
1058 request = (struct tape_request *) intparm; 1015 request = (struct tape_request *) intparm;
@@ -1064,13 +1021,13 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1064 /* FIXME: What to do with the request? */ 1021 /* FIXME: What to do with the request? */
1065 switch (PTR_ERR(irb)) { 1022 switch (PTR_ERR(irb)) {
1066 case -ETIMEDOUT: 1023 case -ETIMEDOUT:
1067 PRINT_WARN("(%s): Request timed out\n", 1024 DBF_LH(1, "(%s): Request timed out\n",
1068 dev_name(&cdev->dev)); 1025 dev_name(&cdev->dev));
1069 case -EIO: 1026 case -EIO:
1070 __tape_end_request(device, request, -EIO); 1027 __tape_end_request(device, request, -EIO);
1071 break; 1028 break;
1072 default: 1029 default:
1073 PRINT_ERR("(%s): Unexpected i/o error %li\n", 1030 DBF_LH(1, "(%s): Unexpected i/o error %li\n",
1074 dev_name(&cdev->dev), 1031 dev_name(&cdev->dev),
1075 PTR_ERR(irb)); 1032 PTR_ERR(irb));
1076 } 1033 }
@@ -1182,8 +1139,6 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1182 default: 1139 default:
1183 if (rc > 0) { 1140 if (rc > 0) {
1184 DBF_EVENT(6, "xunknownrc\n"); 1141 DBF_EVENT(6, "xunknownrc\n");
1185 PRINT_ERR("Invalid return code from discipline "
1186 "interrupt function.\n");
1187 __tape_end_request(device, request, -EIO); 1142 __tape_end_request(device, request, -EIO);
1188 } else { 1143 } else {
1189 __tape_end_request(device, request, rc); 1144 __tape_end_request(device, request, rc);
@@ -1323,7 +1278,6 @@ EXPORT_SYMBOL(tape_state_set);
1323EXPORT_SYMBOL(tape_med_state_set); 1278EXPORT_SYMBOL(tape_med_state_set);
1324EXPORT_SYMBOL(tape_alloc_request); 1279EXPORT_SYMBOL(tape_alloc_request);
1325EXPORT_SYMBOL(tape_free_request); 1280EXPORT_SYMBOL(tape_free_request);
1326EXPORT_SYMBOL(tape_dump_sense);
1327EXPORT_SYMBOL(tape_dump_sense_dbf); 1281EXPORT_SYMBOL(tape_dump_sense_dbf);
1328EXPORT_SYMBOL(tape_do_io); 1282EXPORT_SYMBOL(tape_do_io);
1329EXPORT_SYMBOL(tape_do_io_async); 1283EXPORT_SYMBOL(tape_do_io_async);
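
Editorial sketch: tape_core.c drops the console-oriented tape_dump_sense() entirely and keeps only tape_dump_sense_dbf(), so sense data now goes to the s390 debug feature (DBF) instead of the kernel log. The sketch below shows, assuming a throwaway "tape_demo" debug area, how such an area is registered and written; the real driver keeps its own long-lived areas and views.

#include <linux/module.h>
#include <asm/debug.h>

static debug_info_t *demo_dbf;

static int __init demo_dbf_init(void)
{
	/* 2 pages per area, 1 area, records sized for sprintf arguments. */
	demo_dbf = debug_register("tape_demo", 2, 1, 4 * sizeof(long));
	if (!demo_dbf)
		return -ENOMEM;
	debug_register_view(demo_dbf, &debug_sprintf_view);
	debug_sprintf_event(demo_dbf, 3, "sense dumped to dbf, rc=%d\n", 0);
	return 0;
}

static void __exit demo_dbf_exit(void)
{
	debug_unregister(demo_dbf);
}

module_init(demo_dbf_init);
module_exit(demo_dbf_exit);
MODULE_LICENSE("GPL");
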
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index 8a376af926a7..202f42132939 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -20,8 +20,6 @@
20 20
21#include "tape.h" 21#include "tape.h"
22 22
23#define PRINTK_HEADER "TAPE_PROC: "
24
25static const char *tape_med_st_verbose[MS_SIZE] = 23static const char *tape_med_st_verbose[MS_SIZE] =
26{ 24{
27 [MS_UNKNOWN] = "UNKNOWN ", 25 [MS_UNKNOWN] = "UNKNOWN ",
@@ -128,7 +126,6 @@ tape_proc_init(void)
128 proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL, 126 proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL,
129 &tape_proc_ops); 127 &tape_proc_ops);
130 if (tape_proc_devices == NULL) { 128 if (tape_proc_devices == NULL) {
131 PRINT_WARN("tape: Cannot register procfs entry tapedevices\n");
132 return; 129 return;
133 } 130 }
134} 131}
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 5bd573d144d6..1a9420ba518d 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -26,8 +26,6 @@
26#include "tape.h" 26#include "tape.h"
27#include "tape_std.h" 27#include "tape_std.h"
28 28
29#define PRINTK_HEADER "TAPE_STD: "
30
31/* 29/*
32 * tape_std_assign 30 * tape_std_assign
33 */ 31 */
@@ -39,16 +37,15 @@ tape_std_assign_timeout(unsigned long data)
39 int rc; 37 int rc;
40 38
41 request = (struct tape_request *) data; 39 request = (struct tape_request *) data;
42 if ((device = request->device) == NULL) 40 device = request->device;
43 BUG(); 41 BUG_ON(!device);
44 42
45 DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n", 43 DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
46 device->cdev_id); 44 device->cdev_id);
47 rc = tape_cancel_io(device, request); 45 rc = tape_cancel_io(device, request);
48 if(rc) 46 if(rc)
49 PRINT_ERR("(%s): Assign timeout: Cancel failed with rc = %i\n", 47 DBF_EVENT(3, "(%s): Assign timeout: Cancel failed with rc = %i\n",
50 dev_name(&device->cdev->dev), rc); 48 dev_name(&device->cdev->dev), rc);
51
52} 49}
53 50
54int 51int
@@ -82,8 +79,6 @@ tape_std_assign(struct tape_device *device)
82 del_timer(&timeout); 79 del_timer(&timeout);
83 80
84 if (rc != 0) { 81 if (rc != 0) {
85 PRINT_WARN("%s: assign failed - device might be busy\n",
86 dev_name(&device->cdev->dev));
87 DBF_EVENT(3, "%08x: assign failed - device might be busy\n", 82 DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
88 device->cdev_id); 83 device->cdev_id);
89 } else { 84 } else {
@@ -105,8 +100,6 @@ tape_std_unassign (struct tape_device *device)
105 if (device->tape_state == TS_NOT_OPER) { 100 if (device->tape_state == TS_NOT_OPER) {
106 DBF_EVENT(3, "(%08x): Can't unassign device\n", 101 DBF_EVENT(3, "(%08x): Can't unassign device\n",
107 device->cdev_id); 102 device->cdev_id);
108 PRINT_WARN("(%s): Can't unassign device - device gone\n",
109 dev_name(&device->cdev->dev));
110 return -EIO; 103 return -EIO;
111 } 104 }
112 105
@@ -120,8 +113,6 @@ tape_std_unassign (struct tape_device *device)
120 113
121 if ((rc = tape_do_io(device, request)) != 0) { 114 if ((rc = tape_do_io(device, request)) != 0) {
122 DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id); 115 DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
123 PRINT_WARN("%s: Unassign failed\n",
124 dev_name(&device->cdev->dev));
125 } else { 116 } else {
126 DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id); 117 DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
127 } 118 }
@@ -242,8 +233,6 @@ tape_std_mtsetblk(struct tape_device *device, int count)
242 if (count > MAX_BLOCKSIZE) { 233 if (count > MAX_BLOCKSIZE) {
243 DBF_EVENT(3, "Invalid block size (%d > %d) given.\n", 234 DBF_EVENT(3, "Invalid block size (%d > %d) given.\n",
244 count, MAX_BLOCKSIZE); 235 count, MAX_BLOCKSIZE);
245 PRINT_ERR("Invalid block size (%d > %d) given.\n",
246 count, MAX_BLOCKSIZE);
247 return -EINVAL; 236 return -EINVAL;
248 } 237 }
249 238
@@ -633,14 +622,6 @@ tape_std_mtcompression(struct tape_device *device, int mt_count)
633 622
634 if (mt_count < 0 || mt_count > 1) { 623 if (mt_count < 0 || mt_count > 1) {
635 DBF_EXCEPTION(6, "xcom parm\n"); 624 DBF_EXCEPTION(6, "xcom parm\n");
636 if (*device->modeset_byte & 0x08)
637 PRINT_INFO("(%s) Compression is currently on\n",
638 dev_name(&device->cdev->dev));
639 else
640 PRINT_INFO("(%s) Compression is currently off\n",
641 dev_name(&device->cdev->dev));
642 PRINT_INFO("Use 1 to switch compression on, 0 to "
643 "switch it off\n");
644 return -EINVAL; 625 return -EINVAL;
645 } 626 }
646 request = tape_alloc_request(2, 0); 627 request = tape_alloc_request(2, 0);
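
Editorial sketch: the tape_std.c hunk keeps the existing on-stack timer that guards tape_std_assign() and merely moves the failure message from the console to a DBF event. For readers unfamiliar with the 2.6.29-era timer API, a hedged sketch of that guard pattern follows; struct demo_request and the demo_* names are simplified placeholders, and the real callback cancels the outstanding I/O with tape_cancel_io().

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_request {
	int cancelled;
};

static void demo_assign_timeout(unsigned long data)
{
	struct demo_request *request = (struct demo_request *) data;

	request->cancelled = 1;	/* the real code calls tape_cancel_io() */
}

static void demo_assign(struct demo_request *request)
{
	struct timer_list timeout;

	init_timer_on_stack(&timeout);
	timeout.function = demo_assign_timeout;
	timeout.data = (unsigned long) request;
	timeout.expires = jiffies + 2 * HZ;
	add_timer(&timeout);

	/* ... start the assign CCW and wait for its completion here ... */

	del_timer_sync(&timeout);
	destroy_timer_on_stack(&timeout);
}
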
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index eefc6611412e..1bbae433fbd8 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * For more information please refer to Documentation/s390/zfcpdump.txt 6 * For more information please refer to Documentation/s390/zfcpdump.txt
7 * 7 *
8 * Copyright IBM Corp. 2003,2007 8 * Copyright IBM Corp. 2003,2008
9 * Author(s): Michael Holzheu 9 * Author(s): Michael Holzheu
10 */ 10 */
11 11
@@ -24,6 +24,7 @@
24#include <asm/debug.h> 24#include <asm/debug.h>
25#include <asm/processor.h> 25#include <asm/processor.h>
26#include <asm/irqflags.h> 26#include <asm/irqflags.h>
27#include <asm/checksum.h>
27#include "sclp.h" 28#include "sclp.h"
28 29
29#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) 30#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
@@ -48,12 +49,19 @@ struct sys_info {
48 union save_area lc_mask; 49 union save_area lc_mask;
49}; 50};
50 51
52struct ipib_info {
53 unsigned long ipib;
54 u32 checksum;
55} __attribute__((packed));
56
51static struct sys_info sys_info; 57static struct sys_info sys_info;
52static struct debug_info *zcore_dbf; 58static struct debug_info *zcore_dbf;
53static int hsa_available; 59static int hsa_available;
54static struct dentry *zcore_dir; 60static struct dentry *zcore_dir;
55static struct dentry *zcore_file; 61static struct dentry *zcore_file;
56static struct dentry *zcore_memmap_file; 62static struct dentry *zcore_memmap_file;
63static struct dentry *zcore_reipl_file;
64static struct ipl_parameter_block *ipl_block;
57 65
58/* 66/*
59 * Copy memory from HSA to kernel or user memory (not reentrant): 67 * Copy memory from HSA to kernel or user memory (not reentrant):
@@ -527,6 +535,33 @@ static const struct file_operations zcore_memmap_fops = {
527 .release = zcore_memmap_release, 535 .release = zcore_memmap_release,
528}; 536};
529 537
538static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
539 size_t count, loff_t *ppos)
540{
541 if (ipl_block) {
542 diag308(DIAG308_SET, ipl_block);
543 diag308(DIAG308_IPL, NULL);
544 }
545 return count;
546}
547
548static int zcore_reipl_open(struct inode *inode, struct file *filp)
549{
550 return 0;
551}
552
553static int zcore_reipl_release(struct inode *inode, struct file *filp)
554{
555 return 0;
556}
557
558static const struct file_operations zcore_reipl_fops = {
559 .owner = THIS_MODULE,
560 .write = zcore_reipl_write,
561 .open = zcore_reipl_open,
562 .release = zcore_reipl_release,
563};
564
530 565
531static void __init set_s390_lc_mask(union save_area *map) 566static void __init set_s390_lc_mask(union save_area *map)
532{ 567{
@@ -645,6 +680,40 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr)
645 return 0; 680 return 0;
646} 681}
647 682
683/*
684 * Provide IPL parameter information block from either HSA or memory
685 * for future reipl
686 */
687static int __init zcore_reipl_init(void)
688{
689 struct ipib_info ipib_info;
690 int rc;
691
692 rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
693 if (rc)
694 return rc;
695 if (ipib_info.ipib == 0)
696 return 0;
697 ipl_block = (void *) __get_free_page(GFP_KERNEL);
698 if (!ipl_block)
699 return -ENOMEM;
700 if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE)
701 rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
702 else
703 rc = memcpy_real(ipl_block, ipib_info.ipib, PAGE_SIZE);
704 if (rc) {
705 free_page((unsigned long) ipl_block);
706 return rc;
707 }
708 if (csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
709 ipib_info.checksum) {
710 TRACE("Checksum does not match\n");
711 free_page((unsigned long) ipl_block);
712 ipl_block = NULL;
713 }
714 return 0;
715}
716
648static int __init zcore_init(void) 717static int __init zcore_init(void)
649{ 718{
650 unsigned char arch; 719 unsigned char arch;
@@ -690,6 +759,10 @@ static int __init zcore_init(void)
690 if (rc) 759 if (rc)
691 goto fail; 760 goto fail;
692 761
762 rc = zcore_reipl_init();
763 if (rc)
764 goto fail;
765
693 zcore_dir = debugfs_create_dir("zcore" , NULL); 766 zcore_dir = debugfs_create_dir("zcore" , NULL);
694 if (!zcore_dir) { 767 if (!zcore_dir) {
695 rc = -ENOMEM; 768 rc = -ENOMEM;
@@ -707,9 +780,17 @@ static int __init zcore_init(void)
707 rc = -ENOMEM; 780 rc = -ENOMEM;
708 goto fail_file; 781 goto fail_file;
709 } 782 }
783 zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
784 NULL, &zcore_reipl_fops);
785 if (!zcore_reipl_file) {
786 rc = -ENOMEM;
787 goto fail_memmap_file;
788 }
710 hsa_available = 1; 789 hsa_available = 1;
711 return 0; 790 return 0;
712 791
792fail_memmap_file:
793 debugfs_remove(zcore_memmap_file);
713fail_file: 794fail_file:
714 debugfs_remove(zcore_file); 795 debugfs_remove(zcore_file);
715fail_dir: 796fail_dir:
@@ -723,10 +804,15 @@ static void __exit zcore_exit(void)
723{ 804{
724 debug_unregister(zcore_dbf); 805 debug_unregister(zcore_dbf);
725 sclp_sdias_exit(); 806 sclp_sdias_exit();
807 free_page((unsigned long) ipl_block);
808 debugfs_remove(zcore_reipl_file);
809 debugfs_remove(zcore_memmap_file);
810 debugfs_remove(zcore_file);
811 debugfs_remove(zcore_dir);
726 diag308(DIAG308_REL_HSA, NULL); 812 diag308(DIAG308_REL_HSA, NULL);
727} 813}
728 814
729MODULE_AUTHOR("Copyright IBM Corp. 2003,2007"); 815MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
730MODULE_DESCRIPTION("zcore module for zfcpdump support"); 816MODULE_DESCRIPTION("zcore module for zfcpdump support");
731MODULE_LICENSE("GPL"); 817MODULE_LICENSE("GPL");
732 818
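
Editorial sketch: zcore.c gains a debugfs file, "reipl", whose write handler re-IPLs from the IPL parameter block that zcore_reipl_init() copied out of the HSA (or real memory) and verified by checksum. The sketch below shows only the debugfs plumbing for such a write-triggered action; the demo_* names are illustrative and the handler body stands in for the diag308() calls.

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static struct dentry *demo_dir;
static struct dentry *demo_file;

static ssize_t demo_reipl_write(struct file *filp, const char __user *buf,
				size_t count, loff_t *ppos)
{
	/* Any write triggers the action; zcore issues diag308() here. */
	pr_info("zcore demo: re-IPL requested (%zu bytes written)\n", count);
	return count;
}

static const struct file_operations demo_reipl_fops = {
	.owner = THIS_MODULE,
	.write = demo_reipl_write,
};

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("zcore_demo", NULL);
	if (!demo_dir)
		return -ENOMEM;
	demo_file = debugfs_create_file("reipl", S_IWUSR, demo_dir, NULL,
					&demo_reipl_fops);
	if (!demo_file) {
		debugfs_remove(demo_dir);
		return -ENOMEM;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove(demo_file);
	debugfs_remove(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
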
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index bd79bd165396..adb3dd301528 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \ 5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
6 fcx.o itcw.o 6 fcx.o itcw.o crw.o
7ccw_device-objs += device.o device_fsm.o device_ops.o 7ccw_device-objs += device.o device_fsm.o device_ops.o
8ccw_device-objs += device_id.o device_pgid.o device_status.o 8ccw_device-objs += device_id.o device_pgid.o device_status.o
9obj-y += ccw_device.o cmf.o 9obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index fe6cea15bbaf..65d2e769dfa1 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -34,8 +34,8 @@ struct airq_t {
34 void *drv_data; 34 void *drv_data;
35}; 35};
36 36
37static union indicator_t indicators[MAX_ISC]; 37static union indicator_t indicators[MAX_ISC+1];
38static struct airq_t *airqs[MAX_ISC][NR_AIRQS]; 38static struct airq_t *airqs[MAX_ISC+1][NR_AIRQS];
39 39
40static int register_airq(struct airq_t *airq, u8 isc) 40static int register_airq(struct airq_t *airq, u8 isc)
41{ 41{
@@ -133,6 +133,8 @@ void do_adapter_IO(u8 isc)
133 while (word) { 133 while (word) {
134 if (word & INDICATOR_MASK) { 134 if (word & INDICATOR_MASK) {
135 airq = airqs[isc][i]; 135 airq = airqs[isc][i];
136 /* Make sure gcc reads from airqs only once. */
137 barrier();
136 if (likely(airq)) 138 if (likely(airq))
137 airq->handler(&indicators[isc].byte[i], 139 airq->handler(&indicators[isc].byte[i],
138 airq->drv_data); 140 airq->drv_data);
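
Editorial sketch: airq.c receives two small fixes — the indicator and handler arrays are indexed with ISC values 0..MAX_ISC, so they need MAX_ISC+1 elements, and a compiler barrier keeps gcc from re-reading the handler slot after the NULL check. A minimal sketch of both points, with illustrative names only:

#include <linux/compiler.h>

#define DEMO_MAX_ISC 7	/* valid interruption subclasses are 0..7 */

static void (*demo_handlers[DEMO_MAX_ISC + 1])(void *);

static void demo_dispatch(unsigned int isc, void *data)
{
	void (*handler)(void *);

	handler = demo_handlers[isc];
	/* Make sure the slot is read only once before test and call. */
	barrier();
	if (handler)
		handler(data);
}
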
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index fe00be3675cd..6565f027791e 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -336,8 +336,7 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
336 size_t user_len, loff_t *offset) 336 size_t user_len, loff_t *offset)
337{ 337{
338 char *buf; 338 char *buf;
339 size_t i; 339 ssize_t rc, ret, i;
340 ssize_t rc, ret;
341 340
342 if (*offset) 341 if (*offset)
343 return -EINVAL; 342 return -EINVAL;
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index b91c1719b075..22ce765d537e 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -315,16 +315,32 @@ error:
315} 315}
316EXPORT_SYMBOL(ccwgroup_create_from_string); 316EXPORT_SYMBOL(ccwgroup_create_from_string);
317 317
318static int __init 318static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
319init_ccwgroup (void) 319 void *data);
320
321static struct notifier_block ccwgroup_nb = {
322 .notifier_call = ccwgroup_notifier
323};
324
325static int __init init_ccwgroup(void)
320{ 326{
321 return bus_register (&ccwgroup_bus_type); 327 int ret;
328
329 ret = bus_register(&ccwgroup_bus_type);
330 if (ret)
331 return ret;
332
333 ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
334 if (ret)
335 bus_unregister(&ccwgroup_bus_type);
336
337 return ret;
322} 338}
323 339
324static void __exit 340static void __exit cleanup_ccwgroup(void)
325cleanup_ccwgroup (void)
326{ 341{
327 bus_unregister (&ccwgroup_bus_type); 342 bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
343 bus_unregister(&ccwgroup_bus_type);
328} 344}
329 345
330module_init(init_ccwgroup); 346module_init(init_ccwgroup);
@@ -392,27 +408,28 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
392 unsigned long value; 408 unsigned long value;
393 int ret; 409 int ret;
394 410
395 gdev = to_ccwgroupdev(dev);
396 if (!dev->driver) 411 if (!dev->driver)
397 return count; 412 return -ENODEV;
413
414 gdev = to_ccwgroupdev(dev);
415 gdrv = to_ccwgroupdrv(dev->driver);
398 416
399 gdrv = to_ccwgroupdrv (gdev->dev.driver);
400 if (!try_module_get(gdrv->owner)) 417 if (!try_module_get(gdrv->owner))
401 return -EINVAL; 418 return -EINVAL;
402 419
403 ret = strict_strtoul(buf, 0, &value); 420 ret = strict_strtoul(buf, 0, &value);
404 if (ret) 421 if (ret)
405 goto out; 422 goto out;
406 ret = count; 423
407 if (value == 1) 424 if (value == 1)
408 ccwgroup_set_online(gdev); 425 ret = ccwgroup_set_online(gdev);
409 else if (value == 0) 426 else if (value == 0)
410 ccwgroup_set_offline(gdev); 427 ret = ccwgroup_set_offline(gdev);
411 else 428 else
412 ret = -EINVAL; 429 ret = -EINVAL;
413out: 430out:
414 module_put(gdrv->owner); 431 module_put(gdrv->owner);
415 return ret; 432 return (ret == 0) ? count : ret;
416} 433}
417 434
418static ssize_t 435static ssize_t
@@ -454,13 +471,18 @@ ccwgroup_remove (struct device *dev)
454 struct ccwgroup_device *gdev; 471 struct ccwgroup_device *gdev;
455 struct ccwgroup_driver *gdrv; 472 struct ccwgroup_driver *gdrv;
456 473
474 device_remove_file(dev, &dev_attr_online);
475 device_remove_file(dev, &dev_attr_ungroup);
476
477 if (!dev->driver)
478 return 0;
479
457 gdev = to_ccwgroupdev(dev); 480 gdev = to_ccwgroupdev(dev);
458 gdrv = to_ccwgroupdrv(dev->driver); 481 gdrv = to_ccwgroupdrv(dev->driver);
459 482
460 device_remove_file(dev, &dev_attr_online); 483 if (gdrv->remove)
461
462 if (gdrv && gdrv->remove)
463 gdrv->remove(gdev); 484 gdrv->remove(gdev);
485
464 return 0; 486 return 0;
465} 487}
466 488
@@ -469,9 +491,13 @@ static void ccwgroup_shutdown(struct device *dev)
469 struct ccwgroup_device *gdev; 491 struct ccwgroup_device *gdev;
470 struct ccwgroup_driver *gdrv; 492 struct ccwgroup_driver *gdrv;
471 493
494 if (!dev->driver)
495 return;
496
472 gdev = to_ccwgroupdev(dev); 497 gdev = to_ccwgroupdev(dev);
473 gdrv = to_ccwgroupdrv(dev->driver); 498 gdrv = to_ccwgroupdrv(dev->driver);
474 if (gdrv && gdrv->shutdown) 499
500 if (gdrv->shutdown)
475 gdrv->shutdown(gdev); 501 gdrv->shutdown(gdev);
476} 502}
477 503
@@ -484,6 +510,19 @@ static struct bus_type ccwgroup_bus_type = {
484 .shutdown = ccwgroup_shutdown, 510 .shutdown = ccwgroup_shutdown,
485}; 511};
486 512
513
514static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
515 void *data)
516{
517 struct device *dev = data;
518
519 if (action == BUS_NOTIFY_UNBIND_DRIVER)
520 device_schedule_callback(dev, ccwgroup_ungroup_callback);
521
522 return NOTIFY_OK;
523}
524
525
487/** 526/**
488 * ccwgroup_driver_register() - register a ccw group driver 527 * ccwgroup_driver_register() - register a ccw group driver
489 * @cdriver: driver to be registered 528 * @cdriver: driver to be registered
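
Editorial sketch: init_ccwgroup() now registers a bus notifier so that a BUS_NOTIFY_UNBIND_DRIVER event schedules the ungroup callback instead of relying on remove() ordering in the driver core. A self-contained sketch of that notifier pattern follows; demo_bus_type and the demo_* names are assumptions made for the example and do not appear in the patch.

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/init.h>

static struct bus_type demo_bus_type = {
	.name = "demo",
};

static int demo_notifier(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_UNBIND_DRIVER)
		dev_info(dev, "driver is being unbound\n");

	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_notifier,
};

static int __init demo_bus_init(void)
{
	int ret;

	ret = bus_register(&demo_bus_type);
	if (ret)
		return ret;

	ret = bus_register_notifier(&demo_bus_type, &demo_nb);
	if (ret)
		bus_unregister(&demo_bus_type);

	return ret;
}
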
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 1246f61a5338..3e5f304ad88f 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -17,8 +17,8 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <asm/chpid.h> 18#include <asm/chpid.h>
19#include <asm/sclp.h> 19#include <asm/sclp.h>
20#include <asm/crw.h>
20 21
21#include "../s390mach.h"
22#include "cio.h" 22#include "cio.h"
23#include "css.h" 23#include "css.h"
24#include "ioasm.h" 24#include "ioasm.h"
@@ -706,12 +706,12 @@ static int __init chp_init(void)
706 struct chp_id chpid; 706 struct chp_id chpid;
707 int ret; 707 int ret;
708 708
709 ret = s390_register_crw_handler(CRW_RSC_CPATH, chp_process_crw); 709 ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
710 if (ret) 710 if (ret)
711 return ret; 711 return ret;
712 chp_wq = create_singlethread_workqueue("cio_chp"); 712 chp_wq = create_singlethread_workqueue("cio_chp");
713 if (!chp_wq) { 713 if (!chp_wq) {
714 s390_unregister_crw_handler(CRW_RSC_CPATH); 714 crw_unregister_handler(CRW_RSC_CPATH);
715 return -ENOMEM; 715 return -ENOMEM;
716 } 716 }
717 INIT_WORK(&cfg_work, cfg_func); 717 INIT_WORK(&cfg_work, cfg_func);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index ebab6ea4659b..883f16f96f22 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -19,8 +19,8 @@
19#include <asm/cio.h> 19#include <asm/cio.h>
20#include <asm/chpid.h> 20#include <asm/chpid.h>
21#include <asm/chsc.h> 21#include <asm/chsc.h>
22#include <asm/crw.h>
22 23
23#include "../s390mach.h"
24#include "css.h" 24#include "css.h"
25#include "cio.h" 25#include "cio.h"
26#include "cio_debug.h" 26#include "cio_debug.h"
@@ -589,6 +589,7 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
589 case 0x0102: 589 case 0x0102:
590 case 0x0103: 590 case 0x0103:
591 ret = -EINVAL; 591 ret = -EINVAL;
592 break;
592 default: 593 default:
593 ret = chsc_error_from_response(secm_area->response.code); 594 ret = chsc_error_from_response(secm_area->response.code);
594 } 595 }
@@ -820,7 +821,7 @@ int __init chsc_alloc_sei_area(void)
820 "chsc machine checks!\n"); 821 "chsc machine checks!\n");
821 return -ENOMEM; 822 return -ENOMEM;
822 } 823 }
823 ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw); 824 ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
824 if (ret) 825 if (ret)
825 kfree(sei_page); 826 kfree(sei_page);
826 return ret; 827 return ret;
@@ -828,7 +829,7 @@ int __init chsc_alloc_sei_area(void)
828 829
829void __init chsc_free_sei_area(void) 830void __init chsc_free_sei_area(void)
830{ 831{
831 s390_unregister_crw_handler(CRW_RSC_CSS); 832 crw_unregister_handler(CRW_RSC_CSS);
832 kfree(sei_page); 833 kfree(sei_page);
833} 834}
834 835
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 659f8a791656..2aebb9823044 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -30,6 +30,8 @@
30#include <asm/isc.h> 30#include <asm/isc.h>
31#include <asm/cpu.h> 31#include <asm/cpu.h>
32#include <asm/fcx.h> 32#include <asm/fcx.h>
33#include <asm/nmi.h>
34#include <asm/crw.h>
33#include "cio.h" 35#include "cio.h"
34#include "css.h" 36#include "css.h"
35#include "chsc.h" 37#include "chsc.h"
@@ -38,7 +40,6 @@
38#include "blacklist.h" 40#include "blacklist.h"
39#include "cio_debug.h" 41#include "cio_debug.h"
40#include "chp.h" 42#include "chp.h"
41#include "../s390mach.h"
42 43
43debug_info_t *cio_debug_msg_id; 44debug_info_t *cio_debug_msg_id;
44debug_info_t *cio_debug_trace_id; 45debug_info_t *cio_debug_trace_id;
@@ -471,6 +472,7 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
471int cio_disable_subchannel(struct subchannel *sch) 472int cio_disable_subchannel(struct subchannel *sch)
472{ 473{
473 char dbf_txt[15]; 474 char dbf_txt[15];
475 int retry;
474 int ret; 476 int ret;
475 477
476 CIO_TRACE_EVENT (2, "dissch"); 478 CIO_TRACE_EVENT (2, "dissch");
@@ -481,16 +483,17 @@ int cio_disable_subchannel(struct subchannel *sch)
481 if (cio_update_schib(sch)) 483 if (cio_update_schib(sch))
482 return -ENODEV; 484 return -ENODEV;
483 485
484 if (scsw_actl(&sch->schib.scsw) != 0)
485 /*
486 * the disable function must not be called while there are
487 * requests pending for completion !
488 */
489 return -EBUSY;
490
491 sch->config.ena = 0; 486 sch->config.ena = 0;
492 ret = cio_commit_config(sch);
493 487
488 for (retry = 0; retry < 3; retry++) {
489 ret = cio_commit_config(sch);
490 if (ret == -EBUSY) {
491 struct irb irb;
492 if (tsch(sch->schid, &irb) != 0)
493 break;
494 } else
495 break;
496 }
494 sprintf (dbf_txt, "ret:%d", ret); 497 sprintf (dbf_txt, "ret:%d", ret);
495 CIO_TRACE_EVENT (2, dbf_txt); 498 CIO_TRACE_EVENT (2, dbf_txt);
496 return ret; 499 return ret;
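
Editorial sketch: cio_disable_subchannel() no longer bails out with -EBUSY while activity is pending; it retries cio_commit_config() up to three times and uses tsch() to collect a pending interrupt in between. The retry skeleton, with stub functions standing in for cio_commit_config() and tsch(), might look like this:

#include <linux/errno.h>

static int demo_pending = 1;

/* Stands in for cio_commit_config(): busy while an interrupt is pending. */
static int demo_commit(void)
{
	return demo_pending ? -EBUSY : 0;
}

/* Stands in for tsch(): collect the pending interrupt, 0 on success. */
static int demo_drain(void)
{
	demo_pending = 0;
	return 0;
}

static int demo_disable(void)
{
	int retry, ret = 0;

	for (retry = 0; retry < 3; retry++) {
		ret = demo_commit();
		if (ret != -EBUSY)
			break;		/* success or a hard error */
		if (demo_drain() != 0)
			break;		/* nothing could be drained; give up */
	}
	return ret;
}
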
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
new file mode 100644
index 000000000000..d157665d0e76
--- /dev/null
+++ b/drivers/s390/cio/crw.c
@@ -0,0 +1,159 @@
1/*
2 * Channel report handling code
3 *
4 * Copyright IBM Corp. 2000,2009
5 * Author(s): Ingo Adlung <adlung@de.ibm.com>,
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Cornelia Huck <cornelia.huck@de.ibm.com>,
8 * Heiko Carstens <heiko.carstens@de.ibm.com>,
9 */
10
11#include <linux/semaphore.h>
12#include <linux/mutex.h>
13#include <linux/kthread.h>
14#include <linux/init.h>
15#include <asm/crw.h>
16
17static struct semaphore crw_semaphore;
18static DEFINE_MUTEX(crw_handler_mutex);
19static crw_handler_t crw_handlers[NR_RSCS];
20
21/**
22 * crw_register_handler() - register a channel report word handler
23 * @rsc: reporting source code to handle
24 * @handler: handler to be registered
25 *
26 * Returns %0 on success and a negative error value otherwise.
27 */
28int crw_register_handler(int rsc, crw_handler_t handler)
29{
30 int rc = 0;
31
32 if ((rsc < 0) || (rsc >= NR_RSCS))
33 return -EINVAL;
34 mutex_lock(&crw_handler_mutex);
35 if (crw_handlers[rsc])
36 rc = -EBUSY;
37 else
38 crw_handlers[rsc] = handler;
39 mutex_unlock(&crw_handler_mutex);
40 return rc;
41}
42
43/**
44 * crw_unregister_handler() - unregister a channel report word handler
45 * @rsc: reporting source code to handle
46 */
47void crw_unregister_handler(int rsc)
48{
49 if ((rsc < 0) || (rsc >= NR_RSCS))
50 return;
51 mutex_lock(&crw_handler_mutex);
52 crw_handlers[rsc] = NULL;
53 mutex_unlock(&crw_handler_mutex);
54}
55
56/*
57 * Retrieve CRWs and call function to handle event.
58 */
59static int crw_collect_info(void *unused)
60{
61 struct crw crw[2];
62 int ccode;
63 unsigned int chain;
64 int ignore;
65
66repeat:
67 ignore = down_interruptible(&crw_semaphore);
68 chain = 0;
69 while (1) {
70 crw_handler_t handler;
71
72 if (unlikely(chain > 1)) {
73 struct crw tmp_crw;
74
75 printk(KERN_WARNING"%s: Code does not support more "
76 "than two chained crws; please report to "
77 "linux390@de.ibm.com!\n", __func__);
78 ccode = stcrw(&tmp_crw);
79 printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
80 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
81 __func__, tmp_crw.slct, tmp_crw.oflw,
82 tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
83 tmp_crw.erc, tmp_crw.rsid);
84 printk(KERN_WARNING"%s: This was crw number %x in the "
85 "chain\n", __func__, chain);
86 if (ccode != 0)
87 break;
88 chain = tmp_crw.chn ? chain + 1 : 0;
89 continue;
90 }
91 ccode = stcrw(&crw[chain]);
92 if (ccode != 0)
93 break;
94 printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
95 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
96 crw[chain].slct, crw[chain].oflw, crw[chain].chn,
97 crw[chain].rsc, crw[chain].anc, crw[chain].erc,
98 crw[chain].rsid);
99 /* Check for overflows. */
100 if (crw[chain].oflw) {
101 int i;
102
103 pr_debug("%s: crw overflow detected!\n", __func__);
104 mutex_lock(&crw_handler_mutex);
105 for (i = 0; i < NR_RSCS; i++) {
106 if (crw_handlers[i])
107 crw_handlers[i](NULL, NULL, 1);
108 }
109 mutex_unlock(&crw_handler_mutex);
110 chain = 0;
111 continue;
112 }
113 if (crw[0].chn && !chain) {
114 chain++;
115 continue;
116 }
117 mutex_lock(&crw_handler_mutex);
118 handler = crw_handlers[crw[chain].rsc];
119 if (handler)
120 handler(&crw[0], chain ? &crw[1] : NULL, 0);
121 mutex_unlock(&crw_handler_mutex);
122 /* chain is always 0 or 1 here. */
123 chain = crw[chain].chn ? chain + 1 : 0;
124 }
125 goto repeat;
126 return 0;
127}
128
129void crw_handle_channel_report(void)
130{
131 up(&crw_semaphore);
132}
133
134/*
135 * Separate initcall needed for semaphore initialization since
136 * crw_handle_channel_report might be called before crw_machine_check_init.
137 */
138static int __init crw_init_semaphore(void)
139{
140 init_MUTEX_LOCKED(&crw_semaphore);
141 return 0;
142}
143pure_initcall(crw_init_semaphore);
144
145/*
146 * Machine checks for the channel subsystem must be enabled
147 * after the channel subsystem is initialized
148 */
149static int __init crw_machine_check_init(void)
150{
151 struct task_struct *task;
152
153 task = kthread_run(crw_collect_info, NULL, "kmcheck");
154 if (IS_ERR(task))
155 return PTR_ERR(task);
156 ctl_set_bit(14, 28); /* enable channel report MCH */
157 return 0;
158}
159device_initcall(crw_machine_check_init);
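The new crw.c takes over the channel report word bookkeeping from s390mach.c (deleted further below) and serializes handler registration with a mutex instead of cmpxchg()/xchg(). As a rough usage sketch, assuming only what the code above shows (a void handler that receives the first CRW, an optional chained CRW and an overflow flag), a subsystem would hook in roughly like this; the sample_ names are invented and CRW_RSC_SCH is used purely as an example of a reporting source constant:

#include <asm/crw.h>

/* hypothetical handler: crw1 is non-NULL only for chained reports,
 * overflow is set when CRWs were lost and a full rescan is needed */
static void sample_crw_handler(struct crw *crw0, struct crw *crw1, int overflow)
{
	if (overflow) {
		/* rescan everything owned by this reporting source */
		return;
	}
	/* evaluate crw0 (and crw1 if the report was chained) */
}

static int sample_crw_setup(void)
{
	/* returns -EBUSY if another handler already owns the RSC */
	return crw_register_handler(CRW_RSC_SCH, sample_crw_handler);
}

The matching crw_unregister_handler(CRW_RSC_SCH) simply clears the slot again, as the css.c error path below does.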
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 427d11d88069..0085d8901792 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -18,8 +18,8 @@
18#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/reboot.h> 19#include <linux/reboot.h>
20#include <asm/isc.h> 20#include <asm/isc.h>
21#include <asm/crw.h>
21 22
22#include "../s390mach.h"
23#include "css.h" 23#include "css.h"
24#include "cio.h" 24#include "cio.h"
25#include "cio_debug.h" 25#include "cio_debug.h"
@@ -83,6 +83,25 @@ static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
83 return rc; 83 return rc;
84} 84}
85 85
86static int call_fn_all_sch(struct subchannel_id schid, void *data)
87{
88 struct cb_data *cb = data;
89 struct subchannel *sch;
90 int rc = 0;
91
92 sch = get_subchannel_by_schid(schid);
93 if (sch) {
94 if (cb->fn_known_sch)
95 rc = cb->fn_known_sch(sch, cb->data);
96 put_device(&sch->dev);
97 } else {
98 if (cb->fn_unknown_sch)
99 rc = cb->fn_unknown_sch(schid, cb->data);
100 }
101
102 return rc;
103}
104
86int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), 105int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
87 int (*fn_unknown)(struct subchannel_id, 106 int (*fn_unknown)(struct subchannel_id,
88 void *), void *data) 107 void *), void *data)
@@ -90,13 +109,17 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
90 struct cb_data cb; 109 struct cb_data cb;
91 int rc; 110 int rc;
92 111
93 cb.set = idset_sch_new();
94 if (!cb.set)
95 return -ENOMEM;
96 idset_fill(cb.set);
97 cb.data = data; 112 cb.data = data;
98 cb.fn_known_sch = fn_known; 113 cb.fn_known_sch = fn_known;
99 cb.fn_unknown_sch = fn_unknown; 114 cb.fn_unknown_sch = fn_unknown;
115
116 cb.set = idset_sch_new();
117 if (!cb.set)
118 /* fall back to brute force scanning in case of oom */
119 return for_each_subchannel(call_fn_all_sch, &cb);
120
121 idset_fill(cb.set);
122
100 /* Process registered subchannels. */ 123 /* Process registered subchannels. */
101 rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch); 124 rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
102 if (rc) 125 if (rc)
@@ -510,6 +533,17 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data)
510 return ret; 533 return ret;
511} 534}
512 535
536static void reprobe_after_idle(struct work_struct *unused)
537{
538 /* Make sure initial subchannel scan is done. */
539 wait_event(ccw_device_init_wq,
540 atomic_read(&ccw_device_init_count) == 0);
541 if (need_reprobe)
542 css_schedule_reprobe();
543}
544
545static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);
546
513/* Work function used to reprobe all unregistered subchannels. */ 547/* Work function used to reprobe all unregistered subchannels. */
514static void reprobe_all(struct work_struct *unused) 548static void reprobe_all(struct work_struct *unused)
515{ 549{
@@ -517,10 +551,12 @@ static void reprobe_all(struct work_struct *unused)
517 551
518 CIO_MSG_EVENT(4, "reprobe start\n"); 552 CIO_MSG_EVENT(4, "reprobe start\n");
519 553
520 need_reprobe = 0;
521 /* Make sure initial subchannel scan is done. */ 554 /* Make sure initial subchannel scan is done. */
522 wait_event(ccw_device_init_wq, 555 if (atomic_read(&ccw_device_init_count) != 0) {
523 atomic_read(&ccw_device_init_count) == 0); 556 queue_work(ccw_device_work, &reprobe_idle_work);
557 return;
558 }
559 need_reprobe = 0;
524 ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL); 560 ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
525 561
526 CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret, 562 CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
@@ -619,7 +655,7 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
619 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; 655 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
620 } else { 656 } else {
621#ifdef CONFIG_SMP 657#ifdef CONFIG_SMP
622 css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id(); 658 css->global_pgid.pgid_high.cpu_addr = stap();
623#else 659#else
624 css->global_pgid.pgid_high.cpu_addr = 0; 660 css->global_pgid.pgid_high.cpu_addr = 0;
625#endif 661#endif
@@ -765,7 +801,7 @@ init_channel_subsystem (void)
765 if (ret) 801 if (ret)
766 goto out; 802 goto out;
767 803
768 ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw); 804 ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
769 if (ret) 805 if (ret)
770 goto out; 806 goto out;
771 807
@@ -845,7 +881,7 @@ out_unregister:
845out_bus: 881out_bus:
846 bus_unregister(&css_bus_type); 882 bus_unregister(&css_bus_type);
847out: 883out:
848 s390_unregister_crw_handler(CRW_RSC_CSS); 884 crw_unregister_handler(CRW_RSC_CSS);
849 chsc_free_sei_area(); 885 chsc_free_sei_area();
850 kfree(slow_subchannel_set); 886 kfree(slow_subchannel_set);
851 pr_alert("The CSS device driver initialization failed with " 887 pr_alert("The CSS device driver initialization failed with "
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e28f8ae53453..c4d2f667a2f6 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -457,12 +457,13 @@ int ccw_device_set_online(struct ccw_device *cdev)
457 return (ret == 0) ? -ENODEV : ret; 457 return (ret == 0) ? -ENODEV : ret;
458} 458}
459 459
460static void online_store_handle_offline(struct ccw_device *cdev) 460static int online_store_handle_offline(struct ccw_device *cdev)
461{ 461{
462 if (cdev->private->state == DEV_STATE_DISCONNECTED) 462 if (cdev->private->state == DEV_STATE_DISCONNECTED)
463 ccw_device_remove_disconnected(cdev); 463 ccw_device_remove_disconnected(cdev);
464 else if (cdev->drv && cdev->drv->set_offline) 464 else if (cdev->online && cdev->drv && cdev->drv->set_offline)
465 ccw_device_set_offline(cdev); 465 return ccw_device_set_offline(cdev);
466 return 0;
466} 467}
467 468
468static int online_store_recog_and_online(struct ccw_device *cdev) 469static int online_store_recog_and_online(struct ccw_device *cdev)
@@ -530,13 +531,10 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
530 goto out; 531 goto out;
531 switch (i) { 532 switch (i) {
532 case 0: 533 case 0:
533 online_store_handle_offline(cdev); 534 ret = online_store_handle_offline(cdev);
534 ret = count;
535 break; 535 break;
536 case 1: 536 case 1:
537 ret = online_store_handle_online(cdev, force); 537 ret = online_store_handle_online(cdev, force);
538 if (!ret)
539 ret = count;
540 break; 538 break;
541 default: 539 default:
542 ret = -EINVAL; 540 ret = -EINVAL;
@@ -545,7 +543,7 @@ out:
545 if (cdev->drv) 543 if (cdev->drv)
546 module_put(cdev->drv->owner); 544 module_put(cdev->drv->owner);
547 atomic_set(&cdev->private->onoff, 0); 545 atomic_set(&cdev->private->onoff, 0);
548 return ret; 546 return (ret < 0) ? ret : count;
549} 547}
550 548
551static ssize_t 549static ssize_t
@@ -681,35 +679,22 @@ get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
681 return dev ? to_ccwdev(dev) : NULL; 679 return dev ? to_ccwdev(dev) : NULL;
682} 680}
683 681
684static void 682void ccw_device_do_unbind_bind(struct work_struct *work)
685ccw_device_add_changed(struct work_struct *work)
686{
687 struct ccw_device_private *priv;
688 struct ccw_device *cdev;
689
690 priv = container_of(work, struct ccw_device_private, kick_work);
691 cdev = priv->cdev;
692 if (device_add(&cdev->dev)) {
693 put_device(&cdev->dev);
694 return;
695 }
696 set_bit(1, &cdev->private->registered);
697}
698
699void ccw_device_do_unreg_rereg(struct work_struct *work)
700{ 683{
701 struct ccw_device_private *priv; 684 struct ccw_device_private *priv;
702 struct ccw_device *cdev; 685 struct ccw_device *cdev;
703 struct subchannel *sch; 686 struct subchannel *sch;
687 int ret;
704 688
705 priv = container_of(work, struct ccw_device_private, kick_work); 689 priv = container_of(work, struct ccw_device_private, kick_work);
706 cdev = priv->cdev; 690 cdev = priv->cdev;
707 sch = to_subchannel(cdev->dev.parent); 691 sch = to_subchannel(cdev->dev.parent);
708 692
709 ccw_device_unregister(cdev); 693 if (test_bit(1, &cdev->private->registered)) {
710 PREPARE_WORK(&cdev->private->kick_work, 694 device_release_driver(&cdev->dev);
711 ccw_device_add_changed); 695 ret = device_attach(&cdev->dev);
712 queue_work(ccw_device_work, &cdev->private->kick_work); 696 WARN_ON(ret == -ENODEV);
697 }
713} 698}
714 699
715static void 700static void
@@ -1035,8 +1020,6 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
1035void 1020void
1036io_subchannel_recog_done(struct ccw_device *cdev) 1021io_subchannel_recog_done(struct ccw_device *cdev)
1037{ 1022{
1038 struct subchannel *sch;
1039
1040 if (css_init_done == 0) { 1023 if (css_init_done == 0) {
1041 cdev->private->flags.recog_done = 1; 1024 cdev->private->flags.recog_done = 1;
1042 return; 1025 return;
@@ -1047,7 +1030,6 @@ io_subchannel_recog_done(struct ccw_device *cdev)
1047 /* Remove device found not operational. */ 1030 /* Remove device found not operational. */
1048 if (!get_device(&cdev->dev)) 1031 if (!get_device(&cdev->dev))
1049 break; 1032 break;
1050 sch = to_subchannel(cdev->dev.parent);
1051 PREPARE_WORK(&cdev->private->kick_work, 1033 PREPARE_WORK(&cdev->private->kick_work,
1052 ccw_device_call_sch_unregister); 1034 ccw_device_call_sch_unregister);
1053 queue_work(slow_path_wq, &cdev->private->kick_work); 1035 queue_work(slow_path_wq, &cdev->private->kick_work);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 0f2e63ea48de..85e01846ca65 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -80,7 +80,7 @@ void io_subchannel_init_config(struct subchannel *sch);
80 80
81int ccw_device_cancel_halt_clear(struct ccw_device *); 81int ccw_device_cancel_halt_clear(struct ccw_device *);
82 82
83void ccw_device_do_unreg_rereg(struct work_struct *); 83void ccw_device_do_unbind_bind(struct work_struct *);
84void ccw_device_move_to_orphanage(struct work_struct *); 84void ccw_device_move_to_orphanage(struct work_struct *);
85int ccw_device_is_orphan(struct ccw_device *); 85int ccw_device_is_orphan(struct ccw_device *);
86 86
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 8df5eaafc5ab..87b4bfca080f 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -194,7 +194,7 @@ ccw_device_handle_oper(struct ccw_device *cdev)
194 cdev->id.dev_type != cdev->private->senseid.dev_type || 194 cdev->id.dev_type != cdev->private->senseid.dev_type ||
195 cdev->id.dev_model != cdev->private->senseid.dev_model) { 195 cdev->id.dev_model != cdev->private->senseid.dev_model) {
196 PREPARE_WORK(&cdev->private->kick_work, 196 PREPARE_WORK(&cdev->private->kick_work,
197 ccw_device_do_unreg_rereg); 197 ccw_device_do_unbind_bind);
198 queue_work(ccw_device_work, &cdev->private->kick_work); 198 queue_work(ccw_device_work, &cdev->private->kick_work);
199 return 0; 199 return 0;
200 } 200 }
@@ -366,7 +366,7 @@ static void ccw_device_oper_notify(struct ccw_device *cdev)
366 } 366 }
367 /* Driver doesn't want device back. */ 367 /* Driver doesn't want device back. */
368 ccw_device_set_notoper(cdev); 368 ccw_device_set_notoper(cdev);
369 PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unreg_rereg); 369 PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unbind_bind);
370 queue_work(ccw_device_work, &cdev->private->kick_work); 370 queue_work(ccw_device_work, &cdev->private->kick_work);
371} 371}
372 372
@@ -728,7 +728,7 @@ static void ccw_device_generic_notoper(struct ccw_device *cdev,
728{ 728{
729 struct subchannel *sch; 729 struct subchannel *sch;
730 730
731 cdev->private->state = DEV_STATE_NOT_OPER; 731 ccw_device_set_notoper(cdev);
732 sch = to_subchannel(cdev->dev.parent); 732 sch = to_subchannel(cdev->dev.parent);
733 css_schedule_eval(sch->schid); 733 css_schedule_eval(sch->schid);
734} 734}
@@ -1052,7 +1052,7 @@ ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1052 sch = to_subchannel(cdev->dev.parent); 1052 sch = to_subchannel(cdev->dev.parent);
1053 /* 1053 /*
1054 * An interrupt in state offline means a previous disable was not 1054 * An interrupt in state offline means a previous disable was not
1055 * successful. Try again. 1055 * successful - should not happen, but we try to disable again.
1056 */ 1056 */
1057 cio_disable_subchannel(sch); 1057 cio_disable_subchannel(sch);
1058} 1058}
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index eabcc42d63df..151754d54745 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -680,7 +680,7 @@ int ccw_device_tm_intrg(struct ccw_device *cdev)
680 if (cdev->private->state != DEV_STATE_ONLINE) 680 if (cdev->private->state != DEV_STATE_ONLINE)
681 return -EIO; 681 return -EIO;
682 if (!scsw_is_tm(&sch->schib.scsw) || 682 if (!scsw_is_tm(&sch->schib.scsw) ||
683 !(scsw_actl(&sch->schib.scsw) | SCSW_ACTL_START_PEND)) 683 !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
684 return -EINVAL; 684 return -EINVAL;
685 return cio_tm_intrg(sch); 685 return cio_tm_intrg(sch);
686} 686}
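The device_ops.c hunk is a one-character fix: OR-ing the activity field with the non-zero SCSW_ACTL_START_PEND mask can never yield zero, so that half of the -EINVAL guard was unreachable; the AND actually tests whether start-pending is set. A standalone illustration, with an invented flag value standing in for SCSW_ACTL_START_PEND:

#include <stdio.h>

#define START_PEND 0x20u	/* stand-in value, for illustration only */

int main(void)
{
	unsigned int actl = 0x04;	/* some activity bits, start-pending not set */

	/* old test: never true, because actl | 0x20 is always non-zero */
	printf("old guard triggers: %d\n", !(actl | START_PEND));
	/* new test: correctly reports that the flag is missing */
	printf("new guard triggers: %d\n", !(actl & START_PEND));
	return 0;
}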
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 42f2b09631b6..13bcb8114388 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -186,6 +186,9 @@ struct qdio_input_q {
186 /* input buffer acknowledgement flag */ 186 /* input buffer acknowledgement flag */
187 int polling; 187 int polling;
188 188
189 /* first ACK'ed buffer */
190 int ack_start;
191
189 /* how much sbals are acknowledged with qebsm */ 192 /* how much sbals are acknowledged with qebsm */
190 int ack_count; 193 int ack_count;
191 194
@@ -234,7 +237,7 @@ struct qdio_q {
234 int first_to_check; 237 int first_to_check;
235 238
236 /* first_to_check of the last time */ 239 /* first_to_check of the last time */
237 int last_move_ftc; 240 int last_move;
238 241
239 /* beginning position for calling the program */ 242 /* beginning position for calling the program */
240 int first_to_kick; 243 int first_to_kick;
@@ -244,7 +247,6 @@ struct qdio_q {
244 247
245 struct qdio_irq *irq_ptr; 248 struct qdio_irq *irq_ptr;
246 struct tasklet_struct tasklet; 249 struct tasklet_struct tasklet;
247 spinlock_t lock;
248 250
249 /* error condition during a data transfer */ 251 /* error condition during a data transfer */
250 unsigned int qdio_error; 252 unsigned int qdio_error;
@@ -354,7 +356,7 @@ int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state,
354 int auto_ack); 356 int auto_ack);
355void qdio_check_outbound_after_thinint(struct qdio_q *q); 357void qdio_check_outbound_after_thinint(struct qdio_q *q);
356int qdio_inbound_q_moved(struct qdio_q *q); 358int qdio_inbound_q_moved(struct qdio_q *q);
357void qdio_kick_inbound_handler(struct qdio_q *q); 359void qdio_kick_handler(struct qdio_q *q);
358void qdio_stop_polling(struct qdio_q *q); 360void qdio_stop_polling(struct qdio_q *q);
359int qdio_siga_sync_q(struct qdio_q *q); 361int qdio_siga_sync_q(struct qdio_q *q);
360 362
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index da7afb04e71f..e3434b34f86c 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -63,8 +63,9 @@ static int qstat_show(struct seq_file *m, void *v)
63 seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci); 63 seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci);
64 seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); 64 seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
65 seq_printf(m, "ftc: %d\n", q->first_to_check); 65 seq_printf(m, "ftc: %d\n", q->first_to_check);
66 seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc); 66 seq_printf(m, "last_move: %d\n", q->last_move);
67 seq_printf(m, "polling: %d\n", q->u.in.polling); 67 seq_printf(m, "polling: %d\n", q->u.in.polling);
68 seq_printf(m, "ack start: %d\n", q->u.in.ack_start);
68 seq_printf(m, "ack count: %d\n", q->u.in.ack_count); 69 seq_printf(m, "ack count: %d\n", q->u.in.ack_count);
69 seq_printf(m, "slsb buffer states:\n"); 70 seq_printf(m, "slsb buffer states:\n");
70 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); 71 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 10cb0f8726e5..9e8a2914259b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -380,11 +380,11 @@ inline void qdio_stop_polling(struct qdio_q *q)
380 380
381 /* show the card that we are not polling anymore */ 381 /* show the card that we are not polling anymore */
382 if (is_qebsm(q)) { 382 if (is_qebsm(q)) {
383 set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT, 383 set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
384 q->u.in.ack_count); 384 q->u.in.ack_count);
385 q->u.in.ack_count = 0; 385 q->u.in.ack_count = 0;
386 } else 386 } else
387 set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); 387 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
388} 388}
389 389
390static void announce_buffer_error(struct qdio_q *q, int count) 390static void announce_buffer_error(struct qdio_q *q, int count)
@@ -419,15 +419,15 @@ static inline void inbound_primed(struct qdio_q *q, int count)
419 if (!q->u.in.polling) { 419 if (!q->u.in.polling) {
420 q->u.in.polling = 1; 420 q->u.in.polling = 1;
421 q->u.in.ack_count = count; 421 q->u.in.ack_count = count;
422 q->last_move_ftc = q->first_to_check; 422 q->u.in.ack_start = q->first_to_check;
423 return; 423 return;
424 } 424 }
425 425
426 /* delete the previous ACK's */ 426 /* delete the previous ACK's */
427 set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT, 427 set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
428 q->u.in.ack_count); 428 q->u.in.ack_count);
429 q->u.in.ack_count = count; 429 q->u.in.ack_count = count;
430 q->last_move_ftc = q->first_to_check; 430 q->u.in.ack_start = q->first_to_check;
431 return; 431 return;
432 } 432 }
433 433
@@ -439,14 +439,13 @@ static inline void inbound_primed(struct qdio_q *q, int count)
439 if (q->u.in.polling) { 439 if (q->u.in.polling) {
440 /* reset the previous ACK but first set the new one */ 440 /* reset the previous ACK but first set the new one */
441 set_buf_state(q, new, SLSB_P_INPUT_ACK); 441 set_buf_state(q, new, SLSB_P_INPUT_ACK);
442 set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); 442 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
443 } 443 } else {
444 else {
445 q->u.in.polling = 1; 444 q->u.in.polling = 1;
446 set_buf_state(q, q->first_to_check, SLSB_P_INPUT_ACK); 445 set_buf_state(q, new, SLSB_P_INPUT_ACK);
447 } 446 }
448 447
449 q->last_move_ftc = new; 448 q->u.in.ack_start = new;
450 count--; 449 count--;
451 if (!count) 450 if (!count)
452 return; 451 return;
@@ -455,7 +454,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
455 * Need to change all PRIMED buffers to NOT_INIT, otherwise 454 * Need to change all PRIMED buffers to NOT_INIT, otherwise
456 * we're losing initiative in the thinint code. 455 * we're losing initiative in the thinint code.
457 */ 456 */
458 set_buf_states(q, next_buf(q->first_to_check), SLSB_P_INPUT_NOT_INIT, 457 set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
459 count); 458 count);
460} 459}
461 460
@@ -523,7 +522,8 @@ int qdio_inbound_q_moved(struct qdio_q *q)
523 522
524 bufnr = get_inbound_buffer_frontier(q); 523 bufnr = get_inbound_buffer_frontier(q);
525 524
526 if ((bufnr != q->last_move_ftc) || q->qdio_error) { 525 if ((bufnr != q->last_move) || q->qdio_error) {
526 q->last_move = bufnr;
527 if (!need_siga_sync(q) && !pci_out_supported(q)) 527 if (!need_siga_sync(q) && !pci_out_supported(q))
528 q->u.in.timestamp = get_usecs(); 528 q->u.in.timestamp = get_usecs();
529 529
@@ -570,29 +570,30 @@ static int qdio_inbound_q_done(struct qdio_q *q)
570 } 570 }
571} 571}
572 572
573void qdio_kick_inbound_handler(struct qdio_q *q) 573void qdio_kick_handler(struct qdio_q *q)
574{ 574{
575 int count, start, end; 575 int start = q->first_to_kick;
576 576 int end = q->first_to_check;
577 qdio_perf_stat_inc(&perf_stats.inbound_handler); 577 int count;
578
579 start = q->first_to_kick;
580 end = q->first_to_check;
581 if (end >= start)
582 count = end - start;
583 else
584 count = end + QDIO_MAX_BUFFERS_PER_Q - start;
585
586 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);
587 578
588 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) 579 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
589 return; 580 return;
590 581
591 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, 582 count = sub_buf(end, start);
592 start, count, q->irq_ptr->int_parm); 583
584 if (q->is_input_q) {
585 qdio_perf_stat_inc(&perf_stats.inbound_handler);
586 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);
587 } else {
588 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: nr:%1d", q->nr);
589 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);
590 }
591
592 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
593 q->irq_ptr->int_parm);
593 594
594 /* for the next time */ 595 /* for the next time */
595 q->first_to_kick = q->first_to_check; 596 q->first_to_kick = end;
596 q->qdio_error = 0; 597 q->qdio_error = 0;
597} 598}
598 599
@@ -603,7 +604,7 @@ again:
603 if (!qdio_inbound_q_moved(q)) 604 if (!qdio_inbound_q_moved(q))
604 return; 605 return;
605 606
606 qdio_kick_inbound_handler(q); 607 qdio_kick_handler(q);
607 608
608 if (!qdio_inbound_q_done(q)) 609 if (!qdio_inbound_q_done(q))
609 /* means poll time is not yet over */ 610 /* means poll time is not yet over */
@@ -698,21 +699,21 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
698 699
699 bufnr = get_outbound_buffer_frontier(q); 700 bufnr = get_outbound_buffer_frontier(q);
700 701
701 if ((bufnr != q->last_move_ftc) || q->qdio_error) { 702 if ((bufnr != q->last_move) || q->qdio_error) {
702 q->last_move_ftc = bufnr; 703 q->last_move = bufnr;
703 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); 704 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
704 return 1; 705 return 1;
705 } else 706 } else
706 return 0; 707 return 0;
707} 708}
708 709
709static void qdio_kick_outbound_q(struct qdio_q *q) 710static int qdio_kick_outbound_q(struct qdio_q *q)
710{ 711{
711 unsigned int busy_bit; 712 unsigned int busy_bit;
712 int cc; 713 int cc;
713 714
714 if (!need_siga_out(q)) 715 if (!need_siga_out(q))
715 return; 716 return 0;
716 717
717 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); 718 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
718 qdio_perf_stat_inc(&perf_stats.siga_out); 719 qdio_perf_stat_inc(&perf_stats.siga_out);
@@ -724,75 +725,37 @@ static void qdio_kick_outbound_q(struct qdio_q *q)
724 case 2: 725 case 2:
725 if (busy_bit) { 726 if (busy_bit) {
726 DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr); 727 DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
727 q->qdio_error = cc | QDIO_ERROR_SIGA_BUSY; 728 cc |= QDIO_ERROR_SIGA_BUSY;
728 } else { 729 } else
729 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", 730 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
730 q->nr);
731 q->qdio_error = cc;
732 }
733 break; 731 break;
734 case 1: 732 case 1:
735 case 3: 733 case 3:
736 DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); 734 DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
737 q->qdio_error = cc;
738 break; 735 break;
739 } 736 }
740} 737 return cc;
741
742static void qdio_kick_outbound_handler(struct qdio_q *q)
743{
744 int start, end, count;
745
746 start = q->first_to_kick;
747 end = q->last_move_ftc;
748 if (end >= start)
749 count = end - start;
750 else
751 count = end + QDIO_MAX_BUFFERS_PER_Q - start;
752
753 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr);
754 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);
755
756 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
757 return;
758
759 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
760 q->irq_ptr->int_parm);
761
762 /* for the next time: */
763 q->first_to_kick = q->last_move_ftc;
764 q->qdio_error = 0;
765} 738}
766 739
767static void __qdio_outbound_processing(struct qdio_q *q) 740static void __qdio_outbound_processing(struct qdio_q *q)
768{ 741{
769 unsigned long flags;
770
771 qdio_perf_stat_inc(&perf_stats.tasklet_outbound); 742 qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
772 spin_lock_irqsave(&q->lock, flags);
773
774 BUG_ON(atomic_read(&q->nr_buf_used) < 0); 743 BUG_ON(atomic_read(&q->nr_buf_used) < 0);
775 744
776 if (qdio_outbound_q_moved(q)) 745 if (qdio_outbound_q_moved(q))
777 qdio_kick_outbound_handler(q); 746 qdio_kick_handler(q);
778
779 spin_unlock_irqrestore(&q->lock, flags);
780 747
781 if (queue_type(q) == QDIO_ZFCP_QFMT) { 748 if (queue_type(q) == QDIO_ZFCP_QFMT)
782 if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) 749 if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
783 tasklet_schedule(&q->tasklet); 750 goto sched;
784 return;
785 }
786 751
787 /* bail out for HiperSockets unicast queues */ 752 /* bail out for HiperSockets unicast queues */
788 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) 753 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
789 return; 754 return;
790 755
791 if ((queue_type(q) == QDIO_IQDIO_QFMT) && 756 if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
792 (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) { 757 (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
793 tasklet_schedule(&q->tasklet); 758 goto sched;
794 return;
795 }
796 759
797 if (q->u.out.pci_out_enabled) 760 if (q->u.out.pci_out_enabled)
798 return; 761 return;
@@ -810,6 +773,12 @@ static void __qdio_outbound_processing(struct qdio_q *q)
810 qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer); 773 qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
811 } 774 }
812 } 775 }
776 return;
777
778sched:
779 if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
780 return;
781 tasklet_schedule(&q->tasklet);
813} 782}
814 783
815/* outbound tasklet */ 784/* outbound tasklet */
@@ -822,6 +791,9 @@ void qdio_outbound_processing(unsigned long data)
822void qdio_outbound_timer(unsigned long data) 791void qdio_outbound_timer(unsigned long data)
823{ 792{
824 struct qdio_q *q = (struct qdio_q *)data; 793 struct qdio_q *q = (struct qdio_q *)data;
794
795 if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
796 return;
825 tasklet_schedule(&q->tasklet); 797 tasklet_schedule(&q->tasklet);
826} 798}
827 799
@@ -863,6 +835,9 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
863 int i; 835 int i;
864 struct qdio_q *q; 836 struct qdio_q *q;
865 837
838 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
839 return;
840
866 qdio_perf_stat_inc(&perf_stats.pci_int); 841 qdio_perf_stat_inc(&perf_stats.pci_int);
867 842
868 for_each_input_queue(irq_ptr, q, i) 843 for_each_input_queue(irq_ptr, q, i)
@@ -1065,8 +1040,9 @@ EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
1065 * @cdev: associated ccw device 1040 * @cdev: associated ccw device
1066 * @how: use halt or clear to shutdown 1041 * @how: use halt or clear to shutdown
1067 * 1042 *
1068 * This function calls qdio_shutdown() for @cdev with method @how 1043 * This function calls qdio_shutdown() for @cdev with method @how
1069 * and on success qdio_free() for @cdev. 1044 * and qdio_free(). The qdio_free() return value is ignored since
1045 * !irq_ptr is already checked.
1070 */ 1046 */
1071int qdio_cleanup(struct ccw_device *cdev, int how) 1047int qdio_cleanup(struct ccw_device *cdev, int how)
1072{ 1048{
@@ -1077,8 +1053,8 @@ int qdio_cleanup(struct ccw_device *cdev, int how)
1077 return -ENODEV; 1053 return -ENODEV;
1078 1054
1079 rc = qdio_shutdown(cdev, how); 1055 rc = qdio_shutdown(cdev, how);
1080 if (rc == 0) 1056
1081 rc = qdio_free(cdev); 1057 qdio_free(cdev);
1082 return rc; 1058 return rc;
1083} 1059}
1084EXPORT_SYMBOL_GPL(qdio_cleanup); 1060EXPORT_SYMBOL_GPL(qdio_cleanup);
@@ -1090,11 +1066,11 @@ static void qdio_shutdown_queues(struct ccw_device *cdev)
1090 int i; 1066 int i;
1091 1067
1092 for_each_input_queue(irq_ptr, q, i) 1068 for_each_input_queue(irq_ptr, q, i)
1093 tasklet_disable(&q->tasklet); 1069 tasklet_kill(&q->tasklet);
1094 1070
1095 for_each_output_queue(irq_ptr, q, i) { 1071 for_each_output_queue(irq_ptr, q, i) {
1096 tasklet_disable(&q->tasklet);
1097 del_timer(&q->u.out.timer); 1072 del_timer(&q->u.out.timer);
1073 tasklet_kill(&q->tasklet);
1098 } 1074 }
1099} 1075}
1100 1076
@@ -1112,6 +1088,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
1112 if (!irq_ptr) 1088 if (!irq_ptr)
1113 return -ENODEV; 1089 return -ENODEV;
1114 1090
1091 BUG_ON(irqs_disabled());
1115 DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); 1092 DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
1116 1093
1117 mutex_lock(&irq_ptr->setup_mutex); 1094 mutex_lock(&irq_ptr->setup_mutex);
@@ -1124,6 +1101,12 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
1124 return 0; 1101 return 0;
1125 } 1102 }
1126 1103
1104 /*
1105 * Indicate that the device is going down. Scheduling the queue
1106 * tasklets is forbidden from here on.
1107 */
1108 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
1109
1127 tiqdio_remove_input_queues(irq_ptr); 1110 tiqdio_remove_input_queues(irq_ptr);
1128 qdio_shutdown_queues(cdev); 1111 qdio_shutdown_queues(cdev);
1129 qdio_shutdown_debug_entries(irq_ptr, cdev); 1112 qdio_shutdown_debug_entries(irq_ptr, cdev);
@@ -1403,9 +1386,8 @@ int qdio_activate(struct ccw_device *cdev)
1403 switch (irq_ptr->state) { 1386 switch (irq_ptr->state) {
1404 case QDIO_IRQ_STATE_STOPPED: 1387 case QDIO_IRQ_STATE_STOPPED:
1405 case QDIO_IRQ_STATE_ERR: 1388 case QDIO_IRQ_STATE_ERR:
1406 mutex_unlock(&irq_ptr->setup_mutex); 1389 rc = -EIO;
1407 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 1390 break;
1408 return -EIO;
1409 default: 1391 default:
1410 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); 1392 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
1411 rc = 0; 1393 rc = 0;
@@ -1442,10 +1424,10 @@ static inline int buf_in_between(int bufnr, int start, int count)
1442 * @bufnr: first buffer to process 1424 * @bufnr: first buffer to process
1443 * @count: how many buffers are emptied 1425 * @count: how many buffers are emptied
1444 */ 1426 */
1445static void handle_inbound(struct qdio_q *q, unsigned int callflags, 1427static int handle_inbound(struct qdio_q *q, unsigned int callflags,
1446 int bufnr, int count) 1428 int bufnr, int count)
1447{ 1429{
1448 int used, cc, diff; 1430 int used, diff;
1449 1431
1450 if (!q->u.in.polling) 1432 if (!q->u.in.polling)
1451 goto set; 1433 goto set;
@@ -1456,19 +1438,18 @@ static void handle_inbound(struct qdio_q *q, unsigned int callflags,
1456 q->u.in.polling = 0; 1438 q->u.in.polling = 0;
1457 q->u.in.ack_count = 0; 1439 q->u.in.ack_count = 0;
1458 goto set; 1440 goto set;
1459 } else if (buf_in_between(q->last_move_ftc, bufnr, count)) { 1441 } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
1460 if (is_qebsm(q)) { 1442 if (is_qebsm(q)) {
1461 /* partial overwrite, just update last_move_ftc */ 1443 /* partial overwrite, just update ack_start */
1462 diff = add_buf(bufnr, count); 1444 diff = add_buf(bufnr, count);
1463 diff = sub_buf(diff, q->last_move_ftc); 1445 diff = sub_buf(diff, q->u.in.ack_start);
1464 q->u.in.ack_count -= diff; 1446 q->u.in.ack_count -= diff;
1465 if (q->u.in.ack_count <= 0) { 1447 if (q->u.in.ack_count <= 0) {
1466 q->u.in.polling = 0; 1448 q->u.in.polling = 0;
1467 q->u.in.ack_count = 0; 1449 q->u.in.ack_count = 0;
1468 /* TODO: must we set last_move_ftc to something meaningful? */
1469 goto set; 1450 goto set;
1470 } 1451 }
1471 q->last_move_ftc = add_buf(q->last_move_ftc, diff); 1452 q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
1472 } 1453 }
1473 else 1454 else
1474 /* the only ACK will be deleted, so stop polling */ 1455 /* the only ACK will be deleted, so stop polling */
@@ -1483,13 +1464,11 @@ set:
1483 1464
1484 /* no need to signal as long as the adapter had free buffers */ 1465 /* no need to signal as long as the adapter had free buffers */
1485 if (used) 1466 if (used)
1486 return; 1467 return 0;
1487 1468
1488 if (need_siga_in(q)) { 1469 if (need_siga_in(q))
1489 cc = qdio_siga_input(q); 1470 return qdio_siga_input(q);
1490 if (cc) 1471 return 0;
1491 q->qdio_error = cc;
1492 }
1493} 1472}
1494 1473
1495/** 1474/**
@@ -1499,11 +1478,11 @@ set:
1499 * @bufnr: first buffer to process 1478 * @bufnr: first buffer to process
1500 * @count: how many buffers are filled 1479 * @count: how many buffers are filled
1501 */ 1480 */
1502static void handle_outbound(struct qdio_q *q, unsigned int callflags, 1481static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1503 int bufnr, int count) 1482 int bufnr, int count)
1504{ 1483{
1505 unsigned char state; 1484 unsigned char state;
1506 int used; 1485 int used, rc = 0;
1507 1486
1508 qdio_perf_stat_inc(&perf_stats.outbound_handler); 1487 qdio_perf_stat_inc(&perf_stats.outbound_handler);
1509 1488
@@ -1518,27 +1497,26 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags,
1518 1497
1519 if (queue_type(q) == QDIO_IQDIO_QFMT) { 1498 if (queue_type(q) == QDIO_IQDIO_QFMT) {
1520 if (multicast_outbound(q)) 1499 if (multicast_outbound(q))
1521 qdio_kick_outbound_q(q); 1500 rc = qdio_kick_outbound_q(q);
1522 else 1501 else
1523 if ((q->irq_ptr->ssqd_desc.mmwc > 1) && 1502 if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
1524 (count > 1) && 1503 (count > 1) &&
1525 (count <= q->irq_ptr->ssqd_desc.mmwc)) { 1504 (count <= q->irq_ptr->ssqd_desc.mmwc)) {
1526 /* exploit enhanced SIGA */ 1505 /* exploit enhanced SIGA */
1527 q->u.out.use_enh_siga = 1; 1506 q->u.out.use_enh_siga = 1;
1528 qdio_kick_outbound_q(q); 1507 rc = qdio_kick_outbound_q(q);
1529 } else { 1508 } else {
1530 /* 1509 /*
1531 * One siga-w per buffer required for unicast 1510 * One siga-w per buffer required for unicast
1532 * HiperSockets. 1511 * HiperSockets.
1533 */ 1512 */
1534 q->u.out.use_enh_siga = 0; 1513 q->u.out.use_enh_siga = 0;
1535 while (count--) 1514 while (count--) {
1536 qdio_kick_outbound_q(q); 1515 rc = qdio_kick_outbound_q(q);
1516 if (rc)
1517 goto out;
1518 }
1537 } 1519 }
1538
1539 /* report CC=2 conditions synchronously */
1540 if (q->qdio_error)
1541 __qdio_outbound_processing(q);
1542 goto out; 1520 goto out;
1543 } 1521 }
1544 1522
@@ -1550,14 +1528,14 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags,
1550 /* try to fast requeue buffers */ 1528 /* try to fast requeue buffers */
1551 get_buf_state(q, prev_buf(bufnr), &state, 0); 1529 get_buf_state(q, prev_buf(bufnr), &state, 0);
1552 if (state != SLSB_CU_OUTPUT_PRIMED) 1530 if (state != SLSB_CU_OUTPUT_PRIMED)
1553 qdio_kick_outbound_q(q); 1531 rc = qdio_kick_outbound_q(q);
1554 else { 1532 else {
1555 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req"); 1533 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
1556 qdio_perf_stat_inc(&perf_stats.fast_requeue); 1534 qdio_perf_stat_inc(&perf_stats.fast_requeue);
1557 } 1535 }
1558out: 1536out:
1559 /* Fixme: could wait forever if called from process context */
1560 tasklet_schedule(&q->tasklet); 1537 tasklet_schedule(&q->tasklet);
1538 return rc;
1561} 1539}
1562 1540
1563/** 1541/**
@@ -1596,14 +1574,12 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1596 return -EBUSY; 1574 return -EBUSY;
1597 1575
1598 if (callflags & QDIO_FLAG_SYNC_INPUT) 1576 if (callflags & QDIO_FLAG_SYNC_INPUT)
1599 handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr, 1577 return handle_inbound(irq_ptr->input_qs[q_nr],
1600 count); 1578 callflags, bufnr, count);
1601 else if (callflags & QDIO_FLAG_SYNC_OUTPUT) 1579 else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
1602 handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr, 1580 return handle_outbound(irq_ptr->output_qs[q_nr],
1603 count); 1581 callflags, bufnr, count);
1604 else 1582 return -EINVAL;
1605 return -EINVAL;
1606 return 0;
1607} 1583}
1608EXPORT_SYMBOL_GPL(do_QDIO); 1584EXPORT_SYMBOL_GPL(do_QDIO);
1609 1585
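Several hunks above replace the open-coded wraparound arithmetic (end + QDIO_MAX_BUFFERS_PER_Q - start) with the sub_buf()/add_buf() helpers from the driver's qdio.h. Assuming the usual queue size of 128 buffers, they boil down to modular arithmetic on a power-of-two ring; a self-contained sketch (the sketch_ names are invented, the constants mirror the driver's values):

#define SKETCH_BUFFERS_PER_Q	128
#define SKETCH_BUFFERS_MASK	(SKETCH_BUFFERS_PER_Q - 1)

static inline int sketch_add_buf(int bufnr, int inc)
{
	/* e.g. sketch_add_buf(126, 5) == 3 */
	return (bufnr + inc) & SKETCH_BUFFERS_MASK;
}

static inline int sketch_sub_buf(int bufnr, int dec)
{
	/* e.g. sketch_sub_buf(3, 5) == 126 */
	return (bufnr - dec) & SKETCH_BUFFERS_MASK;
}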
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index c08356b95bf5..18d54fc21ce9 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -117,7 +117,6 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
117 q->mask = 1 << (31 - i); 117 q->mask = 1 << (31 - i);
118 q->nr = i; 118 q->nr = i;
119 q->handler = handler; 119 q->handler = handler;
120 spin_lock_init(&q->lock);
121} 120}
122 121
123static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, 122static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 8e90e147b746..c655d011a78d 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -31,6 +31,7 @@
31 31
32/* list of thin interrupt input queues */ 32/* list of thin interrupt input queues */
33static LIST_HEAD(tiq_list); 33static LIST_HEAD(tiq_list);
34DEFINE_MUTEX(tiq_list_lock);
34 35
35/* adapter local summary indicator */ 36/* adapter local summary indicator */
36static unsigned char *tiqdio_alsi; 37static unsigned char *tiqdio_alsi;
@@ -95,12 +96,11 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
95 if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync) 96 if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
96 css_qdio_omit_svs = 1; 97 css_qdio_omit_svs = 1;
97 98
98 for_each_input_queue(irq_ptr, q, i) { 99 mutex_lock(&tiq_list_lock);
100 for_each_input_queue(irq_ptr, q, i)
99 list_add_rcu(&q->entry, &tiq_list); 101 list_add_rcu(&q->entry, &tiq_list);
100 synchronize_rcu(); 102 mutex_unlock(&tiq_list_lock);
101 }
102 xchg(irq_ptr->dsci, 1); 103 xchg(irq_ptr->dsci, 1);
103 tasklet_schedule(&tiqdio_tasklet);
104} 104}
105 105
106/* 106/*
@@ -118,7 +118,10 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
118 /* if establish triggered an error */ 118 /* if establish triggered an error */
119 if (!q || !q->entry.prev || !q->entry.next) 119 if (!q || !q->entry.prev || !q->entry.next)
120 continue; 120 continue;
121
122 mutex_lock(&tiq_list_lock);
121 list_del_rcu(&q->entry); 123 list_del_rcu(&q->entry);
124 mutex_unlock(&tiq_list_lock);
122 synchronize_rcu(); 125 synchronize_rcu();
123 } 126 }
124} 127}
@@ -155,15 +158,15 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
155 */ 158 */
156 qdio_check_outbound_after_thinint(q); 159 qdio_check_outbound_after_thinint(q);
157 160
158again:
159 if (!qdio_inbound_q_moved(q)) 161 if (!qdio_inbound_q_moved(q))
160 return; 162 return;
161 163
162 qdio_kick_inbound_handler(q); 164 qdio_kick_handler(q);
163 165
164 if (!tiqdio_inbound_q_done(q)) { 166 if (!tiqdio_inbound_q_done(q)) {
165 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); 167 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
166 goto again; 168 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
169 tasklet_schedule(&q->tasklet);
167 } 170 }
168 171
169 qdio_stop_polling(q); 172 qdio_stop_polling(q);
@@ -173,7 +176,8 @@ again:
173 */ 176 */
174 if (!tiqdio_inbound_q_done(q)) { 177 if (!tiqdio_inbound_q_done(q)) {
175 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2); 178 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
176 goto again; 179 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
180 tasklet_schedule(&q->tasklet);
177 } 181 }
178} 182}
179 183
@@ -366,10 +370,11 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
366 370
367void __exit tiqdio_unregister_thinints(void) 371void __exit tiqdio_unregister_thinints(void)
368{ 372{
369 tasklet_disable(&tiqdio_tasklet); 373 WARN_ON(!list_empty(&tiq_list));
370 374
371 if (tiqdio_alsi) { 375 if (tiqdio_alsi) {
372 s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC); 376 s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
373 isc_unregister(QDIO_AIRQ_ISC); 377 isc_unregister(QDIO_AIRQ_ISC);
374 } 378 }
379 tasklet_kill(&tiqdio_tasklet);
375} 380}
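The thinint hunks add tiq_list_lock because RCU only protects the readers that walk tiq_list from the interrupt path; concurrent writers still need mutual exclusion, and an entry may be reused only after a grace period has elapsed. A condensed sketch of that pattern, assuming nothing beyond what the hunks show (the sketch_ names are invented):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct sketch_entry {
	struct list_head entry;
};

static LIST_HEAD(sketch_list);
static DEFINE_MUTEX(sketch_list_lock);

static void sketch_add(struct sketch_entry *e)
{
	mutex_lock(&sketch_list_lock);		/* serialize writers */
	list_add_rcu(&e->entry, &sketch_list);
	mutex_unlock(&sketch_list_lock);
}

static void sketch_del(struct sketch_entry *e)
{
	mutex_lock(&sketch_list_lock);
	list_del_rcu(&e->entry);
	mutex_unlock(&sketch_list_lock);
	synchronize_rcu();	/* wait out readers that may still see e */
}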
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index cb22b97944b8..65b6a96afe6b 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -128,8 +128,7 @@ static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
128 if (l == zdev->list.prev) 128 if (l == zdev->list.prev)
129 return; 129 return;
130 /* Move zdev behind l */ 130 /* Move zdev behind l */
131 list_del(&zdev->list); 131 list_move(&zdev->list, l);
132 list_add(&zdev->list, l);
133} 132}
134 133
135/** 134/**
@@ -157,8 +156,7 @@ static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
157 if (l == zdev->list.next) 156 if (l == zdev->list.next)
158 return; 157 return;
159 /* Move zdev before l */ 158 /* Move zdev before l */
160 list_del(&zdev->list); 159 list_move_tail(&zdev->list, l);
161 list_add_tail(&zdev->list, l);
162} 160}
163 161
164static void zcrypt_device_release(struct kref *kref) 162static void zcrypt_device_release(struct kref *kref)
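The two zcrypt_api.c hunks above are pure refactoring: list_move() and list_move_tail() fold the list_del()/list_add() pair into a single call from <linux/list.h>. Roughly, the helpers behave like this sketch (the sketch_ names are invented):

#include <linux/list.h>

static inline void sketch_list_move(struct list_head *entry,
				    struct list_head *head)
{
	list_del(entry);
	list_add(entry, head);
}

static inline void sketch_list_move_tail(struct list_head *entry,
					 struct list_head *head)
{
	list_del(entry);
	list_add_tail(entry, head);
}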
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index e7a1e22e77ac..c20d4790258e 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -781,8 +781,7 @@ static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
781 /* Signal pending. */ 781 /* Signal pending. */
782 ap_cancel_message(zdev->ap_dev, &ap_msg); 782 ap_cancel_message(zdev->ap_dev, &ap_msg);
783out_free: 783out_free:
784 memset(ap_msg.message, 0x0, ap_msg.length); 784 kzfree(ap_msg.message);
785 kfree(ap_msg.message);
786 return rc; 785 return rc;
787} 786}
788 787
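The zcrypt_pcixcc.c hunk swaps the open-coded memset() plus kfree() for kzfree(), which clears the whole allocation before freeing it, using ksize() rather than the caller-tracked length. A rough sketch of what the helper does (the sketch_ name is invented; the real implementation lives in mm/util.c):

#include <linux/slab.h>
#include <linux/string.h>

static void sketch_kzfree(const void *p)
{
	void *mem = (void *)p;

	if (ZERO_OR_NULL_PTR(mem))
		return;
	memset(mem, 0, ksize(mem));	/* clear the entire allocation */
	kfree(mem);
}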
diff --git a/drivers/s390/ebcdic.c b/drivers/s390/ebcdic.c
deleted file mode 100644
index 99c98da15473..000000000000
--- a/drivers/s390/ebcdic.c
+++ /dev/null
@@ -1,246 +0,0 @@
1/*
2 * arch/s390/kernel/ebcdic.c
3 * EBCDIC -> ASCII, ASCII -> EBCDIC conversion tables.
4 *
5 * S390 version
6 * Copyright (C) 1998 IBM Corporation
7 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */
9
10#include <asm/types.h>
11
12/*
13 * ASCII -> EBCDIC
14 */
15__u8 _ascebc[256] =
16{
17 /*00 NL SH SX EX ET NQ AK BL */
18 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
19 /*08 BS HT LF VT FF CR SO SI */
20 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
21 /*10 DL D1 D2 D3 D4 NK SN EB */
22 0x10, 0x11, 0x12, 0x13, 0x3C, 0x15, 0x32, 0x26,
23 /*18 CN EM SB EC FS GS RS US */
24 0x18, 0x19, 0x3F, 0x27, 0x1C, 0x1D, 0x1E, 0x1F,
25 /*20 SP ! " # $ % & ' */
26 0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
27 /*28 ( ) * + , - . / */
28 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
29 /*30 0 1 2 3 4 5 6 7 */
30 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
31 /*38 8 9 : ; < = > ? */
32 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
33 /*40 @ A B C D E F G */
34 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
35 /*48 H I J K L M N O */
36 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
37 /*50 P Q R S T U V W */
38 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
39 /*58 X Y Z [ \ ] ^ _ */
40 0xE7, 0xE8, 0xE9, 0xAD, 0xE0, 0xBD, 0x5F, 0x6D,
41 /*60 ` a b c d e f g */
42 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
43 /*68 h i j k l m n o */
44 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
45 /*70 p q r s t u v w */
46 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
47 /*78 x y z { | } ~ DL */
48 0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07,
49 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
50 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
51 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
52 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
53 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
54 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
55 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
56 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
57 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
58 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
59 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
60 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
61 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
62 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
63 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
64 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0xFF
65};
66
67/*
68 * EBCDIC -> ASCII
69 */
70__u8 _ebcasc[256] =
71{
72 /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */
73 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
74 /* 0x08 -GE -SPS -RPT VT FF CR SO SI */
75 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
76 /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC
77 -ENP ->LF */
78 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
79 /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB
80 -IUS */
81 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
82 /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC
83 -INP */
84 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
85 /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL
86 -SW */
87 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
88 /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */
89 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
90 /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */
91 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
92 /* 0x40 SP RSP ä ---- */
93 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
94 /* 0x48 . < ( + | */
95 0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C,
96 /* 0x50 & ---- */
97 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
98 /* 0x58 ß ! $ * ) ; */
99 0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA,
100 /* 0x60 - / ---- Ä ---- ---- ---- */
101 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
102 /* 0x68 ---- , % _ > ? */
103 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
104 /* 0x70 ---- ---- ---- ---- ---- ---- ---- */
105 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
106 /* 0x78 * ` : # @ ' = " */
107 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
108 /* 0x80 * a b c d e f g */
109 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
110 /* 0x88 h i ---- ---- ---- */
111 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
112 /* 0x90 ° j k l m n o p */
113 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
114 /* 0x98 q r ---- ---- */
115 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
116 /* 0xA0 ~ s t u v w x */
117 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
118 /* 0xA8 y z ---- ---- ---- ---- */
119 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
120 /* 0xB0 ^ ---- § ---- */
121 0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
122 /* 0xB8 ---- [ ] ---- ---- ---- ---- */
123 0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07,
124 /* 0xC0 { A B C D E F G */
125 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
126 /* 0xC8 H I ---- ö ---- */
127 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
128 /* 0xD0 } J K L M N O P */
129 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
130 /* 0xD8 Q R ---- ü */
131 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
132 /* 0xE0 \ S T U V W X */
133 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
134 /* 0xE8 Y Z ---- Ö ---- ---- ---- */
135 0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
136 /* 0xF0 0 1 2 3 4 5 6 7 */
137 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
138 /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */
139 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07
140};
141
142/*
143 * EBCDIC (capitals) -> ASCII (small case)
144 */
145__u8 _ebcasc_reduce_case[256] =
146{
147 /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */
148 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
149
150 /* 0x08 -GE -SPS -RPT VT FF CR SO SI */
151 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
152
153 /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC
154 -ENP ->LF */
155 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
156
157 /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB
158 -IUS */
159 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
160
161 /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC
162 -INP */
163 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
164
165 /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL
166 -SW */
167 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
168
169 /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */
170 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
171
172 /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */
173 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
174
175 /* 0x40 SP RSP ä ---- */
176 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
177
178 /* 0x48 . < ( + | */
179 0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C,
180
181 /* 0x50 & ---- */
182 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
183
184 /* 0x58 ß ! $ * ) ; */
185 0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA,
186
187 /* 0x60 - / ---- Ä ---- ---- ---- */
188 0x2D, 0x2F, 0x07, 0x84, 0x07, 0x07, 0x07, 0x8F,
189
190 /* 0x68 ---- , % _ > ? */
191 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
192
193 /* 0x70 ---- ---- ---- ---- ---- ---- ---- */
194 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
195
196 /* 0x78 * ` : # @ ' = " */
197 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
198
199 /* 0x80 * a b c d e f g */
200 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
201
202 /* 0x88 h i ---- ---- ---- */
203 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
204
205 /* 0x90 ° j k l m n o p */
206 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
207
208 /* 0x98 q r ---- ---- */
209 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
210
211 /* 0xA0 ~ s t u v w x */
212 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
213
214 /* 0xA8 y z ---- ---- ---- ---- */
215 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
216
217 /* 0xB0 ^ ---- § ---- */
218 0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
219
220 /* 0xB8 ---- [ ] ---- ---- ---- ---- */
221 0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07,
222
223 /* 0xC0 { A B C D E F G */
224 0x7B, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
225
226 /* 0xC8 H I ---- ö ---- */
227 0x68, 0x69, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
228
229 /* 0xD0 } J K L M N O P */
230 0x7D, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
231
232 /* 0xD8 Q R ---- ü */
233 0x71, 0x72, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
234
235 /* 0xE0 \ S T U V W X */
236 0x5C, 0xF6, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
237
238 /* 0xE8 Y Z ---- Ö ---- ---- ---- */
239 0x79, 0x7A, 0xFD, 0x07, 0x94, 0x07, 0x07, 0x07,
240
241 /* 0xF0 0 1 2 3 4 5 6 7 */
242 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
243
244 /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */
245 0x38, 0x39, 0x07, 0x07, 0x81, 0x07, 0x07, 0x07
246};
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 6fec3cfcf978..c827d69b5a91 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2680,40 +2680,21 @@ static int qeth_handle_send_error(struct qeth_card *card,
2680 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) 2680 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
2681{ 2681{
2682 int sbalf15 = buffer->buffer->element[15].flags & 0xff; 2682 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
2683 int cc = qdio_err & 3;
2684 2683
2685 QETH_DBF_TEXT(TRACE, 6, "hdsnderr"); 2684 QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
2686 qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr"); 2685 qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr");
2687 switch (cc) { 2686
2688 case 0: 2687 if (!qdio_err)
2689 if (qdio_err) {
2690 QETH_DBF_TEXT(TRACE, 1, "lnkfail");
2691 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
2692 QETH_DBF_TEXT_(TRACE, 1, "%04x %02x",
2693 (u16)qdio_err, (u8)sbalf15);
2694 return QETH_SEND_ERROR_LINK_FAILURE;
2695 }
2696 return QETH_SEND_ERROR_NONE; 2688 return QETH_SEND_ERROR_NONE;
2697 case 2: 2689
2698 if (qdio_err & QDIO_ERROR_SIGA_BUSY) { 2690 if ((sbalf15 >= 15) && (sbalf15 <= 31))
2699 QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B"); 2691 return QETH_SEND_ERROR_RETRY;
2700 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); 2692
2701 return QETH_SEND_ERROR_KICK_IT; 2693 QETH_DBF_TEXT(TRACE, 1, "lnkfail");
2702 } 2694 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
2703 if ((sbalf15 >= 15) && (sbalf15 <= 31)) 2695 QETH_DBF_TEXT_(TRACE, 1, "%04x %02x",
2704 return QETH_SEND_ERROR_RETRY; 2696 (u16)qdio_err, (u8)sbalf15);
2705 return QETH_SEND_ERROR_LINK_FAILURE; 2697 return QETH_SEND_ERROR_LINK_FAILURE;
2706 /* look at qdio_error and sbalf 15 */
2707 case 1:
2708 QETH_DBF_TEXT(TRACE, 1, "SIGAcc1");
2709 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
2710 return QETH_SEND_ERROR_LINK_FAILURE;
2711 case 3:
2712 default:
2713 QETH_DBF_TEXT(TRACE, 1, "SIGAcc3");
2714 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
2715 return QETH_SEND_ERROR_KICK_IT;
2716 }
2717} 2698}
2718 2699
2719/* 2700/*
@@ -2849,10 +2830,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2849 qeth_get_micros() - 2830 qeth_get_micros() -
2850 queue->card->perf_stats.outbound_do_qdio_start_time; 2831 queue->card->perf_stats.outbound_do_qdio_start_time;
2851 if (rc) { 2832 if (rc) {
2833 queue->card->stats.tx_errors += count;
2834 /* ignore temporary SIGA errors without busy condition */
2835 if (rc == QDIO_ERROR_SIGA_TARGET)
2836 return;
2852 QETH_DBF_TEXT(TRACE, 2, "flushbuf"); 2837 QETH_DBF_TEXT(TRACE, 2, "flushbuf");
2853 QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); 2838 QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
2854 QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card)); 2839 QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card));
2855 queue->card->stats.tx_errors += count; 2840
2856 /* this must not happen under normal circumstances. if it 2841 /* this must not happen under normal circumstances. if it
2857 * happens something is really wrong -> recover */ 2842 * happens something is really wrong -> recover */
2858 qeth_schedule_recovery(queue->card); 2843 qeth_schedule_recovery(queue->card);
@@ -2927,13 +2912,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
2927 } 2912 }
2928 for (i = first_element; i < (first_element + count); ++i) { 2913 for (i = first_element; i < (first_element + count); ++i) {
2929 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; 2914 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2930 /*we only handle the KICK_IT error by doing a recovery */ 2915 qeth_handle_send_error(card, buffer, qdio_error);
2931 if (qeth_handle_send_error(card, buffer, qdio_error)
2932 == QETH_SEND_ERROR_KICK_IT){
2933 netif_stop_queue(card->dev);
2934 qeth_schedule_recovery(card);
2935 return;
2936 }
2937 qeth_clear_output_buffer(queue, buffer); 2916 qeth_clear_output_buffer(queue, buffer);
2938 } 2917 }
2939 atomic_sub(count, &queue->used_buffers); 2918 atomic_sub(count, &queue->used_buffers);
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
deleted file mode 100644
index 92b0417f8e12..000000000000
--- a/drivers/s390/s390mach.c
+++ /dev/null
@@ -1,538 +0,0 @@
1/*
2 * drivers/s390/s390mach.c
3 * S/390 machine check handler
4 *
5 * Copyright IBM Corp. 2000,2008
6 * Author(s): Ingo Adlung (adlung@de.ibm.com)
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * Cornelia Huck <cornelia.huck@de.ibm.com>
9 */
10
11#include <linux/init.h>
12#include <linux/sched.h>
13#include <linux/errno.h>
14#include <linux/workqueue.h>
15#include <linux/time.h>
16#include <linux/device.h>
17#include <linux/kthread.h>
18#include <asm/etr.h>
19#include <asm/lowcore.h>
20#include <asm/cio.h>
21#include <asm/cpu.h>
22#include "s390mach.h"
23
24static struct semaphore m_sem;
25
26static NORET_TYPE void
27s390_handle_damage(char *msg)
28{
29#ifdef CONFIG_SMP
30 smp_send_stop();
31#endif
32 disabled_wait((unsigned long) __builtin_return_address(0));
33 for(;;);
34}
35
36static crw_handler_t crw_handlers[NR_RSCS];
37
38/**
39 * s390_register_crw_handler() - register a channel report word handler
40 * @rsc: reporting source code to handle
41 * @handler: handler to be registered
42 *
43 * Returns %0 on success and a negative error value otherwise.
44 */
45int s390_register_crw_handler(int rsc, crw_handler_t handler)
46{
47 if ((rsc < 0) || (rsc >= NR_RSCS))
48 return -EINVAL;
49 if (!cmpxchg(&crw_handlers[rsc], NULL, handler))
50 return 0;
51 return -EBUSY;
52}
53
54/**
55 * s390_unregister_crw_handler() - unregister a channel report word handler
56 * @rsc: reporting source code to handle
57 */
58void s390_unregister_crw_handler(int rsc)
59{
60 if ((rsc < 0) || (rsc >= NR_RSCS))
61 return;
62 xchg(&crw_handlers[rsc], NULL);
63 synchronize_sched();
64}
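A user-space approximation of the registration scheme above: the compare-and-swap succeeds only while the slot still holds NULL, and unregistration swaps NULL back in before the kernel additionally waits for running handlers to finish. GCC __sync builtins stand in for the kernel's cmpxchg()/xchg(); the slot count and names are illustrative.

/* Userspace sketch of slot-based handler registration. */
#include <stddef.h>
#include <stdio.h>

#define NR_SLOTS 16
typedef void (*handler_t)(int);

static handler_t handlers[NR_SLOTS];

static int register_handler(int slot, handler_t h)
{
        if (slot < 0 || slot >= NR_SLOTS)
                return -1;
        /* Succeeds only if the slot still holds NULL. */
        return __sync_bool_compare_and_swap(&handlers[slot], NULL, h) ? 0 : -2;
}

static void unregister_handler(int slot)
{
        if (slot < 0 || slot >= NR_SLOTS)
                return;
        __sync_lock_test_and_set(&handlers[slot], NULL);
        /* The kernel then waits (synchronize_sched) so no CPU is still
         * executing the handler it just removed. */
}

static void demo(int x) { printf("handler called with %d\n", x); }

int main(void)
{
        printf("first:  %d\n", register_handler(3, demo));  /* 0 */
        printf("second: %d\n", register_handler(3, demo));  /* -2, slot busy */
        unregister_handler(3);
        return 0;
}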
65
66/*
67 * Retrieve CRWs and call function to handle event.
68 */
69static int s390_collect_crw_info(void *param)
70{
71 struct crw crw[2];
72 int ccode;
73 struct semaphore *sem;
74 unsigned int chain;
75 int ignore;
76
77 sem = (struct semaphore *)param;
78repeat:
79 ignore = down_interruptible(sem);
80 chain = 0;
81 while (1) {
82 if (unlikely(chain > 1)) {
83 struct crw tmp_crw;
84
85 printk(KERN_WARNING"%s: Code does not support more "
86 "than two chained crws; please report to "
87 "linux390@de.ibm.com!\n", __func__);
88 ccode = stcrw(&tmp_crw);
89 printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
90 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
91 __func__, tmp_crw.slct, tmp_crw.oflw,
92 tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
93 tmp_crw.erc, tmp_crw.rsid);
94 printk(KERN_WARNING"%s: This was crw number %x in the "
95 "chain\n", __func__, chain);
96 if (ccode != 0)
97 break;
98 chain = tmp_crw.chn ? chain + 1 : 0;
99 continue;
100 }
101 ccode = stcrw(&crw[chain]);
102 if (ccode != 0)
103 break;
104 printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
105 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
106 crw[chain].slct, crw[chain].oflw, crw[chain].chn,
107 crw[chain].rsc, crw[chain].anc, crw[chain].erc,
108 crw[chain].rsid);
109 /* Check for overflows. */
110 if (crw[chain].oflw) {
111 int i;
112
113 pr_debug("%s: crw overflow detected!\n", __func__);
114 for (i = 0; i < NR_RSCS; i++) {
115 if (crw_handlers[i])
116 crw_handlers[i](NULL, NULL, 1);
117 }
118 chain = 0;
119 continue;
120 }
121 if (crw[0].chn && !chain) {
122 chain++;
123 continue;
124 }
125 if (crw_handlers[crw[chain].rsc])
126 crw_handlers[crw[chain].rsc](&crw[0],
127 chain ? &crw[1] : NULL,
128 0);
129 /* chain is always 0 or 1 here. */
130 chain = crw[chain].chn ? chain + 1 : 0;
131 }
132 goto repeat;
133 return 0;
134}
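A small simulation of the chain bookkeeping in s390_collect_crw_info(): chain is 0 for a stand-alone CRW and 1 while the second word of a chained pair is still outstanding. The CRW stream below is invented for the example; the real words come from stcrw().

/* Simulation of the chained-CRW dispatch logic. */
#include <stdio.h>

struct fake_crw { int chn; int rsc; };

int main(void)
{
        /* chained pair (rsc 3), then a stand-alone CRW (rsc 4) */
        struct fake_crw stream[] = { {1, 3}, {0, 3}, {0, 4} };
        int n = sizeof(stream) / sizeof(stream[0]);
        int chain = 0, i;

        for (i = 0; i < n; i++) {
                if (stream[i].chn && !chain) {
                        chain = 1;              /* wait for the second word */
                        continue;
                }
                printf("dispatch rsc=%d (%s)\n", stream[i].rsc,
                       chain ? "chained pair" : "single");
                chain = stream[i].chn ? chain + 1 : 0;
        }
        return 0;   /* dispatch rsc=3 (chained pair), dispatch rsc=4 (single) */
}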
135
136struct mcck_struct {
137 int kill_task;
138 int channel_report;
139 int warning;
140 unsigned long long mcck_code;
141};
142
143static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
144
145/*
146 * Main machine check handler function. Will be called with interrupts enabled
147 * or disabled and machine checks enabled or disabled.
148 */
149void
150s390_handle_mcck(void)
151{
152 unsigned long flags;
153 struct mcck_struct mcck;
154
155 /*
156 * Disable machine checks and get the current state of accumulated
157 * machine checks. Afterwards delete the old state and enable machine
158 * checks again.
159 */
160 local_irq_save(flags);
161 local_mcck_disable();
162 mcck = __get_cpu_var(cpu_mcck);
163 memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct));
164 clear_thread_flag(TIF_MCCK_PENDING);
165 local_mcck_enable();
166 local_irq_restore(flags);
167
168 if (mcck.channel_report)
169 up(&m_sem);
170
171#ifdef CONFIG_MACHCHK_WARNING
172/*
173 * The warning may remain for a prolonged period on the bare iron.
174 * (actually until the machine is powered off, or until the problem is gone)
175 * So we just stop listening for the WARNING MCH and prevent continuously
176 * being interrupted. One caveat, however, is that we must do this per
177 * processor and cannot use the smp version of ctl_clear_bit().
178 * On VM we only get one interrupt per virtually presented machine check.
179 * Though one suffices, we may get one interrupt per (virtual) processor.
180 */
181 if (mcck.warning) { /* WARNING pending ? */
182 static int mchchk_wng_posted = 0;
183 /*
184 * Use single machine clear, as we cannot handle smp right now
185 */
186 __ctl_clear_bit(14, 24); /* Disable WARNING MCH */
187 if (xchg(&mchchk_wng_posted, 1) == 0)
188 kill_cad_pid(SIGPWR, 1);
189 }
190#endif
191
192 if (mcck.kill_task) {
193 local_irq_enable();
194 printk(KERN_EMERG "mcck: Terminating task because of machine "
195 "malfunction (code 0x%016llx).\n", mcck.mcck_code);
196 printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
197 current->comm, current->pid);
198 do_exit(SIGSEGV);
199 }
200}
201EXPORT_SYMBOL_GPL(s390_handle_mcck);
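The function above follows a snapshot-and-clear pattern: copy the accumulated per-CPU state while machine checks are disabled, clear it, then act on the copy outside the critical window. A plain-C sketch of just that pattern; the critical window is only marked by comments here, where the kernel uses local_irq_save()/local_mcck_disable() and per-CPU data.

/* Sketch of the "snapshot, clear, then act" handler pattern. */
#include <string.h>
#include <stdio.h>

struct pending { int channel_report; int warning; int kill_task; };

static struct pending accumulated;     /* per-CPU in the real handler */

static void handle_pending(void)
{
        struct pending snap;

        /* --- critical window: interrupts + machine checks disabled --- */
        snap = accumulated;
        memset(&accumulated, 0, sizeof(accumulated));
        /* --- window ends: new events may accumulate again ------------ */

        if (snap.channel_report)
                printf("wake CRW collector\n");
        if (snap.warning)
                printf("post SIGPWR once\n");
        if (snap.kill_task)
                printf("terminate current task\n");
}

int main(void)
{
        accumulated.channel_report = 1;
        handle_pending();
        return 0;
}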
202
203/*
204 * returns 0 if all registers could be validated
205 * returns 1 otherwise
206 */
207static int
208s390_revalidate_registers(struct mci *mci)
209{
210 int kill_task;
211 u64 tmpclock;
212 u64 zero;
213 void *fpt_save_area, *fpt_creg_save_area;
214
215 kill_task = 0;
216 zero = 0;
217 /* General purpose registers */
218 if (!mci->gr)
219 /*
220 * General purpose registers couldn't be restored and have
221 * unknown contents. Process needs to be terminated.
222 */
223 kill_task = 1;
224
225 /* Revalidate floating point registers */
226 if (!mci->fp)
227 /*
228 * Floating point registers can't be restored and
229 * therefore the process needs to be terminated.
230 */
231 kill_task = 1;
232
233#ifndef CONFIG_64BIT
234 asm volatile(
235 " ld 0,0(%0)\n"
236 " ld 2,8(%0)\n"
237 " ld 4,16(%0)\n"
238 " ld 6,24(%0)"
239 : : "a" (&S390_lowcore.floating_pt_save_area));
240#endif
241
242 if (MACHINE_HAS_IEEE) {
243#ifdef CONFIG_64BIT
244 fpt_save_area = &S390_lowcore.floating_pt_save_area;
245 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
246#else
247 fpt_save_area = (void *) S390_lowcore.extended_save_area_addr;
248 fpt_creg_save_area = fpt_save_area+128;
249#endif
250 /* Floating point control register */
251 if (!mci->fc) {
252 /*
253 * Floating point control register can't be restored.
254 * Task will be terminated.
255 */
256 asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
257 kill_task = 1;
258
259 } else
260 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
261
262 asm volatile(
263 " ld 0,0(%0)\n"
264 " ld 1,8(%0)\n"
265 " ld 2,16(%0)\n"
266 " ld 3,24(%0)\n"
267 " ld 4,32(%0)\n"
268 " ld 5,40(%0)\n"
269 " ld 6,48(%0)\n"
270 " ld 7,56(%0)\n"
271 " ld 8,64(%0)\n"
272 " ld 9,72(%0)\n"
273 " ld 10,80(%0)\n"
274 " ld 11,88(%0)\n"
275 " ld 12,96(%0)\n"
276 " ld 13,104(%0)\n"
277 " ld 14,112(%0)\n"
278 " ld 15,120(%0)\n"
279 : : "a" (fpt_save_area));
280 }
281
282 /* Revalidate access registers */
283 asm volatile(
284 " lam 0,15,0(%0)"
285 : : "a" (&S390_lowcore.access_regs_save_area));
286 if (!mci->ar)
287 /*
288 * Access registers have unknown contents.
289 * Terminating task.
290 */
291 kill_task = 1;
292
293 /* Revalidate control registers */
294 if (!mci->cr)
295 /*
296 * Control registers have unknown contents.
297 * Can't recover and therefore stopping machine.
298 */
299 s390_handle_damage("invalid control registers.");
300 else
301#ifdef CONFIG_64BIT
302 asm volatile(
303 " lctlg 0,15,0(%0)"
304 : : "a" (&S390_lowcore.cregs_save_area));
305#else
306 asm volatile(
307 " lctl 0,15,0(%0)"
308 : : "a" (&S390_lowcore.cregs_save_area));
309#endif
310
311 /*
312 * We don't even try to revalidate the TOD register, since we simply
313 * can't write something sensible into that register.
314 */
315
316#ifdef CONFIG_64BIT
317 /*
318 * See if we can revalidate the TOD programmable register with its
319 * old contents (should be zero) otherwise set it to zero.
320 */
321 if (!mci->pr)
322 asm volatile(
323 " sr 0,0\n"
324 " sckpf"
325 : : : "0", "cc");
326 else
327 asm volatile(
328 " l 0,0(%0)\n"
329 " sckpf"
330 : : "a" (&S390_lowcore.tod_progreg_save_area)
331 : "0", "cc");
332#endif
333
334 /* Revalidate clock comparator register */
335 asm volatile(
336 " stck 0(%1)\n"
337 " sckc 0(%1)"
338 : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
339
340 /* Check if old PSW is valid */
341 if (!mci->wp)
342 /*
343 * Can't tell if we come from user or kernel mode
344 * -> stopping machine.
345 */
346 s390_handle_damage("old psw invalid.");
347
348 if (!mci->ms || !mci->pm || !mci->ia)
349 kill_task = 1;
350
351 return kill_task;
352}
353
354#define MAX_IPD_COUNT 29
355#define MAX_IPD_TIME (5 * 60 * USEC_PER_SEC) /* 5 minutes */
356
357/*
358 * machine check handler.
359 */
360void
361s390_do_machine_check(struct pt_regs *regs)
362{
363 static DEFINE_SPINLOCK(ipd_lock);
364 static unsigned long long last_ipd;
365 static int ipd_count;
366 unsigned long long tmp;
367 struct mci *mci;
368 struct mcck_struct *mcck;
369 int umode;
370
371 lockdep_off();
372
373 s390_idle_check();
374
375 mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
376 mcck = &__get_cpu_var(cpu_mcck);
377 umode = user_mode(regs);
378
379 if (mci->sd)
380 /* System damage -> stopping machine */
381 s390_handle_damage("received system damage machine check.");
382
383 if (mci->pd) {
384 if (mci->b) {
385 /* Processing backup -> verify if we can survive this */
386 u64 z_mcic, o_mcic, t_mcic;
387#ifdef CONFIG_64BIT
388 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
389 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
390 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
391 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
392 1ULL<<16);
393#else
394 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 |
395 1ULL<<29);
396 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
397 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
398 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16);
399#endif
400 t_mcic = *(u64 *)mci;
401
402 if (((t_mcic & z_mcic) != 0) ||
403 ((t_mcic & o_mcic) != o_mcic)) {
404 s390_handle_damage("processing backup machine "
405 "check with damage.");
406 }
407
408 /*
409 * Nullifying exigent condition, therefore we might
410 * retry this instruction.
411 */
412
413 spin_lock(&ipd_lock);
414
415 tmp = get_clock();
416
417 if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
418 ipd_count++;
419 else
420 ipd_count = 1;
421
422 last_ipd = tmp;
423
424 if (ipd_count == MAX_IPD_COUNT)
425 s390_handle_damage("too many ipd retries.");
426
427 spin_unlock(&ipd_lock);
428 }
429 else {
430 /* Processing damage -> stopping machine */
431 s390_handle_damage("received instruction processing "
432 "damage machine check.");
433 }
434 }
435 if (s390_revalidate_registers(mci)) {
436 if (umode) {
437 /*
438 * Couldn't restore all register contents while in
439 * user mode -> mark task for termination.
440 */
441 mcck->kill_task = 1;
442 mcck->mcck_code = *(unsigned long long *) mci;
443 set_thread_flag(TIF_MCCK_PENDING);
444 }
445 else
446 /*
447 * Couldn't restore all register contents while in
448 * kernel mode -> stopping machine.
449 */
450 s390_handle_damage("unable to revalidate registers.");
451 }
452
453 if (mci->cd) {
454 /* Timing facility damage */
455 s390_handle_damage("TOD clock damaged");
456 }
457
458 if (mci->ed && mci->ec) {
459 /* External damage */
460 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC))
461 etr_sync_check();
462 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
463 etr_switch_to_local();
464 if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
465 stp_sync_check();
466 if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
467 stp_island_check();
468 }
469
470 if (mci->se)
471 /* Storage error uncorrected */
472 s390_handle_damage("received storage error uncorrected "
473 "machine check.");
474
475 if (mci->ke)
476 /* Storage key-error uncorrected */
477 s390_handle_damage("received storage key-error uncorrected "
478 "machine check.");
479
480 if (mci->ds && mci->fa)
481 /* Storage degradation */
482 s390_handle_damage("received storage degradation machine "
483 "check.");
484
485 if (mci->cp) {
486 /* Channel report word pending */
487 mcck->channel_report = 1;
488 set_thread_flag(TIF_MCCK_PENDING);
489 }
490
491 if (mci->w) {
492 /* Warning pending */
493 mcck->warning = 1;
494 set_thread_flag(TIF_MCCK_PENDING);
495 }
496 lockdep_on();
497}
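A worked example of the zero-mask/one-mask test used for the processing-backup case above, with the 64-bit (CONFIG_64BIT) masks: the check only passes if no "must be zero" bit is set and every required validity bit is set. The sample MCIC value is made up.

/* Worked example of the MCIC zero-mask / one-mask check. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t z_mcic = (1ULL << 63) | (1ULL << 59) | (1ULL << 29);
        uint64_t o_mcic = (1ULL << 43) | (1ULL << 42) | (1ULL << 41) |
                          (1ULL << 40) | (1ULL << 36) | (1ULL << 35) |
                          (1ULL << 34) | (1ULL << 32) | (1ULL << 30) |
                          (1ULL << 21) | (1ULL << 20) | (1ULL << 17) |
                          (1ULL << 16);
        uint64_t mcic = o_mcic | (1ULL << 49);   /* all validity bits set */

        if ((mcic & z_mcic) != 0 || (mcic & o_mcic) != o_mcic)
                printf("unrecoverable -> s390_handle_damage()\n");
        else
                printf("recoverable -> retry after IPD accounting\n");
        return 0;   /* prints the recoverable branch for this sample */
}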
498
499/*
500 * s390_init_machine_check
501 *
502 * initialize machine check handling
503 */
504static int
505machine_check_init(void)
506{
507 init_MUTEX_LOCKED(&m_sem);
508 ctl_set_bit(14, 25); /* enable external damage MCH */
509 ctl_set_bit(14, 27); /* enable system recovery MCH */
510#ifdef CONFIG_MACHCHK_WARNING
511 ctl_set_bit(14, 24); /* enable warning MCH */
512#endif
513 return 0;
514}
515
516/*
517 * Initialize the machine check handler really early to be able to
518 * catch all machine checks that happen during boot
519 */
520arch_initcall(machine_check_init);
521
522/*
523 * Machine checks for the channel subsystem must be enabled
524 * after the channel subsystem is initialized
525 */
526static int __init
527machine_check_crw_init (void)
528{
529 struct task_struct *task;
530
531 task = kthread_run(s390_collect_crw_info, &m_sem, "kmcheck");
532 if (IS_ERR(task))
533 return PTR_ERR(task);
534 ctl_set_bit(14, 28); /* enable channel report MCH */
535 return 0;
536}
537
538device_initcall (machine_check_crw_init);
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
deleted file mode 100644
index d39f8b697d27..000000000000
--- a/drivers/s390/s390mach.h
+++ /dev/null
@@ -1,122 +0,0 @@
1/*
2 * drivers/s390/s390mach.h
3 * S/390 data definitions for machine check processing
4 *
5 * S390 version
6 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Ingo Adlung (adlung@de.ibm.com)
8 */
9
10#ifndef __s390mach_h
11#define __s390mach_h
12
13#include <asm/types.h>
14
15struct mci {
16 __u32 sd : 1; /* 00 system damage */
17 __u32 pd : 1; /* 01 instruction-processing damage */
18 __u32 sr : 1; /* 02 system recovery */
19 __u32 to_be_defined_1 : 1; /* 03 */
20 __u32 cd : 1; /* 04 timing-facility damage */
21 __u32 ed : 1; /* 05 external damage */
22 __u32 to_be_defined_2 : 1; /* 06 */
23 __u32 dg : 1; /* 07 degradation */
24 __u32 w : 1; /* 08 warning pending */
25 __u32 cp : 1; /* 09 channel-report pending */
26 __u32 sp : 1; /* 10 service-processor damage */
27 __u32 ck : 1; /* 11 channel-subsystem damage */
28 __u32 to_be_defined_3 : 2; /* 12-13 */
29 __u32 b : 1; /* 14 backed up */
30 __u32 to_be_defined_4 : 1; /* 15 */
31 __u32 se : 1; /* 16 storage error uncorrected */
32 __u32 sc : 1; /* 17 storage error corrected */
33 __u32 ke : 1; /* 18 storage-key error uncorrected */
34 __u32 ds : 1; /* 19 storage degradation */
35 __u32 wp : 1; /* 20 psw mwp validity */
36 __u32 ms : 1; /* 21 psw mask and key validity */
37 __u32 pm : 1; /* 22 psw program mask and cc validity */
38 __u32 ia : 1; /* 23 psw instruction address validity */
39 __u32 fa : 1; /* 24 failing storage address validity */
40 __u32 to_be_defined_5 : 1; /* 25 */
41 __u32 ec : 1; /* 26 external damage code validity */
42 __u32 fp : 1; /* 27 floating point register validity */
43 __u32 gr : 1; /* 28 general register validity */
44 __u32 cr : 1; /* 29 control register validity */
45 __u32 to_be_defined_6 : 1; /* 30 */
46 __u32 st : 1; /* 31 storage logical validity */
47 __u32 ie : 1; /* 32 indirect storage error */
48 __u32 ar : 1; /* 33 access register validity */
49 __u32 da : 1; /* 34 delayed access exception */
50 __u32 to_be_defined_7 : 7; /* 35-41 */
51 __u32 pr : 1; /* 42 tod programmable register validity */
52 __u32 fc : 1; /* 43 fp control register validity */
53 __u32 ap : 1; /* 44 ancillary report */
54 __u32 to_be_defined_8 : 1; /* 45 */
55 __u32 ct : 1; /* 46 cpu timer validity */
56 __u32 cc : 1; /* 47 clock comparator validity */
57 __u32 to_be_defined_9 : 16; /* 48-63 */
58};
59
60/*
61 * Channel Report Word
62 */
63struct crw {
64 __u32 res1 : 1; /* reserved zero */
65 __u32 slct : 1; /* solicited */
66 __u32 oflw : 1; /* overflow */
67 __u32 chn : 1; /* chained */
68 __u32 rsc : 4; /* reporting source code */
69 __u32 anc : 1; /* ancillary report */
70 __u32 res2 : 1; /* reserved zero */
71 __u32 erc : 6; /* error-recovery code */
72 __u32 rsid : 16; /* reporting-source ID */
73} __attribute__ ((packed));
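For comparison, the same CRW layout decoded from a raw 32-bit word with shifts and masks; the bit-field declaration above relies on the s390 big-endian ABI. The sample word (rsc 0x3 subchannel, erc 0x01 available, rsid 0x0001) is invented.

/* Decoding a raw CRW word without bit-fields. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t w = 0x03010001;

        unsigned slct = (w >> 30) & 0x1;
        unsigned oflw = (w >> 29) & 0x1;
        unsigned chn  = (w >> 28) & 0x1;
        unsigned rsc  = (w >> 24) & 0xf;
        unsigned anc  = (w >> 23) & 0x1;
        unsigned erc  = (w >> 16) & 0x3f;
        unsigned rsid = w & 0xffff;

        printf("slct=%u oflw=%u chn=%u rsc=%X anc=%u erc=%X rsid=%X\n",
               slct, oflw, chn, rsc, anc, erc, rsid);
        return 0;   /* slct=0 oflw=0 chn=0 rsc=3 anc=0 erc=1 rsid=1 */
}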
74
75typedef void (*crw_handler_t)(struct crw *, struct crw *, int);
76
77extern int s390_register_crw_handler(int rsc, crw_handler_t handler);
78extern void s390_unregister_crw_handler(int rsc);
79
80#define NR_RSCS 16
81
82#define CRW_RSC_MONITOR 0x2 /* monitoring facility */
83#define CRW_RSC_SCH 0x3 /* subchannel */
84#define CRW_RSC_CPATH 0x4 /* channel path */
85#define CRW_RSC_CONFIG 0x9 /* configuration-alert facility */
86#define CRW_RSC_CSS 0xB /* channel subsystem */
87
88#define CRW_ERC_EVENT 0x00 /* event information pending */
89#define CRW_ERC_AVAIL 0x01 /* available */
90#define CRW_ERC_INIT 0x02 /* initialized */
91#define CRW_ERC_TERROR 0x03 /* temporary error */
92#define CRW_ERC_IPARM 0x04 /* installed parm initialized */
93#define CRW_ERC_TERM 0x05 /* terminal */
94#define CRW_ERC_PERRN 0x06 /* perm. error, fac. not init */
95#define CRW_ERC_PERRI 0x07 /* perm. error, facility init */
96#define CRW_ERC_PMOD 0x08 /* installed parameters modified */
97
98static inline int stcrw(struct crw *pcrw )
99{
100 int ccode;
101
102 __asm__ __volatile__(
103 "stcrw 0(%2)\n\t"
104 "ipm %0\n\t"
105 "srl %0,28\n\t"
106 : "=d" (ccode), "=m" (*pcrw)
107 : "a" (pcrw)
108 : "cc" );
109 return ccode;
110}
111
112#define ED_ETR_SYNC 12 /* External damage ETR sync check */
113#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
114
115#define ED_STP_SYNC 7 /* External damage STP sync check */
116#define ED_STP_ISLAND 6 /* External damage STP island check */
117
118struct pt_regs;
119
120void s390_handle_mcck(void);
121void s390_do_machine_check(struct pt_regs *regs);
122#endif /* __s390mach_h */
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
deleted file mode 100644
index 0eea90781385..000000000000
--- a/drivers/s390/sysinfo.c
+++ /dev/null
@@ -1,469 +0,0 @@
1/*
2 * drivers/s390/sysinfo.c
3 *
4 * Copyright IBM Corp. 2001, 2008
5 * Author(s): Ulrich Weigand (Ulrich.Weigand@de.ibm.com)
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8
9#include <linux/kernel.h>
10#include <linux/mm.h>
11#include <linux/proc_fs.h>
12#include <linux/seq_file.h>
13#include <linux/init.h>
14#include <linux/delay.h>
15#include <linux/module.h>
16#include <asm/ebcdic.h>
17#include <asm/sysinfo.h>
18#include <asm/cpcmd.h>
19
20/* Sigh, math-emu. Don't ask. */
21#include <asm/sfp-util.h>
22#include <math-emu/soft-fp.h>
23#include <math-emu/single.h>
24
25static inline int stsi_0(void)
26{
27 int rc = stsi (NULL, 0, 0, 0);
28 return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28);
29}
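A small illustration of the level extraction in stsi_0(): the current configuration level sits in the top four bits of the value STSI stores for function code 0, which is what the level checks in proc_read_sysinfo() further down rely on. The sample return value is made up.

/* Level extraction as done in stsi_0(). */
#include <stdio.h>

int main(void)
{
        int rc = 0x20000000;                    /* pretend STSI stored this */
        int level = ((unsigned int) rc) >> 28;

        /* 2 means data up to the LPAR level (stsi 2.x.x) is available */
        printf("configuration level: %d\n", level);
        return 0;
}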
30
31static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
32{
33 if (stsi(info, 1, 1, 1) == -ENOSYS)
34 return len;
35
36 EBCASC(info->manufacturer, sizeof(info->manufacturer));
37 EBCASC(info->type, sizeof(info->type));
38 EBCASC(info->model, sizeof(info->model));
39 EBCASC(info->sequence, sizeof(info->sequence));
40 EBCASC(info->plant, sizeof(info->plant));
41 EBCASC(info->model_capacity, sizeof(info->model_capacity));
42 EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap));
43 EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
44 len += sprintf(page + len, "Manufacturer: %-16.16s\n",
45 info->manufacturer);
46 len += sprintf(page + len, "Type: %-4.4s\n",
47 info->type);
48 if (info->model[0] != '\0')
49 /*
50 * Sigh: the model field has been renamed with System z9
51 * to model_capacity and a new model field has been added
52 * after the plant field. To avoid confusing older programs
53 * the "Model:" prints "model_capacity model" or just
54 * "model_capacity" if the model string is empty .
55 */
56 len += sprintf(page + len,
57 "Model: %-16.16s %-16.16s\n",
58 info->model_capacity, info->model);
59 else
60 len += sprintf(page + len, "Model: %-16.16s\n",
61 info->model_capacity);
62 len += sprintf(page + len, "Sequence Code: %-16.16s\n",
63 info->sequence);
64 len += sprintf(page + len, "Plant: %-4.4s\n",
65 info->plant);
66 len += sprintf(page + len, "Model Capacity: %-16.16s %08u\n",
67 info->model_capacity, *(u32 *) info->model_cap_rating);
68 if (info->model_perm_cap[0] != '\0')
69 len += sprintf(page + len,
70 "Model Perm. Capacity: %-16.16s %08u\n",
71 info->model_perm_cap,
72 *(u32 *) info->model_perm_cap_rating);
73 if (info->model_temp_cap[0] != '\0')
74 len += sprintf(page + len,
75 "Model Temp. Capacity: %-16.16s %08u\n",
76 info->model_temp_cap,
77 *(u32 *) info->model_temp_cap_rating);
78 return len;
79}
80
81#if 0 /* Currently unused */
82static int stsi_1_2_1(struct sysinfo_1_2_1 *info, char *page, int len)
83{
84 if (stsi(info, 1, 2, 1) == -ENOSYS)
85 return len;
86
87 len += sprintf(page + len, "\n");
88 EBCASC(info->sequence, sizeof(info->sequence));
89 EBCASC(info->plant, sizeof(info->plant));
90 len += sprintf(page + len, "Sequence Code of CPU: %-16.16s\n",
91 info->sequence);
92 len += sprintf(page + len, "Plant of CPU: %-16.16s\n",
93 info->plant);
94 return len;
95}
96#endif
97
98static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
99{
100 struct sysinfo_1_2_2_extension *ext;
101 int i;
102
103 if (stsi(info, 1, 2, 2) == -ENOSYS)
104 return len;
105 ext = (struct sysinfo_1_2_2_extension *)
106 ((unsigned long) info + info->acc_offset);
107
108 len += sprintf(page + len, "\n");
109 len += sprintf(page + len, "CPUs Total: %d\n",
110 info->cpus_total);
111 len += sprintf(page + len, "CPUs Configured: %d\n",
112 info->cpus_configured);
113 len += sprintf(page + len, "CPUs Standby: %d\n",
114 info->cpus_standby);
115 len += sprintf(page + len, "CPUs Reserved: %d\n",
116 info->cpus_reserved);
117
118 if (info->format == 1) {
119 /*
120 * Sigh 2. According to the specification the alternate
121 * capability field is a 32 bit floating point number
122 * if the higher order 8 bits are not zero. Printing
123 * a floating point number in the kernel is a no-no, so the
124 * number is always printed as a 32 bit unsigned integer.
125 * User space needs to know about the strange
126 * encoding of the alternate cpu capability.
127 */
128 len += sprintf(page + len, "Capability: %u %u\n",
129 info->capability, ext->alt_capability);
130 for (i = 2; i <= info->cpus_total; i++)
131 len += sprintf(page + len,
132 "Adjustment %02d-way: %u %u\n",
133 i, info->adjustment[i-2],
134 ext->alt_adjustment[i-2]);
135
136 } else {
137 len += sprintf(page + len, "Capability: %u\n",
138 info->capability);
139 for (i = 2; i <= info->cpus_total; i++)
140 len += sprintf(page + len,
141 "Adjustment %02d-way: %u\n",
142 i, info->adjustment[i-2]);
143 }
144
145 if (info->secondary_capability != 0)
146 len += sprintf(page + len, "Secondary Capability: %d\n",
147 info->secondary_capability);
148
149 return len;
150}
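A sketch of how a user-space reader of /proc/sysinfo could undo the encoding described in the comment above: reinterpret the printed unsigned integer as an IEEE single when its high-order byte is non-zero. The sample value is simply 1e7 encoded as a 32-bit float.

/* User-space reinterpretation of the alternate capability value. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
        uint32_t raw = 0x4b189680;      /* as printed in /proc/sysinfo */

        if (raw >> 24) {                /* high byte non-zero: float encoding */
                float f;
                memcpy(&f, &raw, sizeof(f));
                printf("alternate capability: %.1f\n", f);   /* 10000000.0 */
        } else {
                printf("alternate capability: %u\n", raw);
        }
        return 0;
}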
151
152#if 0 /* Currently unused */
153static int stsi_2_2_1(struct sysinfo_2_2_1 *info, char *page, int len)
154{
155 if (stsi(info, 2, 2, 1) == -ENOSYS)
156 return len;
157
158 len += sprintf(page + len, "\n");
159 EBCASC (info->sequence, sizeof(info->sequence));
160 EBCASC (info->plant, sizeof(info->plant));
161 len += sprintf(page + len, "Sequence Code of logical CPU: %-16.16s\n",
162 info->sequence);
163 len += sprintf(page + len, "Plant of logical CPU: %-16.16s\n",
164 info->plant);
165 return len;
166}
167#endif
168
169static int stsi_2_2_2(struct sysinfo_2_2_2 *info, char *page, int len)
170{
171 if (stsi(info, 2, 2, 2) == -ENOSYS)
172 return len;
173
174 EBCASC (info->name, sizeof(info->name));
175
176 len += sprintf(page + len, "\n");
177 len += sprintf(page + len, "LPAR Number: %d\n",
178 info->lpar_number);
179
180 len += sprintf(page + len, "LPAR Characteristics: ");
181 if (info->characteristics & LPAR_CHAR_DEDICATED)
182 len += sprintf(page + len, "Dedicated ");
183 if (info->characteristics & LPAR_CHAR_SHARED)
184 len += sprintf(page + len, "Shared ");
185 if (info->characteristics & LPAR_CHAR_LIMITED)
186 len += sprintf(page + len, "Limited ");
187 len += sprintf(page + len, "\n");
188
189 len += sprintf(page + len, "LPAR Name: %-8.8s\n",
190 info->name);
191
192 len += sprintf(page + len, "LPAR Adjustment: %d\n",
193 info->caf);
194
195 len += sprintf(page + len, "LPAR CPUs Total: %d\n",
196 info->cpus_total);
197 len += sprintf(page + len, "LPAR CPUs Configured: %d\n",
198 info->cpus_configured);
199 len += sprintf(page + len, "LPAR CPUs Standby: %d\n",
200 info->cpus_standby);
201 len += sprintf(page + len, "LPAR CPUs Reserved: %d\n",
202 info->cpus_reserved);
203 len += sprintf(page + len, "LPAR CPUs Dedicated: %d\n",
204 info->cpus_dedicated);
205 len += sprintf(page + len, "LPAR CPUs Shared: %d\n",
206 info->cpus_shared);
207 return len;
208}
209
210static int stsi_3_2_2(struct sysinfo_3_2_2 *info, char *page, int len)
211{
212 int i;
213
214 if (stsi(info, 3, 2, 2) == -ENOSYS)
215 return len;
216 for (i = 0; i < info->count; i++) {
217 EBCASC (info->vm[i].name, sizeof(info->vm[i].name));
218 EBCASC (info->vm[i].cpi, sizeof(info->vm[i].cpi));
219 len += sprintf(page + len, "\n");
220 len += sprintf(page + len, "VM%02d Name: %-8.8s\n",
221 i, info->vm[i].name);
222 len += sprintf(page + len, "VM%02d Control Program: %-16.16s\n",
223 i, info->vm[i].cpi);
224
225 len += sprintf(page + len, "VM%02d Adjustment: %d\n",
226 i, info->vm[i].caf);
227
228 len += sprintf(page + len, "VM%02d CPUs Total: %d\n",
229 i, info->vm[i].cpus_total);
230 len += sprintf(page + len, "VM%02d CPUs Configured: %d\n",
231 i, info->vm[i].cpus_configured);
232 len += sprintf(page + len, "VM%02d CPUs Standby: %d\n",
233 i, info->vm[i].cpus_standby);
234 len += sprintf(page + len, "VM%02d CPUs Reserved: %d\n",
235 i, info->vm[i].cpus_reserved);
236 }
237 return len;
238}
239
240
241static int proc_read_sysinfo(char *page, char **start,
242 off_t off, int count,
243 int *eof, void *data)
244{
245 unsigned long info = get_zeroed_page (GFP_KERNEL);
246 int level, len;
247
248 if (!info)
249 return 0;
250
251 len = 0;
252 level = stsi_0();
253 if (level >= 1)
254 len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len);
255
256 if (level >= 1)
257 len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len);
258
259 if (level >= 2)
260 len = stsi_2_2_2((struct sysinfo_2_2_2 *) info, page, len);
261
262 if (level >= 3)
263 len = stsi_3_2_2((struct sysinfo_3_2_2 *) info, page, len);
264
265 free_page (info);
266 return len;
267}
268
269static __init int create_proc_sysinfo(void)
270{
271 create_proc_read_entry("sysinfo", 0444, NULL,
272 proc_read_sysinfo, NULL);
273 return 0;
274}
275
276__initcall(create_proc_sysinfo);
277
278/*
279 * Service levels interface.
280 */
281
282static DECLARE_RWSEM(service_level_sem);
283static LIST_HEAD(service_level_list);
284
285int register_service_level(struct service_level *slr)
286{
287 struct service_level *ptr;
288
289 down_write(&service_level_sem);
290 list_for_each_entry(ptr, &service_level_list, list)
291 if (ptr == slr) {
292 up_write(&service_level_sem);
293 return -EEXIST;
294 }
295 list_add_tail(&slr->list, &service_level_list);
296 up_write(&service_level_sem);
297 return 0;
298}
299EXPORT_SYMBOL(register_service_level);
300
301int unregister_service_level(struct service_level *slr)
302{
303 struct service_level *ptr, *next;
304 int rc = -ENOENT;
305
306 down_write(&service_level_sem);
307 list_for_each_entry_safe(ptr, next, &service_level_list, list) {
308 if (ptr != slr)
309 continue;
310 list_del(&ptr->list);
311 rc = 0;
312 break;
313 }
314 up_write(&service_level_sem);
315 return rc;
316}
317EXPORT_SYMBOL(unregister_service_level);
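A minimal sketch of a consumer of this interface: a driver contributing one line to /proc/service_levels. It assumes struct service_level and the register/unregister prototypes come from <asm/sysinfo.h>, as in kernels of this vintage; the driver name and firmware string are invented.

/* Hypothetical module registering its own service level entry. */
#include <linux/module.h>
#include <linux/seq_file.h>
#include <asm/sysinfo.h>

static void mydrv_service_level_print(struct seq_file *m,
                                      struct service_level *slr)
{
        seq_printf(m, "mydrv: firmware level 1.2.3\n");  /* example text */
}

static struct service_level mydrv_service_level = {
        .seq_print = mydrv_service_level_print,
};

static int __init mydrv_init(void)
{
        return register_service_level(&mydrv_service_level);
}

static void __exit mydrv_exit(void)
{
        unregister_service_level(&mydrv_service_level);
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");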
318
319static void *service_level_start(struct seq_file *m, loff_t *pos)
320{
321 down_read(&service_level_sem);
322 return seq_list_start(&service_level_list, *pos);
323}
324
325static void *service_level_next(struct seq_file *m, void *p, loff_t *pos)
326{
327 return seq_list_next(p, &service_level_list, pos);
328}
329
330static void service_level_stop(struct seq_file *m, void *p)
331{
332 up_read(&service_level_sem);
333}
334
335static int service_level_show(struct seq_file *m, void *p)
336{
337 struct service_level *slr;
338
339 slr = list_entry(p, struct service_level, list);
340 slr->seq_print(m, slr);
341 return 0;
342}
343
344static const struct seq_operations service_level_seq_ops = {
345 .start = service_level_start,
346 .next = service_level_next,
347 .stop = service_level_stop,
348 .show = service_level_show
349};
350
351static int service_level_open(struct inode *inode, struct file *file)
352{
353 return seq_open(file, &service_level_seq_ops);
354}
355
356static const struct file_operations service_level_ops = {
357 .open = service_level_open,
358 .read = seq_read,
359 .llseek = seq_lseek,
360 .release = seq_release
361};
362
363static void service_level_vm_print(struct seq_file *m,
364 struct service_level *slr)
365{
366 char *query_buffer, *str;
367
368 query_buffer = kmalloc(1024, GFP_KERNEL | GFP_DMA);
369 if (!query_buffer)
370 return;
371 cpcmd("QUERY CPLEVEL", query_buffer, 1024, NULL);
372 str = strchr(query_buffer, '\n');
373 if (str)
374 *str = 0;
375 seq_printf(m, "VM: %s\n", query_buffer);
376 kfree(query_buffer);
377}
378
379static struct service_level service_level_vm = {
380 .seq_print = service_level_vm_print
381};
382
383static __init int create_proc_service_level(void)
384{
385 proc_create("service_levels", 0, NULL, &service_level_ops);
386 if (MACHINE_IS_VM)
387 register_service_level(&service_level_vm);
388 return 0;
389}
390
391subsys_initcall(create_proc_service_level);
392
393/*
394 * Bogomips calculation based on cpu capability.
395 */
396
397int get_cpu_capability(unsigned int *capability)
398{
399 struct sysinfo_1_2_2 *info;
400 int rc;
401
402 info = (void *) get_zeroed_page(GFP_KERNEL);
403 if (!info)
404 return -ENOMEM;
405 rc = stsi(info, 1, 2, 2);
406 if (rc == -ENOSYS)
407 goto out;
408 rc = 0;
409 *capability = info->capability;
410out:
411 free_page((unsigned long) info);
412 return rc;
413}
414
415/*
416 * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
417 */
418void s390_adjust_jiffies(void)
419{
420 struct sysinfo_1_2_2 *info;
421 const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. */
422 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
423 FP_DECL_EX;
424 unsigned int capability;
425
426 info = (void *) get_zeroed_page(GFP_KERNEL);
427 if (!info)
428 return;
429
430 if (stsi(info, 1, 2, 2) != -ENOSYS) {
431 /*
432 * Major sigh. The cpu capability encoding is "special".
433 * If the first 9 bits of info->capability are 0 then it
434 * is a 32 bit unsigned integer in the range 0 .. 2^23.
435 * If the first 9 bits are != 0 then it is a 32 bit float.
436 * In addition a lower value indicates a proportionally
437 * higher cpu capacity. Bogomips are the other way round.
438 * To get to a halfway suitable number we divide 1e7
439 * by the cpu capability number. Yes, that means a floating
440 * point division .. math-emu here we come :-)
441 */
442 FP_UNPACK_SP(SA, &fmil);
443 if ((info->capability >> 23) == 0)
444 FP_FROM_INT_S(SB, info->capability, 32, int);
445 else
446 FP_UNPACK_SP(SB, &info->capability);
447 FP_DIV_S(SR, SA, SB);
448 FP_TO_INT_S(capability, SR, 32, 0);
449 } else
450 /*
451 * Really old machine without stsi block for basic
452 * cpu information. Report 42.0 bogomips.
453 */
454 capability = 42;
455 loops_per_jiffy = capability * (500000/HZ);
456 free_page((unsigned long) info);
457}
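The same arithmetic s390_adjust_jiffies() performs with math-emu, done with ordinary host floating point: divide 1e7 by the capability value (integer- or float-encoded) and scale the result into loops_per_jiffy. HZ=100 and the raw value 2000 are assumptions for the example.

/* Worked example of the capability -> BogoMIPS conversion. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define HZ 100

int main(void)
{
        uint32_t raw = 2000;            /* sample cpu capability value */
        unsigned int bogo;
        unsigned long loops_per_jiffy;
        float cap;

        if ((raw >> 23) == 0)           /* top 9 bits zero: plain integer */
                cap = (float) raw;
        else                            /* otherwise: IEEE single bit pattern */
                memcpy(&cap, &raw, sizeof(cap));

        bogo = (unsigned int) (1e7f / cap);
        loops_per_jiffy = bogo * (500000 / HZ);

        printf("%lu.%02lu BogoMIPS preset\n",
               loops_per_jiffy / (500000 / HZ),
               (loops_per_jiffy / (5000 / HZ)) % 100);   /* 5000.00 */
        return 0;
}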
458
459/*
460 * calibrate the delay loop
461 */
462void __cpuinit calibrate_delay(void)
463{
464 s390_adjust_jiffies();
465 /* Print the good old Bogomips line .. */
466 printk(KERN_DEBUG "Calibrating delay loop (skipped)... "
467 "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ),
468 (loops_per_jiffy/(5000/HZ)) % 100);
469}