Diffstat (limited to 'drivers')
-rw-r--r--  drivers/s390/block/dasd.c          |    3
-rw-r--r--  drivers/s390/block/dasd_devmap.c   |   58
-rw-r--r--  drivers/s390/char/Makefile         |    5
-rw-r--r--  drivers/s390/char/con3215.c        |    7
-rw-r--r--  drivers/s390/char/con3270.c        |    7
-rw-r--r--  drivers/s390/char/sclp.c           |   10
-rw-r--r--  drivers/s390/char/sclp.h           |   72
-rw-r--r--  drivers/s390/char/sclp_chp.c       |  196
-rw-r--r--  drivers/s390/char/sclp_config.c    |   75
-rw-r--r--  drivers/s390/char/sclp_cpi.c       |    4
-rw-r--r--  drivers/s390/char/sclp_quiesce.c   |    2
-rw-r--r--  drivers/s390/char/sclp_rw.c        |   16
-rw-r--r--  drivers/s390/char/sclp_sdias.c     |  255
-rw-r--r--  drivers/s390/char/sclp_tty.c       |    6
-rw-r--r--  drivers/s390/char/sclp_vt220.c     |    8
-rw-r--r--  drivers/s390/char/vmlogrdr.c       |    9
-rw-r--r--  drivers/s390/char/zcore.c          |  651
-rw-r--r--  drivers/s390/cio/Makefile          |    2
-rw-r--r--  drivers/s390/cio/ccwgroup.c        |   33
-rw-r--r--  drivers/s390/cio/chp.c             |  683
-rw-r--r--  drivers/s390/cio/chp.h             |   53
-rw-r--r--  drivers/s390/cio/chsc.c            | 1024
-rw-r--r--  drivers/s390/cio/chsc.h            |   42
-rw-r--r--  drivers/s390/cio/cio.c             |   52
-rw-r--r--  drivers/s390/cio/cio.h             |   17
-rw-r--r--  drivers/s390/cio/cmf.c             |    2
-rw-r--r--  drivers/s390/cio/css.c             |  201
-rw-r--r--  drivers/s390/cio/css.h             |   16
-rw-r--r--  drivers/s390/cio/device.c          |  246
-rw-r--r--  drivers/s390/cio/device_fsm.c      |    8
-rw-r--r--  drivers/s390/cio/device_ops.c      |    7
-rw-r--r--  drivers/s390/cio/idset.c           |  112
-rw-r--r--  drivers/s390/cio/idset.h           |   25
-rw-r--r--  drivers/s390/cio/ioasm.h           |    5
-rw-r--r--  drivers/s390/net/ctcmain.c         |   23
-rw-r--r--  drivers/s390/s390mach.c            |   25
-rw-r--r--  drivers/s390/sysinfo.c             |   18
37 files changed, 2781 insertions, 1197 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index eb5dc62f0d9c..e71929db8b06 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -398,6 +398,9 @@ dasd_change_state(struct dasd_device *device)
 
 	if (device->state == device->target)
 		wake_up(&dasd_init_waitq);
+
+	/* let user-space know that the device status changed */
+	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
 }
 
 /*
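
The KOBJ_CHANGE uevent added above reaches user space through the kobject uevent netlink channel. As a hedged illustration (ordinary Linux userspace C, not part of this patch; group 1 is the kernel's uevent multicast group), a minimal listener could look like this:

	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>

	/* Sketch: print kobject "change" events such as the one emitted by
	 * dasd_change_state() above. Error handling trimmed for brevity. */
	int main(void)
	{
		struct sockaddr_nl addr = {
			.nl_family = AF_NETLINK,
			.nl_groups = 1,	/* kernel uevent multicast group */
		};
		char buf[4096];
		int fd, len;

		fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
		bind(fd, (struct sockaddr *) &addr, sizeof(addr));
		while ((len = recv(fd, buf, sizeof(buf) - 1, 0)) > 0) {
			buf[len] = 0;
			if (strncmp(buf, "change@", 7) == 0)
				printf("changed: %s\n", buf + 7);
		}
		return 0;
	}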
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index ed70852cc915..6a89cefe99bb 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -19,6 +19,7 @@
 
 #include <asm/debug.h>
 #include <asm/uaccess.h>
+#include <asm/ipl.h>
 
 /* This is ugly... */
 #define PRINTK_HEADER "dasd_devmap:"
@@ -133,6 +134,8 @@ dasd_call_setup(char *str)
 __setup ("dasd=", dasd_call_setup);
 #endif	/* #ifndef MODULE */
 
+#define DASD_IPLDEV	"ipldev"
+
 /*
  * Read a device busid/devno from a string.
  */
@@ -141,6 +144,20 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
 {
 	int val, old_style;
 
+	/* Interpret ipldev busid */
+	if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) {
+		if (ipl_info.type != IPL_TYPE_CCW) {
+			MESSAGE(KERN_ERR, "%s", "ipl device is not a ccw "
+				"device");
+			return -EINVAL;
+		}
+		*id0 = 0;
+		*id1 = ipl_info.data.ccw.dev_id.ssid;
+		*devno = ipl_info.data.ccw.dev_id.devno;
+		*str += strlen(DASD_IPLDEV);
+
+		return 0;
+	}
 	/* check for leading '0x' */
 	old_style = 0;
 	if ((*str)[0] == '0' && (*str)[1] == 'x') {
@@ -829,6 +846,46 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
 
 static ssize_t
+dasd_device_status_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct dasd_device *device;
+	ssize_t len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (!IS_ERR(device)) {
+		switch (device->state) {
+		case DASD_STATE_NEW:
+			len = snprintf(buf, PAGE_SIZE, "new\n");
+			break;
+		case DASD_STATE_KNOWN:
+			len = snprintf(buf, PAGE_SIZE, "detected\n");
+			break;
+		case DASD_STATE_BASIC:
+			len = snprintf(buf, PAGE_SIZE, "basic\n");
+			break;
+		case DASD_STATE_UNFMT:
+			len = snprintf(buf, PAGE_SIZE, "unformatted\n");
+			break;
+		case DASD_STATE_READY:
+			len = snprintf(buf, PAGE_SIZE, "ready\n");
+			break;
+		case DASD_STATE_ONLINE:
+			len = snprintf(buf, PAGE_SIZE, "online\n");
+			break;
+		default:
+			len = snprintf(buf, PAGE_SIZE, "no stat\n");
+			break;
+		}
+		dasd_put_device(device);
+	} else
+		len = snprintf(buf, PAGE_SIZE, "unknown\n");
+	return len;
+}
+
+static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
+
+static ssize_t
 dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct dasd_devmap *devmap;
@@ -939,6 +996,7 @@ static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
 static struct attribute * dasd_attrs[] = {
 	&dev_attr_readonly.attr,
 	&dev_attr_discipline.attr,
+	&dev_attr_status.attr,
 	&dev_attr_alias.attr,
 	&dev_attr_vendor.attr,
 	&dev_attr_uid.attr,
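
The new status attribute exposes the DASD state machine (new → detected → basic → unformatted → ready → online) in sysfs. A minimal userspace sketch, assuming a device with the placeholder bus id 0.0.4711:

	#include <stdio.h>

	/* Sketch: read the state string written by dasd_device_status_show().
	 * The bus id below is an example, not taken from this patch. */
	int main(void)
	{
		char state[32];
		FILE *f = fopen("/sys/bus/ccw/devices/0.0.4711/status", "r");

		if (!f)
			return 1;
		if (fgets(state, sizeof(state), f))
			printf("dasd state: %s", state);	/* e.g. "online" */
		fclose(f);
		return 0;
	}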
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 293e667b50f2..c210784bdf46 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -3,7 +3,7 @@
 #
 
 obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
-	 sclp_info.o
+	 sclp_info.o sclp_config.o sclp_chp.o
 
 obj-$(CONFIG_TN3270) += raw3270.o
 obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
@@ -29,3 +29,6 @@ obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
 obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
 obj-$(CONFIG_MONREADER) += monreader.o
 obj-$(CONFIG_MONWRITER) += monwriter.o
+
+zcore_mod-objs := sclp_sdias.o zcore.o
+obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 9a328f14a641..6000bdee4082 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -813,12 +813,6 @@ con3215_unblank(void)
 	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 }
 
-static int __init
-con3215_consetup(struct console *co, char *options)
-{
-	return 0;
-}
-
 /*
  * The console structure for the 3215 console
  */
@@ -827,7 +821,6 @@ static struct console con3215 = {
 	.write = con3215_write,
 	.device = con3215_device,
 	.unblank = con3215_unblank,
-	.setup = con3215_consetup,
 	.flags = CON_PRINTBUFFER,
 };
 
833 826
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 8e7f2d7633d6..fd3479119eb4 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -555,12 +555,6 @@ con3270_unblank(void)
 	spin_unlock_irqrestore(&cp->view.lock, flags);
 }
 
-static int __init
-con3270_consetup(struct console *co, char *options)
-{
-	return 0;
-}
-
 /*
  * The console structure for the 3270 console
  */
@@ -569,7 +563,6 @@ static struct console con3270 = {
 	.write = con3270_write,
 	.device = con3270_device,
 	.unblank = con3270_unblank,
-	.setup = con3270_consetup,
 	.flags = CON_PRINTBUFFER,
 };
 
575 568
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index f171de3b0b11..fa62e6944057 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -15,6 +15,7 @@
 #include <linux/timer.h>
 #include <linux/reboot.h>
 #include <linux/jiffies.h>
+#include <linux/init.h>
 #include <asm/types.h>
 #include <asm/s390_ext.h>
 
@@ -510,7 +511,7 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
 }
 
 static struct sclp_register sclp_state_change_event = {
-	.receive_mask = EvTyp_StateChange_Mask,
+	.receive_mask = EVTYP_STATECHANGE_MASK,
 	.receiver_fn = sclp_state_change_cb
 };
 
@@ -930,3 +931,10 @@ sclp_init(void)
 	sclp_init_mask(1);
 	return 0;
 }
+
+static __init int sclp_initcall(void)
+{
+	return sclp_init();
+}
+
+arch_initcall(sclp_initcall);
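
arch_initcall() places the new sclp_initcall() wrapper early in the boot sequence: initcall levels run in the fixed order core → postcore → arch → subsys → fs → device → late, so SCLP comes up before ordinary device_initcall/module_init code. A minimal sketch of the same pattern, with a hypothetical init routine:

	/* Sketch: hook an init routine in at arch_initcall level. */
	static __init int example_initcall(void)
	{
		return 0;	/* a real driver would initialize here */
	}
	arch_initcall(example_initcall);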
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 7d29ab45a6ed..87ac4a3ad49d 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -19,33 +19,37 @@
 #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
 #define MAX_CONSOLE_PAGES	4
 
-#define EvTyp_OpCmd		0x01
-#define EvTyp_Msg		0x02
-#define EvTyp_StateChange	0x08
-#define EvTyp_PMsgCmd		0x09
-#define EvTyp_CntlProgOpCmd	0x20
-#define EvTyp_CntlProgIdent	0x0B
-#define EvTyp_SigQuiesce	0x1D
-#define EvTyp_VT220Msg		0x1A
-
-#define EvTyp_OpCmd_Mask	0x80000000
-#define EvTyp_Msg_Mask		0x40000000
-#define EvTyp_StateChange_Mask	0x01000000
-#define EvTyp_PMsgCmd_Mask	0x00800000
-#define EvTyp_CtlProgOpCmd_Mask	0x00000001
-#define EvTyp_CtlProgIdent_Mask	0x00200000
-#define EvTyp_SigQuiesce_Mask	0x00000008
-#define EvTyp_VT220Msg_Mask	0x00000040
-
-#define GnrlMsgFlgs_DOM		0x8000
-#define GnrlMsgFlgs_SndAlrm	0x4000
-#define GnrlMsgFlgs_HoldMsg	0x2000
-
-#define LnTpFlgs_CntlText	0x8000
-#define LnTpFlgs_LabelText	0x4000
-#define LnTpFlgs_DataText	0x2000
-#define LnTpFlgs_EndText	0x1000
-#define LnTpFlgs_PromptText	0x0800
+#define EVTYP_OPCMD		0x01
+#define EVTYP_MSG		0x02
+#define EVTYP_STATECHANGE	0x08
+#define EVTYP_PMSGCMD		0x09
+#define EVTYP_CNTLPROGOPCMD	0x20
+#define EVTYP_CNTLPROGIDENT	0x0B
+#define EVTYP_SIGQUIESCE	0x1D
+#define EVTYP_VT220MSG		0x1A
+#define EVTYP_CONFMGMDATA	0x04
+#define EVTYP_SDIAS		0x1C
+
+#define EVTYP_OPCMD_MASK	0x80000000
+#define EVTYP_MSG_MASK		0x40000000
+#define EVTYP_STATECHANGE_MASK	0x01000000
+#define EVTYP_PMSGCMD_MASK	0x00800000
+#define EVTYP_CTLPROGOPCMD_MASK	0x00000001
+#define EVTYP_CTLPROGIDENT_MASK	0x00200000
+#define EVTYP_SIGQUIESCE_MASK	0x00000008
+#define EVTYP_VT220MSG_MASK	0x00000040
+#define EVTYP_CONFMGMDATA_MASK	0x10000000
+#define EVTYP_SDIAS_MASK	0x00000010
+
+#define GNRLMSGFLGS_DOM		0x8000
+#define GNRLMSGFLGS_SNDALRM	0x4000
+#define GNRLMSGFLGS_HOLDMSG	0x2000
+
+#define LNTPFLGS_CNTLTEXT	0x8000
+#define LNTPFLGS_LABELTEXT	0x4000
+#define LNTPFLGS_DATATEXT	0x2000
+#define LNTPFLGS_ENDTEXT	0x1000
+#define LNTPFLGS_PROMPTTEXT	0x0800
 
 typedef unsigned int sclp_cmdw_t;
 
@@ -56,15 +60,15 @@ typedef unsigned int sclp_cmdw_t;
 #define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001
 
 #define GDS_ID_MDSMU		0x1310
-#define GDS_ID_MDSRouteInfo	0x1311
-#define GDS_ID_AgUnWrkCorr	0x1549
-#define GDS_ID_SNACondReport	0x1532
+#define GDS_ID_MDSROUTEINFO	0x1311
+#define GDS_ID_AGUNWRKCORR	0x1549
+#define GDS_ID_SNACONDREPORT	0x1532
 #define GDS_ID_CPMSU		0x1212
-#define GDS_ID_RoutTargInstr	0x154D
-#define GDS_ID_OpReq		0x8070
-#define GDS_ID_TextCmd		0x1320
+#define GDS_ID_ROUTTARGINSTR	0x154D
+#define GDS_ID_OPREQ		0x8070
+#define GDS_ID_TEXTCMD		0x1320
 
-#define GDS_KEY_SelfDefTextMsg	0x31
+#define GDS_KEY_SELFDEFTEXTMSG	0x31
 
 typedef u32 sccb_mask_t;	/* ATTENTION: assumes 32bit mask !!! */
 
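
The renamed constants preserve a regular relationship that the header itself leaves implicit: every EVTYP_*_MASK is the event-type number mapped onto a single bit of the 32-bit sccb_mask_t, counted from the most significant bit. A small self-checking sketch (my observation, not stated in the header):

	#include <assert.h>

	/* mask == 1U << (32 - type), i.e. bit (type - 1) from the MSB */
	static unsigned int evtyp_to_mask(unsigned int type)
	{
		return 1U << (32 - type);
	}

	int main(void)
	{
		assert(evtyp_to_mask(0x01) == 0x80000000);	/* EVTYP_OPCMD */
		assert(evtyp_to_mask(0x08) == 0x01000000);	/* EVTYP_STATECHANGE */
		assert(evtyp_to_mask(0x04) == 0x10000000);	/* EVTYP_CONFMGMDATA */
		assert(evtyp_to_mask(0x1C) == 0x00000010);	/* EVTYP_SDIAS */
		return 0;
	}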
diff --git a/drivers/s390/char/sclp_chp.c b/drivers/s390/char/sclp_chp.c
new file mode 100644
index 000000000000..a66b914519b5
--- /dev/null
+++ b/drivers/s390/char/sclp_chp.c
@@ -0,0 +1,196 @@
+/*
+ * drivers/s390/char/sclp_chp.c
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/errno.h>
+#include <linux/completion.h>
+#include <asm/sclp.h>
+#include <asm/chpid.h>
+
+#include "sclp.h"
+
+#define TAG	"sclp_chp: "
+
+#define SCLP_CMDW_CONFIGURE_CHANNEL_PATH	0x000f0001
+#define SCLP_CMDW_DECONFIGURE_CHANNEL_PATH	0x000e0001
+#define SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION	0x00030001
+
+static inline sclp_cmdw_t get_configure_cmdw(struct chp_id chpid)
+{
+	return SCLP_CMDW_CONFIGURE_CHANNEL_PATH | chpid.id << 8;
+}
+
+static inline sclp_cmdw_t get_deconfigure_cmdw(struct chp_id chpid)
+{
+	return SCLP_CMDW_DECONFIGURE_CHANNEL_PATH | chpid.id << 8;
+}
+
+static void chp_callback(struct sclp_req *req, void *data)
+{
+	struct completion *completion = data;
+
+	complete(completion);
+}
+
+struct chp_cfg_sccb {
+	struct sccb_header header;
+	u8 ccm;
+	u8 reserved[6];
+	u8 cssid;
+} __attribute__((packed));
+
+struct chp_cfg_data {
+	struct chp_cfg_sccb sccb;
+	struct sclp_req req;
+	struct completion completion;
+} __attribute__((packed));
+
+static int do_configure(sclp_cmdw_t cmd)
+{
+	struct chp_cfg_data *data;
+	int rc;
+
+	/* Prepare sccb. */
+	data = (struct chp_cfg_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!data)
+		return -ENOMEM;
+	data->sccb.header.length = sizeof(struct chp_cfg_sccb);
+	data->req.command = cmd;
+	data->req.sccb = &(data->sccb);
+	data->req.status = SCLP_REQ_FILLED;
+	data->req.callback = chp_callback;
+	data->req.callback_data = &(data->completion);
+	init_completion(&data->completion);
+
+	/* Perform sclp request. */
+	rc = sclp_add_request(&(data->req));
+	if (rc)
+		goto out;
+	wait_for_completion(&data->completion);
+
+	/* Check response. */
+	if (data->req.status != SCLP_REQ_DONE) {
+		printk(KERN_WARNING TAG "configure channel-path request failed "
+		       "(status=0x%02x)\n", data->req.status);
+		rc = -EIO;
+		goto out;
+	}
+	switch (data->sccb.header.response_code) {
+	case 0x0020:
+	case 0x0120:
+	case 0x0440:
+	case 0x0450:
+		break;
+	default:
+		printk(KERN_WARNING TAG "configure channel-path failed "
+		       "(cmd=0x%08x, response=0x%04x)\n", cmd,
+		       data->sccb.header.response_code);
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) data);
+
+	return rc;
+}
+
+/**
+ * sclp_chp_configure - perform configure channel-path sclp command
+ * @chpid: channel-path ID
+ *
+ * Perform configure channel-path command sclp command for specified chpid.
+ * Return 0 after command successfully finished, non-zero otherwise.
+ */
+int sclp_chp_configure(struct chp_id chpid)
+{
+	return do_configure(get_configure_cmdw(chpid));
+}
+
+/**
+ * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
+ * @chpid: channel-path ID
+ *
+ * Perform deconfigure channel-path command sclp command for specified chpid
+ * and wait for completion. On success return 0. Return non-zero otherwise.
+ */
+int sclp_chp_deconfigure(struct chp_id chpid)
+{
+	return do_configure(get_deconfigure_cmdw(chpid));
+}
+
+struct chp_info_sccb {
+	struct sccb_header header;
+	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
+	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
+	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
+	u8 ccm;
+	u8 reserved[6];
+	u8 cssid;
+} __attribute__((packed));
+
+struct chp_info_data {
+	struct chp_info_sccb sccb;
+	struct sclp_req req;
+	struct completion completion;
+} __attribute__((packed));
+
+/**
+ * sclp_chp_read_info - perform read channel-path information sclp command
+ * @info: resulting channel-path information data
+ *
+ * Perform read channel-path information sclp command and wait for completion.
+ * On success, store channel-path information in @info and return 0. Return
+ * non-zero otherwise.
+ */
+int sclp_chp_read_info(struct sclp_chp_info *info)
+{
+	struct chp_info_data *data;
+	int rc;
+
+	/* Prepare sccb. */
+	data = (struct chp_info_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!data)
+		return -ENOMEM;
+	data->sccb.header.length = sizeof(struct chp_info_sccb);
+	data->req.command = SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION;
+	data->req.sccb = &(data->sccb);
+	data->req.status = SCLP_REQ_FILLED;
+	data->req.callback = chp_callback;
+	data->req.callback_data = &(data->completion);
+	init_completion(&data->completion);
+
+	/* Perform sclp request. */
+	rc = sclp_add_request(&(data->req));
+	if (rc)
+		goto out;
+	wait_for_completion(&data->completion);
+
+	/* Check response. */
+	if (data->req.status != SCLP_REQ_DONE) {
+		printk(KERN_WARNING TAG "read channel-path info request failed "
+		       "(status=0x%02x)\n", data->req.status);
+		rc = -EIO;
+		goto out;
+	}
+	if (data->sccb.header.response_code != 0x0010) {
+		printk(KERN_WARNING TAG "read channel-path info failed "
+		       "(response=0x%04x)\n", data->sccb.header.response_code);
+		rc = -EIO;
+		goto out;
+	}
+	memcpy(info->recognized, data->sccb.recognized,
+	       SCLP_CHP_INFO_MASK_SIZE);
+	memcpy(info->standby, data->sccb.standby,
+	       SCLP_CHP_INFO_MASK_SIZE);
+	memcpy(info->configured, data->sccb.configured,
+	       SCLP_CHP_INFO_MASK_SIZE);
+out:
+	free_page((unsigned long) data);
+
+	return rc;
+}
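
A hedged caller sketch for the new interface (kernel context assumed): read the channel-path masks, then configure each standby CHPID. The MSB-first byte layout of the masks is an assumption here, mirroring other s390 CHPID masks rather than anything stated in this file:

	static int configure_all_standby(void)
	{
		struct sclp_chp_info info;
		struct chp_id chpid = { 0 };	/* assumes css 0 */
		int rc, id;

		rc = sclp_chp_read_info(&info);
		if (rc)
			return rc;
		for (id = 0; id <= 255; id++) {
			/* assumed bit layout: MSB-first within each byte */
			if (!(info.standby[id >> 3] & (0x80 >> (id & 7))))
				continue;
			chpid.id = id;
			rc = sclp_chp_configure(chpid);
			if (rc)
				return rc;
		}
		return 0;
	}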
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
new file mode 100644
index 000000000000..5322e5e54a98
--- /dev/null
+++ b/drivers/s390/char/sclp_config.c
@@ -0,0 +1,75 @@
+/*
+ * drivers/s390/char/sclp_config.c
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/cpu.h>
+#include <linux/sysdev.h>
+#include <linux/workqueue.h>
+#include "sclp.h"
+
+#define TAG	"sclp_config: "
+
+struct conf_mgm_data {
+	u8 reserved;
+	u8 ev_qualifier;
+} __attribute__((packed));
+
+#define EV_QUAL_CAP_CHANGE	3
+
+static struct work_struct sclp_cpu_capability_work;
+
+static void sclp_cpu_capability_notify(struct work_struct *work)
+{
+	int cpu;
+	struct sys_device *sysdev;
+
+	printk(KERN_WARNING TAG "cpu capability changed.\n");
+	lock_cpu_hotplug();
+	for_each_online_cpu(cpu) {
+		sysdev = get_cpu_sysdev(cpu);
+		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+	}
+	unlock_cpu_hotplug();
+}
+
+static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
+{
+	struct conf_mgm_data *cdata;
+
+	cdata = (struct conf_mgm_data *)(evbuf + 1);
+	if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE)
+		schedule_work(&sclp_cpu_capability_work);
+}
+
+static struct sclp_register sclp_conf_register =
+{
+	.receive_mask = EVTYP_CONFMGMDATA_MASK,
+	.receiver_fn = sclp_conf_receiver_fn,
+};
+
+static int __init sclp_conf_init(void)
+{
+	int rc;
+
+	INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
+
+	rc = sclp_register(&sclp_conf_register);
+	if (rc) {
+		printk(KERN_ERR TAG "failed to register (%d).\n", rc);
+		return rc;
+	}
+
+	if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK)) {
+		printk(KERN_WARNING TAG "no configuration management.\n");
+		sclp_unregister(&sclp_conf_register);
+		rc = -ENOSYS;
+	}
+	return rc;
+}
+
+__initcall(sclp_conf_init);
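
sclp_conf_receiver_fn() reads its payload via (evbuf + 1), i.e. from the bytes immediately following the event-buffer header. A standalone sketch of that pointer idiom; the stand-in struct layouts are hypothetical, not the real sclp definitions:

	#include <stdio.h>
	#include <stdint.h>

	struct evbuf_header { uint16_t length; uint8_t type; uint8_t flags; uint16_t _reserved; };
	struct conf_mgm_data { uint8_t reserved; uint8_t ev_qualifier; };

	int main(void)
	{
		static unsigned char raw[sizeof(struct evbuf_header) + 2];
		struct evbuf_header *evbuf = (struct evbuf_header *) raw;
		/* payload starts directly behind the fixed-size header: */
		struct conf_mgm_data *cdata = (struct conf_mgm_data *)(evbuf + 1);

		raw[sizeof(*evbuf) + 1] = 3;	/* EV_QUAL_CAP_CHANGE */
		printf("qualifier = %u\n", cdata->ev_qualifier);	/* 3 */
		return 0;
	}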
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 65aa2c85737f..29fe2a5ec2fe 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -46,7 +46,7 @@ struct cpi_sccb {
 /* Event type structure for write message and write priority message */
 static struct sclp_register sclp_cpi_event =
 {
-	.send_mask = EvTyp_CtlProgIdent_Mask
+	.send_mask = EVTYP_CTLPROGIDENT_MASK
 };
 
 MODULE_LICENSE("GPL");
@@ -201,7 +201,7 @@ cpi_module_init(void)
201 "console.\n"); 201 "console.\n");
202 return -EINVAL; 202 return -EINVAL;
203 } 203 }
204 if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) { 204 if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) {
205 printk(KERN_WARNING "cpi: no control program identification " 205 printk(KERN_WARNING "cpi: no control program identification "
206 "support\n"); 206 "support\n");
207 sclp_unregister(&sclp_cpi_event); 207 sclp_unregister(&sclp_cpi_event);
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index baa8fe669ed2..45ff25e787cb 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -43,7 +43,7 @@ sclp_quiesce_handler(struct evbuf_header *evbuf)
 }
 
 static struct sclp_register sclp_quiesce_event = {
-	.receive_mask = EvTyp_SigQuiesce_Mask,
+	.receive_mask = EVTYP_SIGQUIESCE_MASK,
 	.receiver_fn = sclp_quiesce_handler
 };
 
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 2486783ea58e..bbd5b8b66f42 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -30,7 +30,7 @@
 
 /* Event type structure for write message and write priority message */
 static struct sclp_register sclp_rw_event = {
-	.send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask
+	.send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK
 };
 
 /*
@@ -64,7 +64,7 @@ sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
 	memset(sccb, 0, sizeof(struct write_sccb));
 	sccb->header.length = sizeof(struct write_sccb);
 	sccb->msg_buf.header.length = sizeof(struct msg_buf);
-	sccb->msg_buf.header.type = EvTyp_Msg;
+	sccb->msg_buf.header.type = EVTYP_MSG;
 	sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
 	sccb->msg_buf.mdb.header.type = 1;
 	sccb->msg_buf.mdb.header.tag = 0xD4C4C240;	/* ebcdic "MDB " */
@@ -114,7 +114,7 @@ sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
 	memset(mto, 0, sizeof(struct mto));
 	mto->length = sizeof(struct mto);
 	mto->type = 4;	/* message text object */
-	mto->line_type_flags = LnTpFlgs_EndText; /* end text */
+	mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
 
 	/* set pointer to first byte after struct mto. */
 	buffer->current_line = (char *) (mto + 1);
@@ -215,7 +215,7 @@ sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
 	case '\a':	/* bell, one for several times */
 		/* set SCLP sound alarm bit in General Object */
 		buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
-			GnrlMsgFlgs_SndAlrm;
+			GNRLMSGFLGS_SNDALRM;
 		break;
 	case '\t':	/* horizontal tabulator */
 		/* check if new mto needs to be created */
@@ -452,12 +452,12 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
 		return -EIO;
 
 	sccb = buffer->sccb;
-	if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask)
+	if (sclp_rw_event.sclp_send_mask & EVTYP_MSG_MASK)
 		/* Use normal write message */
-		sccb->msg_buf.header.type = EvTyp_Msg;
-	else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask)
+		sccb->msg_buf.header.type = EVTYP_MSG;
+	else if (sclp_rw_event.sclp_send_mask & EVTYP_PMSGCMD_MASK)
 		/* Use write priority message */
-		sccb->msg_buf.header.type = EvTyp_PMsgCmd;
+		sccb->msg_buf.header.type = EVTYP_PMSGCMD;
 	else
 		return -ENOSYS;
 	buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
new file mode 100644
index 000000000000..52283daddaef
--- /dev/null
+++ b/drivers/s390/char/sclp_sdias.c
@@ -0,0 +1,255 @@
+/*
+ * Sclp "store data in absolute storage"
+ *
+ * Copyright IBM Corp. 2003,2007
+ * Author(s): Michael Holzheu
+ */
+
+#include <linux/sched.h>
+#include <asm/sclp.h>
+#include <asm/debug.h>
+#include <asm/ipl.h>
+#include "sclp.h"
+#include "sclp_rw.h"
+
+#define TRACE(x...)	debug_sprintf_event(sdias_dbf, 1, x)
+#define ERROR_MSG(x...)	printk(KERN_ALERT "SDIAS: " x)
+
+#define SDIAS_RETRIES		300
+#define SDIAS_SLEEP_TICKS	50
+
+#define EQ_STORE_DATA		0x0
+#define EQ_SIZE			0x1
+#define DI_FCP_DUMP		0x0
+#define ASA_SIZE_32		0x0
+#define ASA_SIZE_64		0x1
+#define EVSTATE_ALL_STORED	0x0
+#define EVSTATE_NO_DATA		0x3
+#define EVSTATE_PART_STORED	0x10
+
+static struct debug_info *sdias_dbf;
+
+static struct sclp_register sclp_sdias_register = {
+	.send_mask = EVTYP_SDIAS_MASK,
+};
+
+struct sdias_evbuf {
+	struct evbuf_header hdr;
+	u8 event_qual;
+	u8 data_id;
+	u64 reserved2;
+	u32 event_id;
+	u16 reserved3;
+	u8 asa_size;
+	u8 event_status;
+	u32 reserved4;
+	u32 blk_cnt;
+	u64 asa;
+	u32 reserved5;
+	u32 fbn;
+	u32 reserved6;
+	u32 lbn;
+	u16 reserved7;
+	u16 dbs;
+} __attribute__((packed));
+
+struct sdias_sccb {
+	struct sccb_header hdr;
+	struct sdias_evbuf evbuf;
+} __attribute__((packed));
+
+static struct sdias_sccb sccb __attribute__((aligned(4096)));
+
+static int sclp_req_done;
+static wait_queue_head_t sdias_wq;
+static DEFINE_MUTEX(sdias_mutex);
+
+static void sdias_callback(struct sclp_req *request, void *data)
+{
+	struct sdias_sccb *sccb;
+
+	sccb = (struct sdias_sccb *) request->sccb;
+	sclp_req_done = 1;
+	wake_up(&sdias_wq);	/* Inform caller, that request is complete */
+	TRACE("callback done\n");
+}
+
+static int sdias_sclp_send(struct sclp_req *req)
+{
+	int retries;
+	int rc;
+
+	for (retries = SDIAS_RETRIES; retries; retries--) {
+		sclp_req_done = 0;
+		TRACE("add request\n");
+		rc = sclp_add_request(req);
+		if (rc) {
+			/* not initiated, wait some time and retry */
+			set_current_state(TASK_INTERRUPTIBLE);
+			TRACE("add request failed: rc = %i\n", rc);
+			schedule_timeout(SDIAS_SLEEP_TICKS);
+			continue;
+		}
+		/* initiated, wait for completion of service call */
+		wait_event(sdias_wq, (sclp_req_done == 1));
+		if (req->status == SCLP_REQ_FAILED) {
+			TRACE("sclp request failed\n");
+			rc = -EIO;
+			continue;
+		}
+		TRACE("request done\n");
+		break;
+	}
+	return rc;
+}
+
+/*
+ * Get number of blocks (4K) available in the HSA
+ */
+int sclp_sdias_blk_count(void)
+{
+	struct sclp_req request;
+	int rc;
+
+	mutex_lock(&sdias_mutex);
+
+	memset(&sccb, 0, sizeof(sccb));
+	memset(&request, 0, sizeof(request));
+
+	sccb.hdr.length = sizeof(sccb);
+	sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
+	sccb.evbuf.hdr.type = EVTYP_SDIAS;
+	sccb.evbuf.event_qual = EQ_SIZE;
+	sccb.evbuf.data_id = DI_FCP_DUMP;
+	sccb.evbuf.event_id = 4712;
+	sccb.evbuf.dbs = 1;
+
+	request.sccb = &sccb;
+	request.command = SCLP_CMDW_WRITE_EVENT_DATA;
+	request.status = SCLP_REQ_FILLED;
+	request.callback = sdias_callback;
+
+	rc = sdias_sclp_send(&request);
+	if (rc) {
+		ERROR_MSG("sclp_send failed for get_nr_blocks\n");
+		goto out;
+	}
+	if (sccb.hdr.response_code != 0x0020) {
+		TRACE("send failed: %x\n", sccb.hdr.response_code);
+		rc = -EIO;
+		goto out;
+	}
+
+	switch (sccb.evbuf.event_status) {
+	case 0:
+		rc = sccb.evbuf.blk_cnt;
+		break;
+	default:
+		ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status);
+		rc = -EIO;
+		goto out;
+	}
+	TRACE("%i blocks\n", rc);
+out:
+	mutex_unlock(&sdias_mutex);
+	return rc;
+}
+
+/*
+ * Copy from HSA to absolute storage (not reentrant):
+ *
+ * @dest     : Address of buffer where data should be copied
+ * @start_blk: Start Block (beginning with 1)
+ * @nr_blks  : Number of 4K blocks to copy
+ *
+ * Return Value: 0 : Requested 'number' of blocks of data copied
+ *		 <0: ERROR - negative event status
+ */
+int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
+{
+	struct sclp_req request;
+	int rc;
+
+	mutex_lock(&sdias_mutex);
+
+	memset(&sccb, 0, sizeof(sccb));
+	memset(&request, 0, sizeof(request));
+
+	sccb.hdr.length = sizeof(sccb);
+	sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
+	sccb.evbuf.hdr.type = EVTYP_SDIAS;
+	sccb.evbuf.hdr.flags = 0;
+	sccb.evbuf.event_qual = EQ_STORE_DATA;
+	sccb.evbuf.data_id = DI_FCP_DUMP;
+	sccb.evbuf.event_id = 4712;
+#ifdef __s390x__
+	sccb.evbuf.asa_size = ASA_SIZE_64;
+#else
+	sccb.evbuf.asa_size = ASA_SIZE_32;
+#endif
+	sccb.evbuf.event_status = 0;
+	sccb.evbuf.blk_cnt = nr_blks;
+	sccb.evbuf.asa = (unsigned long)dest;
+	sccb.evbuf.fbn = start_blk;
+	sccb.evbuf.lbn = 0;
+	sccb.evbuf.dbs = 1;
+
+	request.sccb = &sccb;
+	request.command = SCLP_CMDW_WRITE_EVENT_DATA;
+	request.status = SCLP_REQ_FILLED;
+	request.callback = sdias_callback;
+
+	rc = sdias_sclp_send(&request);
+	if (rc) {
+		ERROR_MSG("sclp_send failed: %x\n", rc);
+		goto out;
+	}
+	if (sccb.hdr.response_code != 0x0020) {
+		TRACE("copy failed: %x\n", sccb.hdr.response_code);
+		rc = -EIO;
+		goto out;
+	}
+
+	switch (sccb.evbuf.event_status) {
+	case EVSTATE_ALL_STORED:
+		TRACE("all stored\n");
+	case EVSTATE_PART_STORED:
+		TRACE("part stored: %i\n", sccb.evbuf.blk_cnt);
+		break;
+	case EVSTATE_NO_DATA:
+		TRACE("no data\n");
+	default:
+		ERROR_MSG("Error from SCLP while copying hsa. "
+			  "Event status = %x\n",
+			  sccb.evbuf.event_status);
+		rc = -EIO;
+	}
+out:
+	mutex_unlock(&sdias_mutex);
+	return rc;
+}
+
+int __init sdias_init(void)
+{
+	int rc;
+
+	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+		return 0;
+	sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
+	debug_register_view(sdias_dbf, &debug_sprintf_view);
+	debug_set_level(sdias_dbf, 6);
+	rc = sclp_register(&sclp_sdias_register);
+	if (rc) {
+		ERROR_MSG("sclp register failed\n");
+		return rc;
+	}
+	init_waitqueue_head(&sdias_wq);
+	TRACE("init done\n");
+	return 0;
+}
+
+void __exit sdias_exit(void)
+{
+	debug_unregister(sdias_dbf);
+	sclp_unregister(&sclp_sdias_register);
+}
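
A minimal consumer sketch for the two exported helpers; the real version is check_sdias() in zcore.c below, which uses the same (blocks - 1) * PAGE_SIZE computation to derive the usable HSA size:

	/* Sketch (kernel context): usable HSA bytes from the block count. */
	static long hsa_size_bytes(void)
	{
		int blocks = sclp_sdias_blk_count();

		if (blocks < 0)
			return blocks;	/* propagate the error code */
		return (long) (blocks - 1) * 4096;	/* 4K blocks */
	}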
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 076816b9d528..e3b3d390b4a3 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -648,7 +648,7 @@ sclp_eval_textcmd(struct gds_subvector *start,
 	subvec = start;
 	while (subvec < end) {
 		subvec = find_gds_subvector(subvec, end,
-					    GDS_KEY_SelfDefTextMsg);
+					    GDS_KEY_SELFDEFTEXTMSG);
 		if (!subvec)
 			break;
 		sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1),
@@ -664,7 +664,7 @@ sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end)
 
 	vec = start;
 	while (vec < end) {
-		vec = find_gds_vector(vec, end, GDS_ID_TextCmd);
+		vec = find_gds_vector(vec, end, GDS_ID_TEXTCMD);
 		if (!vec)
 			break;
 		sclp_eval_textcmd((struct gds_subvector *)(vec + 1),
@@ -703,7 +703,7 @@ sclp_tty_state_change(struct sclp_register *reg)
 
 static struct sclp_register sclp_input_event =
 {
-	.receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask,
+	.receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK,
 	.state_change_fn = sclp_tty_state_change,
 	.receiver_fn = sclp_tty_receiver
 };
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index f77dc33b5f8d..726334757bbf 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -99,8 +99,8 @@ static void sclp_vt220_emit_current(void);
 
 /* Registration structure for our interest in SCLP event buffers */
 static struct sclp_register sclp_vt220_register = {
-	.send_mask = EvTyp_VT220Msg_Mask,
-	.receive_mask = EvTyp_VT220Msg_Mask,
+	.send_mask = EVTYP_VT220MSG_MASK,
+	.receive_mask = EVTYP_VT220MSG_MASK,
 	.state_change_fn = NULL,
 	.receiver_fn = sclp_vt220_receiver_fn
 };
@@ -202,7 +202,7 @@ sclp_vt220_callback(struct sclp_req *request, void *data)
 static int
 __sclp_vt220_emit(struct sclp_vt220_request *request)
 {
-	if (!(sclp_vt220_register.sclp_send_mask & EvTyp_VT220Msg_Mask)) {
+	if (!(sclp_vt220_register.sclp_send_mask & EVTYP_VT220MSG_MASK)) {
 		request->sclp_req.status = SCLP_REQ_FAILED;
 		return -EIO;
 	}
@@ -284,7 +284,7 @@ sclp_vt220_initialize_page(void *page)
 	sccb->header.length = sizeof(struct sclp_vt220_sccb);
 	sccb->header.function_code = SCLP_NORMAL_WRITE;
 	sccb->header.response_code = 0x0000;
-	sccb->evbuf.type = EvTyp_VT220Msg;
+	sccb->evbuf.type = EVTYP_VT220MSG;
 	sccb->evbuf.length = sizeof(struct evbuf_header);
 
 	return request;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index b87d3b019936..a5a00e9ae4d0 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -125,7 +125,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
 		.recording_name = "EREP",
 		.minor_num = 0,
 		.buffer_free = 1,
-		.priv_lock = SPIN_LOCK_UNLOCKED,
+		.priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
 		.autorecording = 1,
 		.autopurge = 1,
 	},
@@ -134,7 +134,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
 		.recording_name = "ACCOUNT",
 		.minor_num = 1,
 		.buffer_free = 1,
-		.priv_lock = SPIN_LOCK_UNLOCKED,
+		.priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
 		.autorecording = 1,
 		.autopurge = 1,
 	},
@@ -143,7 +143,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
 		.recording_name = "SYMPTOM",
 		.minor_num = 2,
 		.buffer_free = 1,
-		.priv_lock = SPIN_LOCK_UNLOCKED,
+		.priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
 		.autorecording = 1,
 		.autopurge = 1,
 	}
@@ -385,6 +385,9 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp)
 
 	struct vmlogrdr_priv_t * logptr = filp->private_data;
 
+	iucv_path_sever(logptr->path, NULL);
+	kfree(logptr->path);
+	logptr->path = NULL;
 	if (logptr->autorecording) {
 		ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
 		if (ret)
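
The move from SPIN_LOCK_UNLOCKED to __SPIN_LOCK_UNLOCKED(name) matters for lockdep: the named initializer gives each statically initialized lock its own lock class, where the old anonymous initializer made all such locks share one class. The pattern in isolation (a sketch, not from this patch):

	/* Three statically initialized locks with distinct lockdep classes. */
	static struct {
		int counter;
		spinlock_t lock;
	} stats[3] = {
		{ .lock = __SPIN_LOCK_UNLOCKED(stats[0].lock) },
		{ .lock = __SPIN_LOCK_UNLOCKED(stats[1].lock) },
		{ .lock = __SPIN_LOCK_UNLOCKED(stats[2].lock) },
	};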
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
new file mode 100644
index 000000000000..89d439316a53
--- /dev/null
+++ b/drivers/s390/char/zcore.c
@@ -0,0 +1,651 @@
+/*
+ * zcore module to export memory content and register sets for creating system
+ * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
+ * dump format as s390 standalone dumps.
+ *
+ * For more information please refer to Documentation/s390/zfcpdump.txt
+ *
+ * Copyright IBM Corp. 2003,2007
+ * Author(s): Michael Holzheu
+ */
+
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/utsname.h>
+#include <linux/debugfs.h>
+#include <asm/ipl.h>
+#include <asm/sclp.h>
+#include <asm/setup.h>
+#include <asm/sigp.h>
+#include <asm/uaccess.h>
+#include <asm/debug.h>
+#include <asm/processor.h>
+#include <asm/irqflags.h>
+
+#define TRACE(x...)	debug_sprintf_event(zcore_dbf, 1, x)
+#define MSG(x...)	printk(KERN_ALERT x)
+#define ERROR_MSG(x...)	printk(KERN_ALERT "DUMP: " x)
+
+#define TO_USER		0
+#define TO_KERNEL	1
+
+enum arch_id {
+	ARCH_S390	= 0,
+	ARCH_S390X	= 1,
+};
+
+/* dump system info */
+
+struct sys_info {
+	enum arch_id	arch;
+	unsigned long	sa_base;
+	u32		sa_size;
+	int		cpu_map[NR_CPUS];
+	unsigned long	mem_size;
+	union save_area	lc_mask;
+};
+
+static struct sys_info sys_info;
+static struct debug_info *zcore_dbf;
+static int hsa_available;
+static struct dentry *zcore_dir;
+static struct dentry *zcore_file;
+
+/*
+ * Copy memory from HSA to kernel or user memory (not reentrant):
+ *
+ * @dest:  Kernel or user buffer where memory should be copied to
+ * @src:   Start address within HSA where data should be copied
+ * @count: Size of buffer, which should be copied
+ * @mode:  Either TO_KERNEL or TO_USER
+ */
+static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
+{
+	int offs, blk_num;
+	static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+
+	if (count == 0)
+		return 0;
+
+	/* copy first block */
+	offs = 0;
+	if ((src % PAGE_SIZE) != 0) {
+		blk_num = src / PAGE_SIZE + 2;
+		if (sclp_sdias_copy(buf, blk_num, 1)) {
+			TRACE("sclp_sdias_copy() failed\n");
+			return -EIO;
+		}
+		offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
+		if (mode == TO_USER) {
+			if (copy_to_user((__force __user void*) dest,
+					 buf + (src % PAGE_SIZE), offs))
+				return -EFAULT;
+		} else
+			memcpy(dest, buf + (src % PAGE_SIZE), offs);
+	}
+	if (offs == count)
+		goto out;
+
+	/* copy middle */
+	for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
+		blk_num = (src + offs) / PAGE_SIZE + 2;
+		if (sclp_sdias_copy(buf, blk_num, 1)) {
+			TRACE("sclp_sdias_copy() failed\n");
+			return -EIO;
+		}
+		if (mode == TO_USER) {
+			if (copy_to_user((__force __user void*) dest + offs,
+					 buf, PAGE_SIZE))
+				return -EFAULT;
+		} else
+			memcpy(dest + offs, buf, PAGE_SIZE);
+	}
+	if (offs == count)
+		goto out;
+
+	/* copy last block */
+	blk_num = (src + offs) / PAGE_SIZE + 2;
+	if (sclp_sdias_copy(buf, blk_num, 1)) {
+		TRACE("sclp_sdias_copy() failed\n");
+		return -EIO;
+	}
+	if (mode == TO_USER) {
+		if (copy_to_user((__force __user void*) dest + offs, buf,
+				 PAGE_SIZE))
+			return -EFAULT;
+	} else
+		memcpy(dest + offs, buf, count - offs);
+out:
+	return 0;
+}
+
+static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
+{
+	return memcpy_hsa((void __force *) dest, src, count, TO_USER);
+}
+
+static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
+{
+	return memcpy_hsa(dest, src, count, TO_KERNEL);
+}
+
+static int memcpy_real(void *dest, unsigned long src, size_t count)
+{
+	unsigned long flags;
+	int rc = -EFAULT;
+	register unsigned long _dest asm("2") = (unsigned long) dest;
+	register unsigned long _len1 asm("3") = (unsigned long) count;
+	register unsigned long _src  asm("4") = src;
+	register unsigned long _len2 asm("5") = (unsigned long) count;
+
+	if (count == 0)
+		return 0;
+	flags = __raw_local_irq_stnsm(0xf8); /* switch to real mode */
+	asm volatile (
+		"0:	mvcle	%1,%2,0x0\n"
+		"1:	jo	0b\n"
+		"	lhi	%0,0x0\n"
+		"2:\n"
+		EX_TABLE(1b,2b)
+		: "+d" (rc)
+		: "d" (_dest), "d" (_src), "d" (_len1), "d" (_len2)
+		: "cc", "memory");
+	__raw_local_irq_ssm(flags);
+
+	return rc;
+}
+
+static int memcpy_real_user(__user void *dest, unsigned long src, size_t count)
+{
+	static char buf[4096];
+	int offs = 0, size;
+
+	while (offs < count) {
+		size = min(sizeof(buf), count - offs);
+		if (memcpy_real(buf, src + offs, size))
+			return -EFAULT;
+		if (copy_to_user(dest + offs, buf, size))
+			return -EFAULT;
+		offs += size;
+	}
+	return 0;
+}
+
+#ifdef __s390x__
+/*
+ * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info
+ */
+static void __init s390x_to_s390_regs(union save_area *out, union save_area *in,
+				      int cpu)
+{
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff;
+		out->s390.acc_regs[i] = in->s390x.acc_regs[i];
+		out->s390.ctrl_regs[i] =
+			in->s390x.ctrl_regs[i] & 0x00000000ffffffff;
+	}
+	/* locore for 31 bit has only space for fpregs 0,2,4,6 */
+	out->s390.fp_regs[0] = in->s390x.fp_regs[0];
+	out->s390.fp_regs[1] = in->s390x.fp_regs[2];
+	out->s390.fp_regs[2] = in->s390x.fp_regs[4];
+	out->s390.fp_regs[3] = in->s390x.fp_regs[6];
+	memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4);
+	out->s390.psw[1] |= 0x8; /* set bit 12 */
+	memcpy(&(out->s390.psw[4]), &(in->s390x.psw[12]), 4);
+	out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */
+	out->s390.pref_reg = in->s390x.pref_reg;
+	out->s390.timer = in->s390x.timer;
+	out->s390.clk_cmp = in->s390x.clk_cmp;
+}
+
+static void __init s390x_to_s390_save_areas(void)
+{
+	int i = 1;
+	static union save_area tmp;
+
+	while (zfcpdump_save_areas[i]) {
+		s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i);
+		memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp));
+		i++;
+	}
+}
+
+#endif /* __s390x__ */
+
+static int __init init_cpu_info(enum arch_id arch)
+{
+	union save_area *sa;
+
+	/* get info for boot cpu from lowcore, stored in the HSA */
+
+	sa = kmalloc(sizeof(*sa), GFP_KERNEL);
+	if (!sa) {
+		ERROR_MSG("kmalloc failed: %s: %i\n", __FUNCTION__, __LINE__);
+		return -ENOMEM;
+	}
+	if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
+		ERROR_MSG("could not copy from HSA\n");
+		kfree(sa);
+		return -EIO;
+	}
+	zfcpdump_save_areas[0] = sa;
+
+#ifdef __s390x__
+	/* convert s390x regs to s390, if we are dumping an s390 Linux */
+
+	if (arch == ARCH_S390)
+		s390x_to_s390_save_areas();
+#endif
+
+	return 0;
+}
+
+static DEFINE_MUTEX(zcore_mutex);
+
+#define DUMP_VERSION	0x3
+#define DUMP_MAGIC	0xa8190173618f23fdULL
+#define DUMP_ARCH_S390X	2
+#define DUMP_ARCH_S390	1
+#define HEADER_SIZE	4096
+
+/* dump header dumped according to s390 crash dump format */
+
+struct zcore_header {
+	u64 magic;
+	u32 version;
+	u32 header_size;
+	u32 dump_level;
+	u32 page_size;
+	u64 mem_size;
+	u64 mem_start;
+	u64 mem_end;
+	u32 num_pages;
+	u32 pad1;
+	u64 tod;
+	cpuid_t cpu_id;
+	u32 arch_id;
+	u32 build_arch;
+	char pad2[4016];
+} __attribute__((packed,__aligned__(16)));
+
+static struct zcore_header zcore_header = {
+	.magic		= DUMP_MAGIC,
+	.version	= DUMP_VERSION,
+	.header_size	= 4096,
+	.dump_level	= 0,
+	.page_size	= PAGE_SIZE,
+	.mem_start	= 0,
+#ifdef __s390x__
+	.build_arch	= DUMP_ARCH_S390X,
+#else
+	.build_arch	= DUMP_ARCH_S390,
+#endif
+};
+
+/*
+ * Copy lowcore info to buffer. Use map in order to copy only register parts.
+ *
+ * @buf:    User buffer
+ * @sa:     Pointer to save area
+ * @sa_off: Offset in save area to copy
+ * @len:    Number of bytes to copy
+ */
+static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
+{
+	int i;
+	char *lc_mask = (char*)&sys_info.lc_mask;
+
+	for (i = 0; i < len; i++) {
+		if (!lc_mask[i + sa_off])
+			continue;
+		if (copy_to_user(buf + i, sa + sa_off + i, 1))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+/*
+ * Copy lowcores info to memory, if necessary
+ *
+ * @buf:   User buffer
+ * @addr:  Start address of buffer in dump memory
+ * @count: Size of buffer
+ */
+static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
+{
+	unsigned long end;
+	int i = 0;
+
+	if (count == 0)
+		return 0;
+
+	end = start + count;
+	while (zfcpdump_save_areas[i]) {
+		unsigned long cp_start, cp_end; /* copy range */
+		unsigned long sa_start, sa_end; /* save area range */
+		unsigned long prefix;
+		unsigned long sa_off, len, buf_off;
+
+		if (sys_info.arch == ARCH_S390)
+			prefix = zfcpdump_save_areas[i]->s390.pref_reg;
+		else
+			prefix = zfcpdump_save_areas[i]->s390x.pref_reg;
+
+		sa_start = prefix + sys_info.sa_base;
+		sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
+
+		if ((end < sa_start) || (start > sa_end))
+			goto next;
+		cp_start = max(start, sa_start);
+		cp_end = min(end, sa_end);
+
+		buf_off = cp_start - start;
+		sa_off = cp_start - sa_start;
+		len = cp_end - cp_start;
+
+		TRACE("copy_lc for: %lx\n", start);
+		if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
+			return -EFAULT;
+next:
+		i++;
+	}
+	return 0;
+}
+
+/*
+ * Read routine for zcore character device
+ * First 4K are dump header
+ * Next 32MB are HSA Memory
+ * Rest is read from absolute Memory
+ */
+static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
+			  loff_t *ppos)
+{
+	unsigned long mem_start; /* Start address in memory */
+	size_t mem_offs;	 /* Offset in dump memory */
+	size_t hdr_count;	 /* Size of header part of output buffer */
+	size_t size;
+	int rc;
+
+	mutex_lock(&zcore_mutex);
+
+	if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));
+
+	/* Copy dump header */
+	if (*ppos < HEADER_SIZE) {
+		size = min(count, (size_t) (HEADER_SIZE - *ppos));
+		if (copy_to_user(buf, (char *) &zcore_header + *ppos, size)) {
+			rc = -EFAULT;
+			goto fail;
+		}
+		hdr_count = size;
+		mem_start = 0;
+	} else {
+		hdr_count = 0;
+		mem_start = *ppos - HEADER_SIZE;
+	}
+
+	mem_offs = 0;
+
+	/* Copy from HSA data */
+	if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) {
+		size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE
+			   - mem_start));
+		rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
+		if (rc)
+			goto fail;
+
+		mem_offs += size;
+	}
+
+	/* Copy from real mem */
+	size = count - mem_offs - hdr_count;
+	rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs,
+			      size);
+	if (rc)
+		goto fail;
+
+	/*
+	 * Since s390 dump analysis tools like lcrash or crash
+	 * expect register sets in the prefix pages of the cpus,
+	 * we copy them into the read buffer, if necessary.
+	 * buf + hdr_count: Start of memory part of output buffer
+	 * mem_start: Start memory address to copy from
+	 * count - hdr_count: Size of memory area to copy
+	 */
+	if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
+		rc = -EFAULT;
+		goto fail;
+	}
+	*ppos += count;
+fail:
+	mutex_unlock(&zcore_mutex);
+	return (rc < 0) ? rc : count;
+}
+
+static int zcore_open(struct inode *inode, struct file *filp)
+{
+	if (!hsa_available)
+		return -ENODATA;
+	else
+		return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+}
+
+static int zcore_release(struct inode *inode, struct file *filep)
+{
+	diag308(DIAG308_REL_HSA, NULL);
+	hsa_available = 0;
+	return 0;
+}
+
+static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
+{
+	loff_t rc;
+
+	mutex_lock(&zcore_mutex);
+	switch (orig) {
+	case 0:
+		file->f_pos = offset;
+		rc = file->f_pos;
+		break;
+	case 1:
+		file->f_pos += offset;
+		rc = file->f_pos;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+	mutex_unlock(&zcore_mutex);
+	return rc;
+}
+
+static struct file_operations zcore_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= zcore_lseek,
+	.read		= zcore_read,
+	.open		= zcore_open,
+	.release	= zcore_release,
+};
+
+
+static void __init set_s390_lc_mask(union save_area *map)
+{
+	memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save));
+	memset(&map->s390.timer, 0xff, sizeof(map->s390.timer));
+	memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp));
+	memset(&map->s390.psw, 0xff, sizeof(map->s390.psw));
+	memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg));
+	memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs));
+	memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs));
+	memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs));
+	memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs));
+}
+
+static void __init set_s390x_lc_mask(union save_area *map)
+{
+	memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs));
+	memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs));
+	memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw));
+	memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg));
+	memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg));
+	memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg));
+	memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer));
+	memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp));
+	memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs));
+	memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs));
+}
+
+/*
+ * Initialize dump globals for a given architecture
+ */
+static int __init sys_info_init(enum arch_id arch)
+{
+	switch (arch) {
+	case ARCH_S390X:
+		MSG("DETECTED 'S390X (64 bit) OS'\n");
+		sys_info.sa_base = SAVE_AREA_BASE_S390X;
+		sys_info.sa_size = sizeof(struct save_area_s390x);
+		set_s390x_lc_mask(&sys_info.lc_mask);
+		break;
+	case ARCH_S390:
+		MSG("DETECTED 'S390 (32 bit) OS'\n");
+		sys_info.sa_base = SAVE_AREA_BASE_S390;
+		sys_info.sa_size = sizeof(struct save_area_s390);
+		set_s390_lc_mask(&sys_info.lc_mask);
+		break;
+	default:
+		ERROR_MSG("unknown architecture 0x%x.\n", arch);
+		return -EINVAL;
+	}
+	sys_info.arch = arch;
+	if (init_cpu_info(arch)) {
+		ERROR_MSG("get cpu info failed\n");
+		return -ENOMEM;
+	}
+	sys_info.mem_size = real_memory_size;
+
+	return 0;
+}
+
+static int __init check_sdias(void)
+{
+	int rc, act_hsa_size;
+
+	rc = sclp_sdias_blk_count();
+	if (rc < 0) {
+		ERROR_MSG("Could not determine HSA size\n");
+		return rc;
+	}
+	act_hsa_size = (rc - 1) * PAGE_SIZE;
+	if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
+		ERROR_MSG("HSA size too small: %i\n", act_hsa_size);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void __init zcore_header_init(int arch, struct zcore_header *hdr)
+{
+	if (arch == ARCH_S390X)
+		hdr->arch_id = DUMP_ARCH_S390X;
+	else
+		hdr->arch_id = DUMP_ARCH_S390;
+	hdr->mem_size = sys_info.mem_size;
+	hdr->mem_end = sys_info.mem_size;
+	hdr->num_pages = sys_info.mem_size / PAGE_SIZE;
+	hdr->tod = get_clock();
+	get_cpu_id(&hdr->cpu_id);
+}
+
+extern int sdias_init(void);
+
+static int __init zcore_init(void)
+{
+	unsigned char arch;
+	int rc;
+
+	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+		return -ENODATA;
+
+	zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
+	debug_register_view(zcore_dbf, &debug_sprintf_view);
+	debug_set_level(zcore_dbf, 6);
+
+	TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
+	TRACE("wwpn:  %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
+	TRACE("lun:   %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
+
+	rc = sdias_init();
+	if (rc)
+		goto fail;
+
+	rc = check_sdias();
+	if (rc) {
+		ERROR_MSG("Dump initialization failed\n");
+		goto fail;
+	}
+
+	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
+	if (rc) {
+		ERROR_MSG("sdias memcpy for arch id failed\n");
+		goto fail;
+	}
+
+#ifndef __s390x__
+	if (arch == ARCH_S390X) {
+		ERROR_MSG("32 bit dumper can't dump 64 bit system!\n");
+		rc = -EINVAL;
+		goto fail;
+	}
+#endif
+
+	rc = sys_info_init(arch);
+	if (rc) {
+		ERROR_MSG("arch init failed\n");
+		goto fail;
+	}
+
+	zcore_header_init(arch, &zcore_header);
+
+	zcore_dir = debugfs_create_dir("zcore" , NULL);
+	if (!zcore_dir) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+	zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
+					 &zcore_fops);
+	if (!zcore_file) {
+		debugfs_remove(zcore_dir);
+		rc = -ENOMEM;
+		goto fail;
+	}
+	hsa_available = 1;
+	return 0;
+
+fail:
+	diag308(DIAG308_REL_HSA, NULL);
+	return rc;
+}
+
+extern void sdias_exit(void);
+
+static void __exit zcore_exit(void)
+{
+	debug_unregister(zcore_dbf);
+	sdias_exit();
+	diag308(DIAG308_REL_HSA, NULL);
+}
+
+MODULE_AUTHOR("Copyright IBM Corp. 2003,2007");
+MODULE_DESCRIPTION("zcore module for zfcpdump support");
+MODULE_LICENSE("GPL");
+
+subsys_initcall(zcore_init);
+module_exit(zcore_exit);
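
The first 4K returned by reads on the new debugfs file are the zcore_header defined above. A hedged userspace sketch that checks the dump magic (assuming debugfs is mounted at /sys/kernel/debug; s390 is big-endian, so DUMP_MAGIC appears as the byte sequence below):

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	/* Sketch: validate the first 8 bytes of zcore/mem against DUMP_MAGIC. */
	int main(void)
	{
		const uint8_t want[8] = { 0xa8, 0x19, 0x01, 0x73,
					  0x61, 0x8f, 0x23, 0xfd };
		uint8_t magic[8];
		FILE *f = fopen("/sys/kernel/debug/zcore/mem", "r");

		if (!f || fread(magic, 1, 8, f) != 8)
			return 1;
		fclose(f);
		return memcmp(magic, want, 8) ? 1 : 0;
	}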
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index c490c2a1c2fc..cfaf77b320f5 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the S/390 common i/o drivers
 #
 
-obj-y += airq.o blacklist.o chsc.o cio.o css.o
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o
 ccw_device-objs += device.o device_fsm.o device_ops.o
 ccw_device-objs += device_id.o device_pgid.o device_status.o
 obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5aeb68e732b0..e5ccda63e883 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev)
 {
 	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
 
+	mutex_lock(&gdev->reg_mutex);
 	__ccwgroup_remove_symlinks(gdev);
 	device_unregister(dev);
+	mutex_unlock(&gdev->reg_mutex);
 }
 
 static ssize_t
@@ -173,7 +175,8 @@ ccwgroup_create(struct device *root,
 		return -ENOMEM;
 
 	atomic_set(&gdev->onoff, 0);
-
+	mutex_init(&gdev->reg_mutex);
+	mutex_lock(&gdev->reg_mutex);
 	for (i = 0; i < argc; i++) {
 		gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
 
@@ -183,12 +186,12 @@ ccwgroup_create(struct device *root,
183 || gdev->cdev[i]->id.driver_info != 186 || gdev->cdev[i]->id.driver_info !=
184 gdev->cdev[0]->id.driver_info) { 187 gdev->cdev[0]->id.driver_info) {
185 rc = -EINVAL; 188 rc = -EINVAL;
186 goto free_dev; 189 goto error;
187 } 190 }
188 /* Don't allow a device to belong to more than one group. */ 191 /* Don't allow a device to belong to more than one group. */
189 if (gdev->cdev[i]->dev.driver_data) { 192 if (gdev->cdev[i]->dev.driver_data) {
190 rc = -EINVAL; 193 rc = -EINVAL;
191 goto free_dev; 194 goto error;
192 } 195 }
193 gdev->cdev[i]->dev.driver_data = gdev; 196 gdev->cdev[i]->dev.driver_data = gdev;
194 } 197 }
@@ -203,9 +206,8 @@ ccwgroup_create(struct device *root,
203 gdev->cdev[0]->dev.bus_id); 206 gdev->cdev[0]->dev.bus_id);
204 207
205 rc = device_register(&gdev->dev); 208 rc = device_register(&gdev->dev);
206
207 if (rc) 209 if (rc)
208 goto free_dev; 210 goto error;
209 get_device(&gdev->dev); 211 get_device(&gdev->dev);
210 rc = device_create_file(&gdev->dev, &dev_attr_ungroup); 212 rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
211 213
@@ -216,6 +218,7 @@ ccwgroup_create(struct device *root,
216 218
217 rc = __ccwgroup_create_symlinks(gdev); 219 rc = __ccwgroup_create_symlinks(gdev);
218 if (!rc) { 220 if (!rc) {
221 mutex_unlock(&gdev->reg_mutex);
219 put_device(&gdev->dev); 222 put_device(&gdev->dev);
220 return 0; 223 return 0;
221 } 224 }
@@ -224,19 +227,12 @@ ccwgroup_create(struct device *root,
224error: 227error:
225 for (i = 0; i < argc; i++) 228 for (i = 0; i < argc; i++)
226 if (gdev->cdev[i]) { 229 if (gdev->cdev[i]) {
227 put_device(&gdev->cdev[i]->dev);
228 gdev->cdev[i]->dev.driver_data = NULL;
229 }
230 put_device(&gdev->dev);
231 return rc;
232free_dev:
233 for (i = 0; i < argc; i++)
234 if (gdev->cdev[i]) {
235 if (gdev->cdev[i]->dev.driver_data == gdev) 230 if (gdev->cdev[i]->dev.driver_data == gdev)
236 gdev->cdev[i]->dev.driver_data = NULL; 231 gdev->cdev[i]->dev.driver_data = NULL;
237 put_device(&gdev->cdev[i]->dev); 232 put_device(&gdev->cdev[i]->dev);
238 } 233 }
239 kfree(gdev); 234 mutex_unlock(&gdev->reg_mutex);
235 put_device(&gdev->dev);
240 return rc; 236 return rc;
241} 237}
242 238
@@ -422,8 +418,12 @@ ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
422 get_driver(&cdriver->driver); 418 get_driver(&cdriver->driver);
423 while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, 419 while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
424 __ccwgroup_match_all))) { 420 __ccwgroup_match_all))) {
425 __ccwgroup_remove_symlinks(to_ccwgroupdev(dev)); 421 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
422
423 mutex_lock(&gdev->reg_mutex);
424 __ccwgroup_remove_symlinks(gdev);
426 device_unregister(dev); 425 device_unregister(dev);
426 mutex_unlock(&gdev->reg_mutex);
427 put_device(dev); 427 put_device(dev);
428 } 428 }
429 put_driver(&cdriver->driver); 429 put_driver(&cdriver->driver);
@@ -444,8 +444,10 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
444 if (cdev->dev.driver_data) { 444 if (cdev->dev.driver_data) {
445 gdev = (struct ccwgroup_device *)cdev->dev.driver_data; 445 gdev = (struct ccwgroup_device *)cdev->dev.driver_data;
446 if (get_device(&gdev->dev)) { 446 if (get_device(&gdev->dev)) {
447 mutex_lock(&gdev->reg_mutex);
447 if (device_is_registered(&gdev->dev)) 448 if (device_is_registered(&gdev->dev))
448 return gdev; 449 return gdev;
450 mutex_unlock(&gdev->reg_mutex);
449 put_device(&gdev->dev); 451 put_device(&gdev->dev);
450 } 452 }
451 return NULL; 453 return NULL;
@@ -465,6 +467,7 @@ ccwgroup_remove_ccwdev(struct ccw_device *cdev)
465 if (gdev) { 467 if (gdev) {
466 __ccwgroup_remove_symlinks(gdev); 468 __ccwgroup_remove_symlinks(gdev);
467 device_unregister(&gdev->dev); 469 device_unregister(&gdev->dev);
470 mutex_unlock(&gdev->reg_mutex);
468 put_device(&gdev->dev); 471 put_device(&gdev->dev);
469 } 472 }
470} 473}
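
The new reg_mutex closes a race between concurrent ungroup paths: symlink removal and device_unregister() now run under the same lock that ccwgroup_create() holds during registration. A stripped-down sketch of the idiom, with a hypothetical example_dev standing in for the ccwgroup structures:

#include <linux/device.h>
#include <linux/mutex.h>

struct example_dev {
	struct mutex reg_mutex;	/* serializes (un)registration */
	struct device dev;
};

static void example_ungroup(struct example_dev *ed)
{
	mutex_lock(&ed->reg_mutex);
	/* Remove sysfs links and unregister under the same lock that
	 * registration takes, so a concurrent ungroup via sysfs and a
	 * driver unregistration cannot interleave. */
	device_unregister(&ed->dev);
	mutex_unlock(&ed->reg_mutex);
}
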
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
new file mode 100644
index 000000000000..ac289e6eadfe
--- /dev/null
+++ b/drivers/s390/cio/chp.c
@@ -0,0 +1,683 @@
1/*
2 * drivers/s390/cio/chp.c
3 *
4 * Copyright IBM Corp. 1999,2007
5 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
6 * Arnd Bergmann (arndb@de.ibm.com)
7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 */
9
10#include <linux/bug.h>
11#include <linux/workqueue.h>
12#include <linux/spinlock.h>
13#include <linux/init.h>
14#include <linux/jiffies.h>
15#include <linux/wait.h>
16#include <linux/mutex.h>
17#include <asm/errno.h>
18#include <asm/chpid.h>
19#include <asm/sclp.h>
20
21#include "cio.h"
22#include "css.h"
23#include "ioasm.h"
24#include "cio_debug.h"
25#include "chp.h"
26
27#define to_channelpath(device) container_of(device, struct channel_path, dev)
28#define CHP_INFO_UPDATE_INTERVAL 1*HZ
29
30enum cfg_task_t {
31 cfg_none,
32 cfg_configure,
33 cfg_deconfigure
34};
35
36/* Map for pending configure tasks. */
37static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
38static DEFINE_MUTEX(cfg_lock);
39static int cfg_busy;
40
41/* Map for channel-path status. */
42static struct sclp_chp_info chp_info;
43static DEFINE_MUTEX(info_lock);
44
45/* Time after which channel-path status may be outdated. */
46static unsigned long chp_info_expires;
47
48/* Workqueue to perform pending configure tasks. */
49static struct workqueue_struct *chp_wq;
50static struct work_struct cfg_work;
51
52/* Wait queue for configure completion events. */
53static wait_queue_head_t cfg_wait_queue;
54
55/* Return channel_path struct for given chpid. */
56static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
57{
58 return css[chpid.cssid]->chps[chpid.id];
59}
60
61/* Set vary state for given chpid. */
62static void set_chp_logically_online(struct chp_id chpid, int onoff)
63{
64 chpid_to_chp(chpid)->state = onoff;
65}
66
67/* On success return 0 if channel-path is varied offline, 1 if it is varied
68 * online. Return -ENODEV if channel-path is not registered. */
69int chp_get_status(struct chp_id chpid)
70{
71 return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
72}
73
74/**
75 * chp_get_sch_opm - return opm for subchannel
76 * @sch: subchannel
77 *
78 * Calculate and return the operational path mask (opm) based on the chpids
79 * used by the subchannel and the status of the associated channel-paths.
80 */
81u8 chp_get_sch_opm(struct subchannel *sch)
82{
83 struct chp_id chpid;
84 int opm;
85 int i;
86
87 opm = 0;
88 chp_id_init(&chpid);
89 for (i = 0; i < 8; i++) {
90 opm <<= 1;
91 chpid.id = sch->schib.pmcw.chpid[i];
92 if (chp_get_status(chpid) != 0)
93 opm |= 1;
94 }
95 return opm;
96}
97
98/**
99 * chp_is_registered - check if a channel-path is registered
100 * @chpid: channel-path ID
101 *
102 * Return non-zero if a channel-path with the given chpid is registered,
103 * zero otherwise.
104 */
105int chp_is_registered(struct chp_id chpid)
106{
107 return chpid_to_chp(chpid) != NULL;
108}
109
110/*
111 * Function: s390_vary_chpid
112 * Varies the specified chpid online or offline
113 */
114static int s390_vary_chpid(struct chp_id chpid, int on)
115{
116 char dbf_text[15];
117 int status;
118
119 sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
120 chpid.id);
121 CIO_TRACE_EVENT( 2, dbf_text);
122
123 status = chp_get_status(chpid);
124 if (status < 0) {
125 printk(KERN_ERR "Can't vary unknown chpid %x.%02x\n",
126 chpid.cssid, chpid.id);
127 return -EINVAL;
128 }
129
130 if (!on && !status) {
131 printk(KERN_ERR "chpid %x.%02x is already offline\n",
132 chpid.cssid, chpid.id);
133 return -EINVAL;
134 }
135
136 set_chp_logically_online(chpid, on);
137 chsc_chp_vary(chpid, on);
138 return 0;
139}
140
141/*
142 * Channel measurement related functions
143 */
144static ssize_t chp_measurement_chars_read(struct kobject *kobj, char *buf,
145 loff_t off, size_t count)
146{
147 struct channel_path *chp;
148 unsigned int size;
149
150 chp = to_channelpath(container_of(kobj, struct device, kobj));
151 if (!chp->cmg_chars)
152 return 0;
153
154 size = sizeof(struct cmg_chars);
155
156 if (off > size)
157 return 0;
158 if (off + count > size)
159 count = size - off;
160 memcpy(buf, chp->cmg_chars + off, count);
161 return count;
162}
163
164static struct bin_attribute chp_measurement_chars_attr = {
165 .attr = {
166 .name = "measurement_chars",
167 .mode = S_IRUSR,
168 .owner = THIS_MODULE,
169 },
170 .size = sizeof(struct cmg_chars),
171 .read = chp_measurement_chars_read,
172};
173
174static void chp_measurement_copy_block(struct cmg_entry *buf,
175 struct channel_subsystem *css,
176 struct chp_id chpid)
177{
178 void *area;
179 struct cmg_entry *entry, reference_buf;
180 int idx;
181
182 if (chpid.id < 128) {
183 area = css->cub_addr1;
184 idx = chpid.id;
185 } else {
186 area = css->cub_addr2;
187 idx = chpid.id - 128;
188 }
189 entry = area + (idx * sizeof(struct cmg_entry));
190 do {
191 memcpy(buf, entry, sizeof(*entry));
192 memcpy(&reference_buf, entry, sizeof(*entry));
193 } while (reference_buf.values[0] != buf->values[0]);
194}
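
chp_measurement_copy_block() guards against torn reads of a block that the hardware may rewrite at any time: it copies the entry twice and retries until the first word of both copies matches. The same idiom restated generically, under the assumption the code relies on, namely that a writer always changes values[0] when it updates the block:

#include <string.h>

struct blk { unsigned int values[4]; };

static void copy_stable(struct blk *dst, const struct blk *src)
{
	struct blk check;

	do {
		memcpy(dst, src, sizeof(*dst));
		memcpy(&check, src, sizeof(check));
		/* if the writer got in between the two copies, the
		 * first words differ and we simply try again */
	} while (check.values[0] != dst->values[0]);
}
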
195
196static ssize_t chp_measurement_read(struct kobject *kobj, char *buf,
197 loff_t off, size_t count)
198{
199 struct channel_path *chp;
200 struct channel_subsystem *css;
201 unsigned int size;
202
203 chp = to_channelpath(container_of(kobj, struct device, kobj));
204 css = to_css(chp->dev.parent);
205
206 size = sizeof(struct cmg_entry);
207
208 /* Only allow single reads. */
209 if (off || count < size)
210 return 0;
211 chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
212 count = size;
213 return count;
214}
215
216static struct bin_attribute chp_measurement_attr = {
217 .attr = {
218 .name = "measurement",
219 .mode = S_IRUSR,
220 .owner = THIS_MODULE,
221 },
222 .size = sizeof(struct cmg_entry),
223 .read = chp_measurement_read,
224};
225
226void chp_remove_cmg_attr(struct channel_path *chp)
227{
228 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
229 device_remove_bin_file(&chp->dev, &chp_measurement_attr);
230}
231
232int chp_add_cmg_attr(struct channel_path *chp)
233{
234 int ret;
235
236 ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
237 if (ret)
238 return ret;
239 ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
240 if (ret)
241 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
242 return ret;
243}
244
245/*
246 * Files for the channel path entries.
247 */
248static ssize_t chp_status_show(struct device *dev,
249 struct device_attribute *attr, char *buf)
250{
251 struct channel_path *chp = container_of(dev, struct channel_path, dev);
252
253 if (!chp)
254 return 0;
255 return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") :
256 sprintf(buf, "offline\n"));
257}
258
259static ssize_t chp_status_write(struct device *dev,
260 struct device_attribute *attr,
261 const char *buf, size_t count)
262{
263 struct channel_path *cp = container_of(dev, struct channel_path, dev);
264 char cmd[10];
265 int num_args;
266 int error;
267
268 num_args = sscanf(buf, "%5s", cmd);
269 if (!num_args)
270 return count;
271
272 if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1"))
273 error = s390_vary_chpid(cp->chpid, 1);
274 else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0"))
275 error = s390_vary_chpid(cp->chpid, 0);
276 else
277 error = -EINVAL;
278
279 return error < 0 ? error : count;
280
281}
282
283static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
284
285static ssize_t chp_configure_show(struct device *dev,
286 struct device_attribute *attr, char *buf)
287{
288 struct channel_path *cp;
289 int status;
290
291 cp = container_of(dev, struct channel_path, dev);
292 status = chp_info_get_status(cp->chpid);
293 if (status < 0)
294 return status;
295
296 return snprintf(buf, PAGE_SIZE, "%d\n", status);
297}
298
299static int cfg_wait_idle(void);
300
301static ssize_t chp_configure_write(struct device *dev,
302 struct device_attribute *attr,
303 const char *buf, size_t count)
304{
305 struct channel_path *cp;
306 int val;
307 char delim;
308
309 if (sscanf(buf, "%d %c", &val, &delim) != 1)
310 return -EINVAL;
311 if (val != 0 && val != 1)
312 return -EINVAL;
313 cp = container_of(dev, struct channel_path, dev);
314 chp_cfg_schedule(cp->chpid, val);
315 cfg_wait_idle();
316
317 return count;
318}
319
320static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
321
322static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
323 char *buf)
324{
325 struct channel_path *chp = container_of(dev, struct channel_path, dev);
326
327 if (!chp)
328 return 0;
329 return sprintf(buf, "%x\n", chp->desc.desc);
330}
331
332static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
333
334static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
335 char *buf)
336{
337 struct channel_path *chp = to_channelpath(dev);
338
339 if (!chp)
340 return 0;
341 if (chp->cmg == -1) /* channel measurements not available */
342 return sprintf(buf, "unknown\n");
343 return sprintf(buf, "%x\n", chp->cmg);
344}
345
346static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
347
348static ssize_t chp_shared_show(struct device *dev,
349 struct device_attribute *attr, char *buf)
350{
351 struct channel_path *chp = to_channelpath(dev);
352
353 if (!chp)
354 return 0;
355 if (chp->shared == -1) /* channel measurements not available */
356 return sprintf(buf, "unknown\n");
357 return sprintf(buf, "%x\n", chp->shared);
358}
359
360static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
361
362static struct attribute *chp_attrs[] = {
363 &dev_attr_status.attr,
364 &dev_attr_configure.attr,
365 &dev_attr_type.attr,
366 &dev_attr_cmg.attr,
367 &dev_attr_shared.attr,
368 NULL,
369};
370
371static struct attribute_group chp_attr_group = {
372 .attrs = chp_attrs,
373};
374
375static void chp_release(struct device *dev)
376{
377 struct channel_path *cp;
378
379 cp = container_of(dev, struct channel_path, dev);
380 kfree(cp);
381}
382
383/**
384 * chp_new - register a new channel-path
385 * @chpid: channel-path ID
386 *
387 * Create and register data structure representing new channel-path. Return
388 * zero on success, non-zero otherwise.
389 */
390int chp_new(struct chp_id chpid)
391{
392 struct channel_path *chp;
393 int ret;
394
395 if (chp_is_registered(chpid))
396 return 0;
397 chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
398 if (!chp)
399 return -ENOMEM;
400
401 /* fill in status, etc. */
402 chp->chpid = chpid;
403 chp->state = 1;
404 chp->dev.parent = &css[chpid.cssid]->device;
405 chp->dev.release = chp_release;
406 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid,
407 chpid.id);
408
409 /* Obtain channel path description and fill it in. */
410 ret = chsc_determine_channel_path_description(chpid, &chp->desc);
411 if (ret)
412 goto out_free;
413 if ((chp->desc.flags & 0x80) == 0) {
414 ret = -ENODEV;
415 goto out_free;
416 }
417 /* Get channel-measurement characteristics. */
418 if (css_characteristics_avail && css_chsc_characteristics.scmc
419 && css_chsc_characteristics.secm) {
420 ret = chsc_get_channel_measurement_chars(chp);
421 if (ret)
422 goto out_free;
423 } else {
424 static int msg_done;
425
426 if (!msg_done) {
427 printk(KERN_WARNING "cio: Channel measurements not "
428 "available, continuing.\n");
429 msg_done = 1;
430 }
431 chp->cmg = -1;
432 }
433
434 /* make it known to the system */
435 ret = device_register(&chp->dev);
436 if (ret) {
437 printk(KERN_WARNING "%s: could not register %x.%02x\n",
438 __func__, chpid.cssid, chpid.id);
439 goto out_free;
440 }
441 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
442 if (ret) {
443 device_unregister(&chp->dev);
444 goto out_free;
445 }
446 mutex_lock(&css[chpid.cssid]->mutex);
447 if (css[chpid.cssid]->cm_enabled) {
448 ret = chp_add_cmg_attr(chp);
449 if (ret) {
450 sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
451 device_unregister(&chp->dev);
452 mutex_unlock(&css[chpid.cssid]->mutex);
453 goto out_free;
454 }
455 }
456 css[chpid.cssid]->chps[chpid.id] = chp;
457 mutex_unlock(&css[chpid.cssid]->mutex);
458 return ret;
459out_free:
460 kfree(chp);
461 return ret;
462}
463
464/**
465 * chp_get_chp_desc - return newly allocated channel-path description
466 * @chpid: channel-path ID
467 *
468 * On success return a newly allocated copy of the channel-path description
469 * data associated with the given channel-path ID. Return %NULL on error.
470 */
471void *chp_get_chp_desc(struct chp_id chpid)
472{
473 struct channel_path *chp;
474 struct channel_path_desc *desc;
475
476 chp = chpid_to_chp(chpid);
477 if (!chp)
478 return NULL;
479 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
480 if (!desc)
481 return NULL;
482 memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
483 return desc;
484}
485
486/**
487 * chp_process_crw - process channel-path status change
488 * @id: channel-path ID number
489 * @status: non-zero if channel-path has become available, zero otherwise
490 *
491 * Handle channel-report-words indicating that the status of a channel-path
492 * has changed.
493 */
494void chp_process_crw(int id, int status)
495{
496 struct chp_id chpid;
497
498 chp_id_init(&chpid);
499 chpid.id = id;
500 if (status) {
501 if (!chp_is_registered(chpid))
502 chp_new(chpid);
503 chsc_chp_online(chpid);
504 } else
505 chsc_chp_offline(chpid);
506}
507
508static inline int info_bit_num(struct chp_id id)
509{
510 return id.id + id.cssid * (__MAX_CHPID + 1);
511}
512
513/* Force chp_info refresh on next call to info_validate(). */
514static void info_expire(void)
515{
516 mutex_lock(&info_lock);
517 chp_info_expires = jiffies - 1;
518 mutex_unlock(&info_lock);
519}
520
521/* Ensure that chp_info is up-to-date. */
522static int info_update(void)
523{
524 int rc;
525
526 mutex_lock(&info_lock);
527 rc = 0;
528 if (time_after(jiffies, chp_info_expires)) {
529 /* Data is too old, update. */
530 rc = sclp_chp_read_info(&chp_info);
531 chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
532 }
533 mutex_unlock(&info_lock);
534
535 return rc;
536}
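
info_update() caches the SCLP data and refreshes it only once the jiffies deadline has passed; time_after() keeps the comparison correct across jiffies wraparound. A sketch of the expiry logic with a hypothetical refresh() standing in for sclp_chp_read_info():

#include <linux/jiffies.h>
#include <linux/mutex.h>

static int refresh(void);		/* hypothetical data source */

static unsigned long cache_expires;
static DEFINE_MUTEX(cache_lock);

static int cached_update(void)
{
	int rc = 0;

	mutex_lock(&cache_lock);
	if (time_after(jiffies, cache_expires)) {
		rc = refresh();
		cache_expires = jiffies + HZ;	/* valid for one second */
	}
	mutex_unlock(&cache_lock);
	return rc;
}
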
537
538/**
539 * chp_info_get_status - retrieve configure status of a channel-path
540 * @chpid: channel-path ID
541 *
542 * On success, return 0 for standby, 1 for configured, 2 for reserved,
543 * 3 for not recognized. Return negative error code on error.
544 */
545int chp_info_get_status(struct chp_id chpid)
546{
547 int rc;
548 int bit;
549
550 rc = info_update();
551 if (rc)
552 return rc;
553
554 bit = info_bit_num(chpid);
555 mutex_lock(&info_lock);
556 if (!chp_test_bit(chp_info.recognized, bit))
557 rc = CHP_STATUS_NOT_RECOGNIZED;
558 else if (chp_test_bit(chp_info.configured, bit))
559 rc = CHP_STATUS_CONFIGURED;
560 else if (chp_test_bit(chp_info.standby, bit))
561 rc = CHP_STATUS_STANDBY;
562 else
563 rc = CHP_STATUS_RESERVED;
564 mutex_unlock(&info_lock);
565
566 return rc;
567}
568
569/* Return configure task for chpid. */
570static enum cfg_task_t cfg_get_task(struct chp_id chpid)
571{
572 return chp_cfg_task[chpid.cssid][chpid.id];
573}
574
575/* Set configure task for chpid. */
576static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
577{
578 chp_cfg_task[chpid.cssid][chpid.id] = cfg;
579}
580
581/* Perform one configure/deconfigure request. Reschedule the work function
582 * until the last pending request has been processed. */
583static void cfg_func(struct work_struct *work)
584{
585 struct chp_id chpid;
586 enum cfg_task_t t;
587
588 mutex_lock(&cfg_lock);
589 t = cfg_none;
590 chp_id_for_each(&chpid) {
591 t = cfg_get_task(chpid);
592 if (t != cfg_none) {
593 cfg_set_task(chpid, cfg_none);
594 break;
595 }
596 }
597 mutex_unlock(&cfg_lock);
598
599 switch (t) {
600 case cfg_configure:
601 sclp_chp_configure(chpid);
602 info_expire();
603 chsc_chp_online(chpid);
604 break;
605 case cfg_deconfigure:
606 sclp_chp_deconfigure(chpid);
607 info_expire();
608 chsc_chp_offline(chpid);
609 break;
610 case cfg_none:
611 /* Get updated information after last change. */
612 info_update();
613 mutex_lock(&cfg_lock);
614 cfg_busy = 0;
615 mutex_unlock(&cfg_lock);
616 wake_up_interruptible(&cfg_wait_queue);
617 return;
618 }
619 queue_work(chp_wq, &cfg_work);
620}
621
622/**
623 * chp_cfg_schedule - schedule chpid configuration request
624 * @chpid: channel-path ID
625 * @configure: non-zero for configure, zero for deconfigure
626 *
627 * Schedule a channel-path configuration/deconfiguration request.
628 */
629void chp_cfg_schedule(struct chp_id chpid, int configure)
630{
631 CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
632 configure);
633 mutex_lock(&cfg_lock);
634 cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
635 cfg_busy = 1;
636 mutex_unlock(&cfg_lock);
637 queue_work(chp_wq, &cfg_work);
638}
639
640/**
641 * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
642 * @chpid: channel-path ID
643 *
644 * Cancel an active channel-path deconfiguration request if it has not yet
645 * been performed.
646 */
647void chp_cfg_cancel_deconfigure(struct chp_id chpid)
648{
649 CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
650 mutex_lock(&cfg_lock);
651 if (cfg_get_task(chpid) == cfg_deconfigure)
652 cfg_set_task(chpid, cfg_none);
653 mutex_unlock(&cfg_lock);
654}
655
656static int cfg_wait_idle(void)
657{
658 if (wait_event_interruptible(cfg_wait_queue, !cfg_busy))
659 return -ERESTARTSYS;
660 return 0;
661}
662
663static int __init chp_init(void)
664{
665 struct chp_id chpid;
666
667 chp_wq = create_singlethread_workqueue("cio_chp");
668 if (!chp_wq)
669 return -ENOMEM;
670 INIT_WORK(&cfg_work, cfg_func);
671 init_waitqueue_head(&cfg_wait_queue);
672 if (info_update())
673 return 0;
674 /* Register available channel-paths. */
675 chp_id_for_each(&chpid) {
676 if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
677 chp_new(chpid);
678 }
679
680 return 0;
681}
682
683subsys_initcall(chp_init);
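
The configure machinery above is a single-consumer drain: chp_cfg_schedule() marks a slot in the task map and queues cfg_work, while cfg_func() takes one pending task per invocation under cfg_lock, processes it unlocked, and requeues itself; only when the map is empty does it clear cfg_busy and wake cfg_wait_idle(). The same control flow with the channel-path specifics removed (all names below are illustrative):

#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#define NR_SLOTS 16
enum task { T_NONE, T_DO };

static enum task map[NR_SLOTS];
static DEFINE_MUTEX(map_lock);
static int busy;
static DECLARE_WAIT_QUEUE_HEAD(idle_wq);

static void drain_func(struct work_struct *work)
{
	enum task t = T_NONE;
	int i;

	mutex_lock(&map_lock);
	for (i = 0; i < NR_SLOTS; i++) {
		if (map[i] != T_NONE) {
			t = map[i];
			map[i] = T_NONE;
			break;
		}
	}
	mutex_unlock(&map_lock);

	if (t == T_NONE) {
		/* map drained: clear the busy flag and wake waiters */
		mutex_lock(&map_lock);
		busy = 0;
		mutex_unlock(&map_lock);
		wake_up_interruptible(&idle_wq);
		return;
	}
	/* ... perform task t for slot i here, outside the lock ... */
	schedule_work(work);	/* come back for the next pending slot */
}
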
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
new file mode 100644
index 000000000000..65286563c592
--- /dev/null
+++ b/drivers/s390/cio/chp.h
@@ -0,0 +1,53 @@
1/*
2 * drivers/s390/cio/chp.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#ifndef S390_CHP_H
9#define S390_CHP_H S390_CHP_H
10
11#include <linux/types.h>
12#include <linux/device.h>
13#include <asm/chpid.h>
14#include "chsc.h"
15
16#define CHP_STATUS_STANDBY 0
17#define CHP_STATUS_CONFIGURED 1
18#define CHP_STATUS_RESERVED 2
19#define CHP_STATUS_NOT_RECOGNIZED 3
20
21static inline int chp_test_bit(u8 *bitmap, int num)
22{
23 int byte = num >> 3;
24 int mask = 128 >> (num & 7);
25
26 return (bitmap[byte] & mask) ? 1 : 0;
27}
28
29
30struct channel_path {
31 struct chp_id chpid;
32 int state;
33 struct channel_path_desc desc;
34 /* Channel-measurement related stuff: */
35 int cmg;
36 int shared;
37 void *cmg_chars;
38 struct device dev;
39};
40
41int chp_get_status(struct chp_id chpid);
42u8 chp_get_sch_opm(struct subchannel *sch);
43int chp_is_registered(struct chp_id chpid);
44void *chp_get_chp_desc(struct chp_id chpid);
45void chp_process_crw(int id, int available);
46void chp_remove_cmg_attr(struct channel_path *chp);
47int chp_add_cmg_attr(struct channel_path *chp);
48int chp_new(struct chp_id chpid);
49void chp_cfg_schedule(struct chp_id chpid, int configure);
50void chp_cfg_cancel_deconfigure(struct chp_id chpid);
51int chp_info_get_status(struct chp_id chpid);
52
53#endif /* S390_CHP_H */
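
chp_test_bit() numbers bits MSB-first, so bit 0 is the 0x80 bit of the first byte; this matches the 0x80 >> i masks used throughout the channel-path code. A standalone check of that convention (u8 written out as unsigned char):

#include <assert.h>

static inline int chp_test_bit(unsigned char *bitmap, int num)
{
	int byte = num >> 3;
	int mask = 128 >> (num & 7);

	return (bitmap[byte] & mask) ? 1 : 0;
}

int main(void)
{
	unsigned char bitmap[2] = { 0x80, 0x01 };

	assert(chp_test_bit(bitmap, 0) == 1);	/* MSB of byte 0 */
	assert(chp_test_bit(bitmap, 7) == 0);	/* LSB of byte 0 */
	assert(chp_test_bit(bitmap, 15) == 1);	/* LSB of byte 1 */
	return 0;
}
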
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6f05a44e3817..ea92ac4d6577 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -15,202 +15,124 @@
15#include <linux/device.h> 15#include <linux/device.h>
16 16
17#include <asm/cio.h> 17#include <asm/cio.h>
18#include <asm/chpid.h>
18 19
19#include "css.h" 20#include "css.h"
20#include "cio.h" 21#include "cio.h"
21#include "cio_debug.h" 22#include "cio_debug.h"
22#include "ioasm.h" 23#include "ioasm.h"
24#include "chp.h"
23#include "chsc.h" 25#include "chsc.h"
24 26
25static void *sei_page; 27static void *sei_page;
26 28
27static int new_channel_path(int chpid); 29struct chsc_ssd_area {
28 30 struct chsc_header request;
29static inline void 31 u16 :10;
30set_chp_logically_online(int chp, int onoff) 32 u16 ssid:2;
31{ 33 u16 :4;
32 css[0]->chps[chp]->state = onoff; 34 u16 f_sch; /* first subchannel */
33} 35 u16 :16;
34 36 u16 l_sch; /* last subchannel */
35static int 37 u32 :32;
36get_chp_status(int chp) 38 struct chsc_header response;
37{ 39 u32 :32;
38 return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV); 40 u8 sch_valid : 1;
39} 41 u8 dev_valid : 1;
40 42 u8 st : 3; /* subchannel type */
41void 43 u8 zeroes : 3;
42chsc_validate_chpids(struct subchannel *sch) 44 u8 unit_addr; /* unit address */
43{ 45 u16 devno; /* device number */
44 int mask, chp; 46 u8 path_mask;
45 47 u8 fla_valid_mask;
46 for (chp = 0; chp <= 7; chp++) { 48 u16 sch; /* subchannel */
47 mask = 0x80 >> chp; 49 u8 chpid[8]; /* chpids 0-7 */
48 if (!get_chp_status(sch->schib.pmcw.chpid[chp])) 50 u16 fla[8]; /* full link addresses 0-7 */
49 /* disable using this path */ 51} __attribute__ ((packed));
50 sch->opm &= ~mask;
51 }
52}
53
54void
55chpid_is_actually_online(int chp)
56{
57 int state;
58
59 state = get_chp_status(chp);
60 if (state < 0) {
61 need_rescan = 1;
62 queue_work(slow_path_wq, &slow_path_work);
63 } else
64 WARN_ON(!state);
65}
66 52
67/* FIXME: this is _always_ called for every subchannel. shouldn't we 53int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
68 * process more than one at a time? */
69static int
70chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
71{ 54{
72 int ccode, j; 55 unsigned long page;
73 56 struct chsc_ssd_area *ssd_area;
74 struct { 57 int ccode;
75 struct chsc_header request; 58 int ret;
76 u16 reserved1a:10; 59 int i;
77 u16 ssid:2; 60 int mask;
78 u16 reserved1b:4;
79 u16 f_sch; /* first subchannel */
80 u16 reserved2;
81 u16 l_sch; /* last subchannel */
82 u32 reserved3;
83 struct chsc_header response;
84 u32 reserved4;
85 u8 sch_valid : 1;
86 u8 dev_valid : 1;
87 u8 st : 3; /* subchannel type */
88 u8 zeroes : 3;
89 u8 unit_addr; /* unit address */
90 u16 devno; /* device number */
91 u8 path_mask;
92 u8 fla_valid_mask;
93 u16 sch; /* subchannel */
94 u8 chpid[8]; /* chpids 0-7 */
95 u16 fla[8]; /* full link addresses 0-7 */
96 } __attribute__ ((packed)) *ssd_area;
97
98 ssd_area = page;
99 61
62 page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
63 if (!page)
64 return -ENOMEM;
65 ssd_area = (struct chsc_ssd_area *) page;
100 ssd_area->request.length = 0x0010; 66 ssd_area->request.length = 0x0010;
101 ssd_area->request.code = 0x0004; 67 ssd_area->request.code = 0x0004;
102 68 ssd_area->ssid = schid.ssid;
103 ssd_area->ssid = sch->schid.ssid; 69 ssd_area->f_sch = schid.sch_no;
104 ssd_area->f_sch = sch->schid.sch_no; 70 ssd_area->l_sch = schid.sch_no;
105 ssd_area->l_sch = sch->schid.sch_no;
106 71
107 ccode = chsc(ssd_area); 72 ccode = chsc(ssd_area);
73 /* Check response. */
108 if (ccode > 0) { 74 if (ccode > 0) {
109 pr_debug("chsc returned with ccode = %d\n", ccode); 75 ret = (ccode == 3) ? -ENODEV : -EBUSY;
110 return (ccode == 3) ? -ENODEV : -EBUSY; 76 goto out_free;
111 } 77 }
112 78 if (ssd_area->response.code != 0x0001) {
113 switch (ssd_area->response.code) { 79 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
114 case 0x0001: /* everything ok */ 80 schid.ssid, schid.sch_no,
115 break;
116 case 0x0002:
117 CIO_CRW_EVENT(2, "Invalid command!\n");
118 return -EINVAL;
119 case 0x0003:
120 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
121 return -EINVAL;
122 case 0x0004:
123 CIO_CRW_EVENT(2, "Model does not provide ssd\n");
124 return -EOPNOTSUPP;
125 default:
126 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
127 ssd_area->response.code); 81 ssd_area->response.code);
128 return -EIO; 82 ret = -EIO;
83 goto out_free;
129 } 84 }
130 85 if (!ssd_area->sch_valid) {
131 /* 86 ret = -ENODEV;
132 * ssd_area->st stores the type of the detected 87 goto out_free;
133 * subchannel, with the following definitions:
134 *
135 * 0: I/O subchannel: All fields have meaning
136 * 1: CHSC subchannel: Only sch_val, st and sch
137 * have meaning
138 * 2: Message subchannel: All fields except unit_addr
139 * have meaning
140 * 3: ADM subchannel: Only sch_val, st and sch
141 * have meaning
142 *
143 * Other types are currently undefined.
144 */
145 if (ssd_area->st > 3) { /* uhm, that looks strange... */
146 CIO_CRW_EVENT(0, "Strange subchannel type %d"
147 " for sch 0.%x.%04x\n", ssd_area->st,
148 sch->schid.ssid, sch->schid.sch_no);
149 /*
150 * There may have been a new subchannel type defined in the
151 * time since this code was written; since we don't know which
152 * fields have meaning and what to do with it we just jump out
153 */
154 return 0;
155 } else {
156 const char *type[4] = {"I/O", "chsc", "message", "ADM"};
157 CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
158 sch->schid.ssid, sch->schid.sch_no,
159 type[ssd_area->st]);
160
161 sch->ssd_info.valid = 1;
162 sch->ssd_info.type = ssd_area->st;
163 } 88 }
164 89 /* Copy data */
165 if (ssd_area->st == 0 || ssd_area->st == 2) { 90 ret = 0;
166 for (j = 0; j < 8; j++) { 91 memset(ssd, 0, sizeof(struct chsc_ssd_info));
167 if (!((0x80 >> j) & ssd_area->path_mask & 92 if ((ssd_area->st != 0) && (ssd_area->st != 2))
168 ssd_area->fla_valid_mask)) 93 goto out_free;
169 continue; 94 ssd->path_mask = ssd_area->path_mask;
170 sch->ssd_info.chpid[j] = ssd_area->chpid[j]; 95 ssd->fla_valid_mask = ssd_area->fla_valid_mask;
171 sch->ssd_info.fla[j] = ssd_area->fla[j]; 96 for (i = 0; i < 8; i++) {
97 mask = 0x80 >> i;
98 if (ssd_area->path_mask & mask) {
99 chp_id_init(&ssd->chpid[i]);
100 ssd->chpid[i].id = ssd_area->chpid[i];
172 } 101 }
102 if (ssd_area->fla_valid_mask & mask)
103 ssd->fla[i] = ssd_area->fla[i];
173 } 104 }
174 return 0; 105out_free:
106 free_page(page);
107 return ret;
175} 108}
176 109
177int 110static int check_for_io_on_path(struct subchannel *sch, int mask)
178css_get_ssd_info(struct subchannel *sch)
179{ 111{
180 int ret; 112 int cc;
181 void *page;
182 113
183 page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 114 cc = stsch(sch->schid, &sch->schib);
184 if (!page) 115 if (cc)
185 return -ENOMEM; 116 return 0;
186 spin_lock_irq(sch->lock); 117 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
187 ret = chsc_get_sch_desc_irq(sch, page); 118 return 1;
188 if (ret) { 119 return 0;
189 static int cio_chsc_err_msg; 120}
190 121
191 if (!cio_chsc_err_msg) { 122static void terminate_internal_io(struct subchannel *sch)
192 printk(KERN_ERR 123{
193 "chsc_get_sch_descriptions:" 124 if (cio_clear(sch)) {
194 " Error %d while doing chsc; " 125 /* Recheck device in case clear failed. */
195 "processing some machine checks may " 126 sch->lpm = 0;
196 "not work\n", ret); 127 if (device_trigger_verify(sch) != 0)
197 cio_chsc_err_msg = 1; 128 css_schedule_eval(sch->schid);
198 } 129 return;
199 }
200 spin_unlock_irq(sch->lock);
201 free_page((unsigned long)page);
202 if (!ret) {
203 int j, chpid, mask;
204 /* Allocate channel path structures, if needed. */
205 for (j = 0; j < 8; j++) {
206 mask = 0x80 >> j;
207 chpid = sch->ssd_info.chpid[j];
208 if ((sch->schib.pmcw.pim & mask) &&
209 (get_chp_status(chpid) < 0))
210 new_channel_path(chpid);
211 }
212 } 130 }
213 return ret; 131 /* Request retry of internal operation. */
132 device_set_intretry(sch);
133 /* Call handler. */
134 if (sch->driver && sch->driver->termination)
135 sch->driver->termination(&sch->dev);
214} 136}
215 137
216static int 138static int
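
Every chsc caller in this file follows the same handshake: fill in the request header, issue the instruction, map a non-zero condition code to -ENODEV or -EBUSY, then test the response code against 0x0001. A sketch of that skeleton, assuming (as holds for the command areas used here) that the response header sits request.length bytes into the area; struct chsc_header and chsc() come from the "chsc.h"/"ioasm.h" includes above:

static int issue_chsc(void *area, u16 len, u16 cmd)
{
	struct chsc_header *request = area;
	struct chsc_header *response;
	int cc;

	request->length = len;
	request->code = cmd;
	cc = chsc(area);	/* condition code of the instruction */
	if (cc > 0)		/* 3: not operational, otherwise busy */
		return (cc == 3) ? -ENODEV : -EBUSY;
	response = area + len;
	return (response->code == 0x0001) ? 0 : -EIO;
}
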
@@ -219,7 +141,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
219 int j; 141 int j;
220 int mask; 142 int mask;
221 struct subchannel *sch; 143 struct subchannel *sch;
222 struct channel_path *chpid; 144 struct chp_id *chpid;
223 struct schib schib; 145 struct schib schib;
224 146
225 sch = to_subchannel(dev); 147 sch = to_subchannel(dev);
@@ -243,106 +165,50 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
243 if (sch->schib.pmcw.pim == 0x80) 165 if (sch->schib.pmcw.pim == 0x80)
244 goto out_unreg; 166 goto out_unreg;
245 167
246 if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) && 168 if (check_for_io_on_path(sch, mask)) {
247 (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) && 169 if (device_is_online(sch))
248 (sch->schib.pmcw.lpum == mask)) { 170 device_kill_io(sch);
249 int cc; 171 else {
250 172 terminate_internal_io(sch);
251 cc = cio_clear(sch); 173 /* Re-start path verification. */
252 if (cc == -ENODEV) 174 if (sch->driver && sch->driver->verify)
175 sch->driver->verify(&sch->dev);
176 }
177 } else {
178 /* trigger path verification. */
179 if (sch->driver && sch->driver->verify)
180 sch->driver->verify(&sch->dev);
181 else if (sch->lpm == mask)
253 goto out_unreg; 182 goto out_unreg;
254 /* Request retry of internal operation. */
255 device_set_intretry(sch);
256 /* Call handler. */
257 if (sch->driver && sch->driver->termination)
258 sch->driver->termination(&sch->dev);
259 goto out_unlock;
260 } 183 }
261 184
262 /* trigger path verification. */
263 if (sch->driver && sch->driver->verify)
264 sch->driver->verify(&sch->dev);
265 else if (sch->lpm == mask)
266 goto out_unreg;
267out_unlock:
268 spin_unlock_irq(sch->lock); 185 spin_unlock_irq(sch->lock);
269 return 0; 186 return 0;
187
270out_unreg: 188out_unreg:
271 spin_unlock_irq(sch->lock);
272 sch->lpm = 0; 189 sch->lpm = 0;
273 if (css_enqueue_subchannel_slow(sch->schid)) { 190 spin_unlock_irq(sch->lock);
274 css_clear_subchannel_slow_list(); 191 css_schedule_eval(sch->schid);
275 need_rescan = 1;
276 }
277 return 0; 192 return 0;
278} 193}
279 194
280static void 195void chsc_chp_offline(struct chp_id chpid)
281s390_set_chpid_offline( __u8 chpid)
282{ 196{
283 char dbf_txt[15]; 197 char dbf_txt[15];
284 struct device *dev;
285 198
286 sprintf(dbf_txt, "chpr%x", chpid); 199 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
287 CIO_TRACE_EVENT(2, dbf_txt); 200 CIO_TRACE_EVENT(2, dbf_txt);
288 201
289 if (get_chp_status(chpid) <= 0) 202 if (chp_get_status(chpid) <= 0)
290 return; 203 return;
291 dev = get_device(&css[0]->chps[chpid]->dev); 204 bus_for_each_dev(&css_bus_type, NULL, &chpid,
292 bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
293 s390_subchannel_remove_chpid); 205 s390_subchannel_remove_chpid);
294
295 if (need_rescan || css_slow_subchannels_exist())
296 queue_work(slow_path_wq, &slow_path_work);
297 put_device(dev);
298}
299
300struct res_acc_data {
301 struct channel_path *chp;
302 u32 fla_mask;
303 u16 fla;
304};
305
306static int
307s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
308{
309 int found;
310 int chp;
311 int ccode;
312
313 found = 0;
314 for (chp = 0; chp <= 7; chp++)
315 /*
316 * check if chpid is in information updated by ssd
317 */
318 if (sch->ssd_info.valid &&
319 sch->ssd_info.chpid[chp] == res_data->chp->id &&
320 (sch->ssd_info.fla[chp] & res_data->fla_mask)
321 == res_data->fla) {
322 found = 1;
323 break;
324 }
325
326 if (found == 0)
327 return 0;
328
329 /*
330 * Do a stsch to update our subchannel structure with the
331 * new path information and eventually check for logically
332 * offline chpids.
333 */
334 ccode = stsch(sch->schid, &sch->schib);
335 if (ccode > 0)
336 return 0;
337
338 return 0x80 >> chp;
339} 206}
340 207
341static int 208static int
342s390_process_res_acc_new_sch(struct subchannel_id schid) 209s390_process_res_acc_new_sch(struct subchannel_id schid)
343{ 210{
344 struct schib schib; 211 struct schib schib;
345 int ret;
346 /* 212 /*
347 * We don't know the device yet, but since a path 213 * We don't know the device yet, but since a path
348 * may be available now to the device we'll have 214 * may be available now to the device we'll have
@@ -353,14 +219,35 @@ s390_process_res_acc_new_sch(struct subchannel_id schid)
353 */ 219 */
354 if (stsch_err(schid, &schib)) 220 if (stsch_err(schid, &schib))
355 /* We're through */ 221 /* We're through */
356 return need_rescan ? -EAGAIN : -ENXIO; 222 return -ENXIO;
357 223
358 /* Put it on the slow path. */ 224 /* Put it on the slow path. */
359 ret = css_enqueue_subchannel_slow(schid); 225 css_schedule_eval(schid);
360 if (ret) { 226 return 0;
361 css_clear_subchannel_slow_list(); 227}
362 need_rescan = 1; 228
363 return -EAGAIN; 229struct res_acc_data {
230 struct chp_id chpid;
231 u32 fla_mask;
232 u16 fla;
233};
234
235static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
236 struct res_acc_data *data)
237{
238 int i;
239 int mask;
240
241 for (i = 0; i < 8; i++) {
242 mask = 0x80 >> i;
243 if (!(ssd->path_mask & mask))
244 continue;
245 if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
246 continue;
247 if ((ssd->fla_valid_mask & mask) &&
248 ((ssd->fla[i] & data->fla_mask) != data->fla))
249 continue;
250 return mask;
364 } 251 }
365 return 0; 252 return 0;
366} 253}
@@ -379,14 +266,11 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
379 return s390_process_res_acc_new_sch(schid); 266 return s390_process_res_acc_new_sch(schid);
380 267
381 spin_lock_irq(sch->lock); 268 spin_lock_irq(sch->lock);
382 269 chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
383 chp_mask = s390_process_res_acc_sch(res_data, sch); 270 if (chp_mask == 0)
384 271 goto out;
385 if (chp_mask == 0) { 272 if (stsch(sch->schid, &sch->schib))
386 spin_unlock_irq(sch->lock); 273 goto out;
387 put_device(&sch->dev);
388 return 0;
389 }
390 old_lpm = sch->lpm; 274 old_lpm = sch->lpm;
391 sch->lpm = ((sch->schib.pmcw.pim & 275 sch->lpm = ((sch->schib.pmcw.pim &
392 sch->schib.pmcw.pam & 276 sch->schib.pmcw.pam &
@@ -396,20 +280,18 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
396 device_trigger_reprobe(sch); 280 device_trigger_reprobe(sch);
397 else if (sch->driver && sch->driver->verify) 281 else if (sch->driver && sch->driver->verify)
398 sch->driver->verify(&sch->dev); 282 sch->driver->verify(&sch->dev);
399 283out:
400 spin_unlock_irq(sch->lock); 284 spin_unlock_irq(sch->lock);
401 put_device(&sch->dev); 285 put_device(&sch->dev);
402 return 0; 286 return 0;
403} 287}
404 288
405 289static void s390_process_res_acc (struct res_acc_data *res_data)
406static int
407s390_process_res_acc (struct res_acc_data *res_data)
408{ 290{
409 int rc;
410 char dbf_txt[15]; 291 char dbf_txt[15];
411 292
412 sprintf(dbf_txt, "accpr%x", res_data->chp->id); 293 sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
294 res_data->chpid.id);
413 CIO_TRACE_EVENT( 2, dbf_txt); 295 CIO_TRACE_EVENT( 2, dbf_txt);
414 if (res_data->fla != 0) { 296 if (res_data->fla != 0) {
415 sprintf(dbf_txt, "fla%x", res_data->fla); 297 sprintf(dbf_txt, "fla%x", res_data->fla);
@@ -423,12 +305,7 @@ s390_process_res_acc (struct res_acc_data *res_data)
423 * The more information we have (info), the less scanning 305 * The more information we have (info), the less scanning
424 * will we have to do. 306 * will we have to do.
425 */ 307 */
426 rc = for_each_subchannel(__s390_process_res_acc, res_data); 308 for_each_subchannel(__s390_process_res_acc, res_data);
427 if (css_slow_subchannels_exist())
428 rc = -EAGAIN;
429 else if (rc != -EAGAIN)
430 rc = 0;
431 return rc;
432} 309}
433 310
434static int 311static int
@@ -480,43 +357,45 @@ struct chsc_sei_area {
480 /* ccdf has to be big enough for a link-incident record */ 357 /* ccdf has to be big enough for a link-incident record */
481} __attribute__ ((packed)); 358} __attribute__ ((packed));
482 359
483static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) 360static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
484{ 361{
485 int chpid; 362 struct chp_id chpid;
363 int id;
486 364
487 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", 365 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
488 sei_area->rs, sei_area->rsid); 366 sei_area->rs, sei_area->rsid);
489 if (sei_area->rs != 4) 367 if (sei_area->rs != 4)
490 return 0; 368 return;
491 chpid = __get_chpid_from_lir(sei_area->ccdf); 369 id = __get_chpid_from_lir(sei_area->ccdf);
492 if (chpid < 0) 370 if (id < 0)
493 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); 371 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
494 else 372 else {
495 s390_set_chpid_offline(chpid); 373 chp_id_init(&chpid);
496 374 chpid.id = id;
497 return 0; 375 chsc_chp_offline(chpid);
376 }
498} 377}
499 378
500static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) 379static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
501{ 380{
502 struct res_acc_data res_data; 381 struct res_acc_data res_data;
503 struct device *dev; 382 struct chp_id chpid;
504 int status; 383 int status;
505 int rc;
506 384
507 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " 385 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
508 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); 386 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
509 if (sei_area->rs != 4) 387 if (sei_area->rs != 4)
510 return 0; 388 return;
389 chp_id_init(&chpid);
390 chpid.id = sei_area->rsid;
511 /* allocate a new channel path structure, if needed */ 391 /* allocate a new channel path structure, if needed */
512 status = get_chp_status(sei_area->rsid); 392 status = chp_get_status(chpid);
513 if (status < 0) 393 if (status < 0)
514 new_channel_path(sei_area->rsid); 394 chp_new(chpid);
515 else if (!status) 395 else if (!status)
516 return 0; 396 return;
517 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
518 memset(&res_data, 0, sizeof(struct res_acc_data)); 397 memset(&res_data, 0, sizeof(struct res_acc_data));
519 res_data.chp = to_channelpath(dev); 398 res_data.chpid = chpid;
520 if ((sei_area->vf & 0xc0) != 0) { 399 if ((sei_area->vf & 0xc0) != 0) {
521 res_data.fla = sei_area->fla; 400 res_data.fla = sei_area->fla;
522 if ((sei_area->vf & 0xc0) == 0xc0) 401 if ((sei_area->vf & 0xc0) == 0xc0)
@@ -526,51 +405,82 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
526 /* link address */ 405 /* link address */
527 res_data.fla_mask = 0xff00; 406 res_data.fla_mask = 0xff00;
528 } 407 }
529 rc = s390_process_res_acc(&res_data); 408 s390_process_res_acc(&res_data);
530 put_device(dev);
531
532 return rc;
533} 409}
534 410
535static int chsc_process_sei(struct chsc_sei_area *sei_area) 411struct chp_config_data {
412 u8 map[32];
413 u8 op;
414 u8 pc;
415};
416
417static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
536{ 418{
537 int rc; 419 struct chp_config_data *data;
420 struct chp_id chpid;
421 int num;
422
423 CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
424 if (sei_area->rs != 0)
425 return;
426 data = (struct chp_config_data *) &(sei_area->ccdf);
427 chp_id_init(&chpid);
428 for (num = 0; num <= __MAX_CHPID; num++) {
429 if (!chp_test_bit(data->map, num))
430 continue;
431 chpid.id = num;
432 printk(KERN_WARNING "cio: processing configure event %d for "
433 "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
434 switch (data->op) {
435 case 0:
436 chp_cfg_schedule(chpid, 1);
437 break;
438 case 1:
439 chp_cfg_schedule(chpid, 0);
440 break;
441 case 2:
442 chp_cfg_cancel_deconfigure(chpid);
443 break;
444 }
445 }
446}
538 447
448static void chsc_process_sei(struct chsc_sei_area *sei_area)
449{
539 /* Check if we might have lost some information. */ 450 /* Check if we might have lost some information. */
540 if (sei_area->flags & 0x40) 451 if (sei_area->flags & 0x40) {
541 CIO_CRW_EVENT(2, "chsc: event overflow\n"); 452 CIO_CRW_EVENT(2, "chsc: event overflow\n");
453 css_schedule_eval_all();
454 }
542 /* which kind of information was stored? */ 455 /* which kind of information was stored? */
543 rc = 0;
544 switch (sei_area->cc) { 456 switch (sei_area->cc) {
545 case 1: /* link incident */ 457 case 1: /* link incident */
546 rc = chsc_process_sei_link_incident(sei_area); 458 chsc_process_sei_link_incident(sei_area);
547 break; 459 break;
548 case 2: /* i/o resource accessibility */ 460 chsc_process_sei_res_acc(sei_area);
549 rc = chsc_process_sei_res_acc(sei_area); 461 chsc_process_sei_res_acc(sei_area);
462 break;
463 case 8: /* channel-path-configuration notification */
464 chsc_process_sei_chp_config(sei_area);
550 break; 465 break;
551 default: /* other stuff */ 466 default: /* other stuff */
552 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", 467 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
553 sei_area->cc); 468 sei_area->cc);
554 break; 469 break;
555 } 470 }
556
557 return rc;
558} 471}
559 472
560int chsc_process_crw(void) 473void chsc_process_crw(void)
561{ 474{
562 struct chsc_sei_area *sei_area; 475 struct chsc_sei_area *sei_area;
563 int ret;
564 int rc;
565 476
566 if (!sei_page) 477 if (!sei_page)
567 return 0; 478 return;
568 /* Access to sei_page is serialized through machine check handler 479 /* Access to sei_page is serialized through machine check handler
569 * thread, so no need for locking. */ 480 * thread, so no need for locking. */
570 sei_area = sei_page; 481 sei_area = sei_page;
571 482
572 CIO_TRACE_EVENT( 2, "prcss"); 483 CIO_TRACE_EVENT( 2, "prcss");
573 ret = 0;
574 do { 484 do {
575 memset(sei_area, 0, sizeof(*sei_area)); 485 memset(sei_area, 0, sizeof(*sei_area));
576 sei_area->request.length = 0x0010; 486 sei_area->request.length = 0x0010;
@@ -580,37 +490,26 @@ int chsc_process_crw(void)
580 490
581 if (sei_area->response.code == 0x0001) { 491 if (sei_area->response.code == 0x0001) {
582 CIO_CRW_EVENT(4, "chsc: sei successful\n"); 492 CIO_CRW_EVENT(4, "chsc: sei successful\n");
583 rc = chsc_process_sei(sei_area); 493 chsc_process_sei(sei_area);
584 if (rc)
585 ret = rc;
586 } else { 494 } else {
587 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", 495 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
588 sei_area->response.code); 496 sei_area->response.code);
589 ret = 0;
590 break; 497 break;
591 } 498 }
592 } while (sei_area->flags & 0x80); 499 } while (sei_area->flags & 0x80);
593
594 return ret;
595} 500}
596 501
597static int 502static int
598__chp_add_new_sch(struct subchannel_id schid) 503__chp_add_new_sch(struct subchannel_id schid)
599{ 504{
600 struct schib schib; 505 struct schib schib;
601 int ret;
602 506
603 if (stsch_err(schid, &schib)) 507 if (stsch_err(schid, &schib))
604 /* We're through */ 508 /* We're through */
605 return need_rescan ? -EAGAIN : -ENXIO; 509 return -ENXIO;
606 510
607 /* Put it on the slow path. */ 511 /* Put it on the slow path. */
608 ret = css_enqueue_subchannel_slow(schid); 512 css_schedule_eval(schid);
609 if (ret) {
610 css_clear_subchannel_slow_list();
611 need_rescan = 1;
612 return -EAGAIN;
613 }
614 return 0; 513 return 0;
615} 514}
616 515
@@ -619,10 +518,10 @@ static int
619__chp_add(struct subchannel_id schid, void *data) 518__chp_add(struct subchannel_id schid, void *data)
620{ 519{
621 int i, mask; 520 int i, mask;
622 struct channel_path *chp; 521 struct chp_id *chpid;
623 struct subchannel *sch; 522 struct subchannel *sch;
624 523
625 chp = data; 524 chpid = data;
626 sch = get_subchannel_by_schid(schid); 525 sch = get_subchannel_by_schid(schid);
627 if (!sch) 526 if (!sch)
628 /* Check if the subchannel is now available. */ 527 /* Check if the subchannel is now available. */
@@ -631,7 +530,7 @@ __chp_add(struct subchannel_id schid, void *data)
631 for (i=0; i<8; i++) { 530 for (i=0; i<8; i++) {
632 mask = 0x80 >> i; 531 mask = 0x80 >> i;
633 if ((sch->schib.pmcw.pim & mask) && 532 if ((sch->schib.pmcw.pim & mask) &&
634 (sch->schib.pmcw.chpid[i] == chp->id)) { 533 (sch->schib.pmcw.chpid[i] == chpid->id)) {
635 if (stsch(sch->schid, &sch->schib) != 0) { 534 if (stsch(sch->schid, &sch->schib) != 0) {
636 /* Endgame. */ 535 /* Endgame. */
637 spin_unlock_irq(sch->lock); 536 spin_unlock_irq(sch->lock);
@@ -657,122 +556,58 @@ __chp_add(struct subchannel_id schid, void *data)
657 return 0; 556 return 0;
658} 557}
659 558
660static int 559void chsc_chp_online(struct chp_id chpid)
661chp_add(int chpid)
662{ 560{
663 int rc;
664 char dbf_txt[15]; 561 char dbf_txt[15];
665 struct device *dev;
666 562
667 if (!get_chp_status(chpid)) 563 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
668 return 0; /* no need to do the rest */
669
670 sprintf(dbf_txt, "cadd%x", chpid);
671 CIO_TRACE_EVENT(2, dbf_txt); 564 CIO_TRACE_EVENT(2, dbf_txt);
672 565
673 dev = get_device(&css[0]->chps[chpid]->dev); 566 if (chp_get_status(chpid) != 0)
674 rc = for_each_subchannel(__chp_add, to_channelpath(dev)); 567 for_each_subchannel(__chp_add, &chpid);
675 if (css_slow_subchannels_exist())
676 rc = -EAGAIN;
677 if (rc != -EAGAIN)
678 rc = 0;
679 put_device(dev);
680 return rc;
681} 568}
682 569
683/* 570static void __s390_subchannel_vary_chpid(struct subchannel *sch,
684 * Handling of crw machine checks with channel path source. 571 struct chp_id chpid, int on)
685 */
686int
687chp_process_crw(int chpid, int on)
688{
689 if (on == 0) {
690 /* Path has gone. We use the link incident routine.*/
691 s390_set_chpid_offline(chpid);
692 return 0; /* De-register is async anyway. */
693 }
694 /*
695 * Path has come. Allocate a new channel path structure,
696 * if needed.
697 */
698 if (get_chp_status(chpid) < 0)
699 new_channel_path(chpid);
700 /* Avoid the extra overhead in process_rec_acc. */
701 return chp_add(chpid);
702}
703
704static int check_for_io_on_path(struct subchannel *sch, int index)
705{
706 int cc;
707
708 cc = stsch(sch->schid, &sch->schib);
709 if (cc)
710 return 0;
711 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
712 return 1;
713 return 0;
714}
715
716static void terminate_internal_io(struct subchannel *sch)
717{
718 if (cio_clear(sch)) {
719 /* Recheck device in case clear failed. */
720 sch->lpm = 0;
721 if (device_trigger_verify(sch) != 0) {
722 if(css_enqueue_subchannel_slow(sch->schid)) {
723 css_clear_subchannel_slow_list();
724 need_rescan = 1;
725 }
726 }
727 return;
728 }
729 /* Request retry of internal operation. */
730 device_set_intretry(sch);
731 /* Call handler. */
732 if (sch->driver && sch->driver->termination)
733 sch->driver->termination(&sch->dev);
734}
735
736static void
737__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
738{ 572{
739 int chp, old_lpm; 573 int chp, old_lpm;
574 int mask;
740 unsigned long flags; 575 unsigned long flags;
741 576
742 if (!sch->ssd_info.valid)
743 return;
744
745 spin_lock_irqsave(sch->lock, flags); 577 spin_lock_irqsave(sch->lock, flags);
746 old_lpm = sch->lpm; 578 old_lpm = sch->lpm;
747 for (chp = 0; chp < 8; chp++) { 579 for (chp = 0; chp < 8; chp++) {
748 if (sch->ssd_info.chpid[chp] != chpid) 580 mask = 0x80 >> chp;
581 if (!(sch->ssd_info.path_mask & mask))
582 continue;
583 if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
749 continue; 584 continue;
750 585
751 if (on) { 586 if (on) {
752 sch->opm |= (0x80 >> chp); 587 sch->opm |= mask;
753 sch->lpm |= (0x80 >> chp); 588 sch->lpm |= mask;
754 if (!old_lpm) 589 if (!old_lpm)
755 device_trigger_reprobe(sch); 590 device_trigger_reprobe(sch);
756 else if (sch->driver && sch->driver->verify) 591 else if (sch->driver && sch->driver->verify)
757 sch->driver->verify(&sch->dev); 592 sch->driver->verify(&sch->dev);
758 break; 593 break;
759 } 594 }
760 sch->opm &= ~(0x80 >> chp); 595 sch->opm &= ~mask;
761 sch->lpm &= ~(0x80 >> chp); 596 sch->lpm &= ~mask;
762 if (check_for_io_on_path(sch, chp)) { 597 if (check_for_io_on_path(sch, mask)) {
763 if (device_is_online(sch)) 598 if (device_is_online(sch))
764 /* Path verification is done after killing. */ 599 /* Path verification is done after killing. */
765 device_kill_io(sch); 600 device_kill_io(sch);
766 else 601 else {
767 /* Kill and retry internal I/O. */ 602 /* Kill and retry internal I/O. */
768 terminate_internal_io(sch); 603 terminate_internal_io(sch);
769 } else if (!sch->lpm) { 604 /* Re-start path verification. */
770 if (device_trigger_verify(sch) != 0) { 605 if (sch->driver && sch->driver->verify)
771 if (css_enqueue_subchannel_slow(sch->schid)) { 606 sch->driver->verify(&sch->dev);
772 css_clear_subchannel_slow_list();
773 need_rescan = 1;
774 }
775 } 607 }
608 } else if (!sch->lpm) {
609 if (device_trigger_verify(sch) != 0)
610 css_schedule_eval(sch->schid);
776 } else if (sch->driver && sch->driver->verify) 611 } else if (sch->driver && sch->driver->verify)
777 sch->driver->verify(&sch->dev); 612 sch->driver->verify(&sch->dev);
778 break; 613 break;
@@ -780,11 +615,10 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
780 spin_unlock_irqrestore(sch->lock, flags); 615 spin_unlock_irqrestore(sch->lock, flags);
781} 616}
782 617
783static int 618static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
784s390_subchannel_vary_chpid_off(struct device *dev, void *data)
785{ 619{
786 struct subchannel *sch; 620 struct subchannel *sch;
787 __u8 *chpid; 621 struct chp_id *chpid;
788 622
789 sch = to_subchannel(dev); 623 sch = to_subchannel(dev);
790 chpid = data; 624 chpid = data;
@@ -793,11 +627,10 @@ s390_subchannel_vary_chpid_off(struct device *dev, void *data)
793 return 0; 627 return 0;
794} 628}
795 629
796static int 630static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
797s390_subchannel_vary_chpid_on(struct device *dev, void *data)
798{ 631{
799 struct subchannel *sch; 632 struct subchannel *sch;
800 __u8 *chpid; 633 struct chp_id *chpid;
801 634
802 sch = to_subchannel(dev); 635 sch = to_subchannel(dev);
803 chpid = data; 636 chpid = data;
@@ -821,40 +654,17 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
821 /* We're through */ 654 /* We're through */
822 return -ENXIO; 655 return -ENXIO;
823 /* Put it on the slow path. */ 656 /* Put it on the slow path. */
824 if (css_enqueue_subchannel_slow(schid)) { 657 css_schedule_eval(schid);
825 css_clear_subchannel_slow_list();
826 need_rescan = 1;
827 return -EAGAIN;
828 }
829 return 0; 658 return 0;
830} 659}
831 660
832/* 661/**
833 * Function: s390_vary_chpid 662 * chsc_chp_vary - propagate channel-path vary operation to subchannels
834 Varies the specified chpid online or offline 663 * @chpid: channel-path ID
664 * @on: non-zero for vary online, zero for vary offline
835 */ 665 */
836static int 666int chsc_chp_vary(struct chp_id chpid, int on)
837s390_vary_chpid( __u8 chpid, int on)
838{ 667{
839 char dbf_text[15];
840 int status;
841
842 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
843 CIO_TRACE_EVENT( 2, dbf_text);
844
845 status = get_chp_status(chpid);
846 if (status < 0) {
847 printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
848 return -EINVAL;
849 }
850
851 if (!on && !status) {
852 printk(KERN_ERR "chpid %x is already offline\n", chpid);
853 return -EINVAL;
854 }
855
856 set_chp_logically_online(chpid, on);
857
858 /* 668 /*
859 * Redo PathVerification on the devices the chpid connects to 669 * Redo PathVerification on the devices the chpid connects to
860 */ 670 */
@@ -865,118 +675,9 @@ s390_vary_chpid( __u8 chpid, int on)
865 if (on) 675 if (on)
866 /* Scan for new devices on varied on path. */ 676 /* Scan for new devices on varied on path. */
867 for_each_subchannel(__s390_vary_chpid_on, NULL); 677 for_each_subchannel(__s390_vary_chpid_on, NULL);
868 if (need_rescan || css_slow_subchannels_exist())
869 queue_work(slow_path_wq, &slow_path_work);
870 return 0; 678 return 0;
871} 679}
872 680
873/*
874 * Channel measurement related functions
875 */
876static ssize_t
877chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
878 size_t count)
879{
880 struct channel_path *chp;
881 unsigned int size;
882
883 chp = to_channelpath(container_of(kobj, struct device, kobj));
884 if (!chp->cmg_chars)
885 return 0;
886
887 size = sizeof(struct cmg_chars);
888
889 if (off > size)
890 return 0;
891 if (off + count > size)
892 count = size - off;
893 memcpy(buf, chp->cmg_chars + off, count);
894 return count;
895}
896
897static struct bin_attribute chp_measurement_chars_attr = {
898 .attr = {
899 .name = "measurement_chars",
900 .mode = S_IRUSR,
901 .owner = THIS_MODULE,
902 },
903 .size = sizeof(struct cmg_chars),
904 .read = chp_measurement_chars_read,
905};
906
907static void
908chp_measurement_copy_block(struct cmg_entry *buf,
909 struct channel_subsystem *css, int chpid)
910{
911 void *area;
912 struct cmg_entry *entry, reference_buf;
913 int idx;
914
915 if (chpid < 128) {
916 area = css->cub_addr1;
917 idx = chpid;
918 } else {
919 area = css->cub_addr2;
920 idx = chpid - 128;
921 }
922 entry = area + (idx * sizeof(struct cmg_entry));
923 do {
924 memcpy(buf, entry, sizeof(*entry));
925 memcpy(&reference_buf, entry, sizeof(*entry));
926 } while (reference_buf.values[0] != buf->values[0]);
927}
928
929static ssize_t
930chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
931{
932 struct channel_path *chp;
933 struct channel_subsystem *css;
934 unsigned int size;
935
936 chp = to_channelpath(container_of(kobj, struct device, kobj));
937 css = to_css(chp->dev.parent);
938
939 size = sizeof(struct cmg_entry);
940
941 /* Only allow single reads. */
942 if (off || count < size)
943 return 0;
944 chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
945 count = size;
946 return count;
947}
948
949static struct bin_attribute chp_measurement_attr = {
950 .attr = {
951 .name = "measurement",
952 .mode = S_IRUSR,
953 .owner = THIS_MODULE,
954 },
955 .size = sizeof(struct cmg_entry),
956 .read = chp_measurement_read,
957};
958
959static void
960chsc_remove_chp_cmg_attr(struct channel_path *chp)
961{
962 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
963 device_remove_bin_file(&chp->dev, &chp_measurement_attr);
964}
965
966static int
967chsc_add_chp_cmg_attr(struct channel_path *chp)
968{
969 int ret;
970
971 ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
972 if (ret)
973 return ret;
974 ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
975 if (ret)
976 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
977 return ret;
978}
979
980static void 681static void
981chsc_remove_cmg_attr(struct channel_subsystem *css) 682chsc_remove_cmg_attr(struct channel_subsystem *css)
982{ 683{
@@ -985,7 +686,7 @@ chsc_remove_cmg_attr(struct channel_subsystem *css)
985 for (i = 0; i <= __MAX_CHPID; i++) { 686 for (i = 0; i <= __MAX_CHPID; i++) {
986 if (!css->chps[i]) 687 if (!css->chps[i])
987 continue; 688 continue;
988 chsc_remove_chp_cmg_attr(css->chps[i]); 689 chp_remove_cmg_attr(css->chps[i]);
989 } 690 }
990} 691}
991 692
@@ -998,7 +699,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css)
998 for (i = 0; i <= __MAX_CHPID; i++) { 699 for (i = 0; i <= __MAX_CHPID; i++) {
999 if (!css->chps[i]) 700 if (!css->chps[i])
1000 continue; 701 continue;
1001 ret = chsc_add_chp_cmg_attr(css->chps[i]); 702 ret = chp_add_cmg_attr(css->chps[i]);
1002 if (ret) 703 if (ret)
1003 goto cleanup; 704 goto cleanup;
1004 } 705 }
@@ -1007,12 +708,11 @@ cleanup:
1007 for (--i; i >= 0; i--) { 708 for (--i; i >= 0; i--) {
1008 if (!css->chps[i]) 709 if (!css->chps[i])
1009 continue; 710 continue;
1010 chsc_remove_chp_cmg_attr(css->chps[i]); 711 chp_remove_cmg_attr(css->chps[i]);
1011 } 712 }
1012 return ret; 713 return ret;
1013} 714}
1014 715
1015
1016static int 716static int
1017__chsc_do_secm(struct channel_subsystem *css, int enable, void *page) 717__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
1018{ 718{
@@ -1118,7 +818,7 @@ chsc_secm(struct channel_subsystem *css, int enable)
1118 } else 818 } else
1119 chsc_remove_cmg_attr(css); 819 chsc_remove_cmg_attr(css);
1120 } 820 }
1121 if (enable && !css->cm_enabled) { 821 if (!css->cm_enabled) {
1122 free_page((unsigned long)css->cub_addr1); 822 free_page((unsigned long)css->cub_addr1);
1123 free_page((unsigned long)css->cub_addr2); 823 free_page((unsigned long)css->cub_addr2);
1124 } 824 }
@@ -1127,109 +827,8 @@ chsc_secm(struct channel_subsystem *css, int enable)
1127 return ret; 827 return ret;
1128} 828}
1129 829
1130/* 830int chsc_determine_channel_path_description(struct chp_id chpid,
1131 * Files for the channel path entries. 831 struct channel_path_desc *desc)
1132 */
1133static ssize_t
1134chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
1135{
1136 struct channel_path *chp = container_of(dev, struct channel_path, dev);
1137
1138 if (!chp)
1139 return 0;
1140 return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
1141 sprintf(buf, "offline\n"));
1142}
1143
1144static ssize_t
1145chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1146{
1147 struct channel_path *cp = container_of(dev, struct channel_path, dev);
1148 char cmd[10];
1149 int num_args;
1150 int error;
1151
1152 num_args = sscanf(buf, "%5s", cmd);
1153 if (!num_args)
1154 return count;
1155
1156 if (!strnicmp(cmd, "on", 2))
1157 error = s390_vary_chpid(cp->id, 1);
1158 else if (!strnicmp(cmd, "off", 3))
1159 error = s390_vary_chpid(cp->id, 0);
1160 else
1161 error = -EINVAL;
1162
1163 return error < 0 ? error : count;
1164
1165}
1166
1167static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
1168
1169static ssize_t
1170chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1171{
1172 struct channel_path *chp = container_of(dev, struct channel_path, dev);
1173
1174 if (!chp)
1175 return 0;
1176 return sprintf(buf, "%x\n", chp->desc.desc);
1177}
1178
1179static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
1180
1181static ssize_t
1182chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
1183{
1184 struct channel_path *chp = to_channelpath(dev);
1185
1186 if (!chp)
1187 return 0;
1188 if (chp->cmg == -1) /* channel measurements not available */
1189 return sprintf(buf, "unknown\n");
1190 return sprintf(buf, "%x\n", chp->cmg);
1191}
1192
1193static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
1194
1195static ssize_t
1196chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
1197{
1198 struct channel_path *chp = to_channelpath(dev);
1199
1200 if (!chp)
1201 return 0;
1202 if (chp->shared == -1) /* channel measurements not available */
1203 return sprintf(buf, "unknown\n");
1204 return sprintf(buf, "%x\n", chp->shared);
1205}
1206
1207static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
1208
1209static struct attribute * chp_attrs[] = {
1210 &dev_attr_status.attr,
1211 &dev_attr_type.attr,
1212 &dev_attr_cmg.attr,
1213 &dev_attr_shared.attr,
1214 NULL,
1215};
1216
1217static struct attribute_group chp_attr_group = {
1218 .attrs = chp_attrs,
1219};
1220
1221static void
1222chp_release(struct device *dev)
1223{
1224 struct channel_path *cp;
1225
1226 cp = container_of(dev, struct channel_path, dev);
1227 kfree(cp);
1228}
1229
1230static int
1231chsc_determine_channel_path_description(int chpid,
1232 struct channel_path_desc *desc)
1233{ 832{
1234 int ccode, ret; 833 int ccode, ret;
1235 834
@@ -1252,8 +851,8 @@ chsc_determine_channel_path_description(int chpid,
1252 scpd_area->request.length = 0x0010; 851 scpd_area->request.length = 0x0010;
1253 scpd_area->request.code = 0x0002; 852 scpd_area->request.code = 0x0002;
1254 853
1255 scpd_area->first_chpid = chpid; 854 scpd_area->first_chpid = chpid.id;
1256 scpd_area->last_chpid = chpid; 855 scpd_area->last_chpid = chpid.id;
1257 856
1258 ccode = chsc(scpd_area); 857 ccode = chsc(scpd_area);
1259 if (ccode > 0) { 858 if (ccode > 0) {
@@ -1316,8 +915,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
1316 } 915 }
1317} 916}
1318 917
1319static int 918int chsc_get_channel_measurement_chars(struct channel_path *chp)
1320chsc_get_channel_measurement_chars(struct channel_path *chp)
1321{ 919{
1322 int ccode, ret; 920 int ccode, ret;
1323 921
@@ -1349,8 +947,8 @@ chsc_get_channel_measurement_chars(struct channel_path *chp)
1349 scmc_area->request.length = 0x0010; 947 scmc_area->request.length = 0x0010;
1350 scmc_area->request.code = 0x0022; 948 scmc_area->request.code = 0x0022;
1351 949
1352 scmc_area->first_chpid = chp->id; 950 scmc_area->first_chpid = chp->chpid.id;
1353 scmc_area->last_chpid = chp->id; 951 scmc_area->last_chpid = chp->chpid.id;
1354 952
1355 ccode = chsc(scmc_area); 953 ccode = chsc(scmc_area);
1356 if (ccode > 0) { 954 if (ccode > 0) {
@@ -1392,94 +990,6 @@ out:
1392 return ret; 990 return ret;
1393} 991}
1394 992
1395/*
1396 * Entries for chpids on the system bus.
1397 * This replaces /proc/chpids.
1398 */
1399static int
1400new_channel_path(int chpid)
1401{
1402 struct channel_path *chp;
1403 int ret;
1404
1405 chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
1406 if (!chp)
1407 return -ENOMEM;
1408
1409 /* fill in status, etc. */
1410 chp->id = chpid;
1411 chp->state = 1;
1412 chp->dev.parent = &css[0]->device;
1413 chp->dev.release = chp_release;
1414 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
1415
1416 /* Obtain channel path description and fill it in. */
1417 ret = chsc_determine_channel_path_description(chpid, &chp->desc);
1418 if (ret)
1419 goto out_free;
1420 /* Get channel-measurement characteristics. */
1421 if (css_characteristics_avail && css_chsc_characteristics.scmc
1422 && css_chsc_characteristics.secm) {
1423 ret = chsc_get_channel_measurement_chars(chp);
1424 if (ret)
1425 goto out_free;
1426 } else {
1427 static int msg_done;
1428
1429 if (!msg_done) {
1430 printk(KERN_WARNING "cio: Channel measurements not "
1431 "available, continuing.\n");
1432 msg_done = 1;
1433 }
1434 chp->cmg = -1;
1435 }
1436
1437 /* make it known to the system */
1438 ret = device_register(&chp->dev);
1439 if (ret) {
1440 printk(KERN_WARNING "%s: could not register %02x\n",
1441 __func__, chpid);
1442 goto out_free;
1443 }
1444 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
1445 if (ret) {
1446 device_unregister(&chp->dev);
1447 goto out_free;
1448 }
1449 mutex_lock(&css[0]->mutex);
1450 if (css[0]->cm_enabled) {
1451 ret = chsc_add_chp_cmg_attr(chp);
1452 if (ret) {
1453 sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
1454 device_unregister(&chp->dev);
1455 mutex_unlock(&css[0]->mutex);
1456 goto out_free;
1457 }
1458 }
1459 css[0]->chps[chpid] = chp;
1460 mutex_unlock(&css[0]->mutex);
1461 return ret;
1462out_free:
1463 kfree(chp);
1464 return ret;
1465}
1466
1467void *
1468chsc_get_chp_desc(struct subchannel *sch, int chp_no)
1469{
1470 struct channel_path *chp;
1471 struct channel_path_desc *desc;
1472
1473 chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
1474 if (!chp)
1475 return NULL;
1476 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
1477 if (!desc)
1478 return NULL;
1479 memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
1480 return desc;
1481}
1482
1483static int __init 993static int __init
1484chsc_alloc_sei_area(void) 994chsc_alloc_sei_area(void)
1485{ 995{
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 0fb2b024208f..2ad81d11cf7b 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -1,9 +1,10 @@
1#ifndef S390_CHSC_H 1#ifndef S390_CHSC_H
2#define S390_CHSC_H 2#define S390_CHSC_H
3 3
4#define CHSC_SEI_ACC_CHPID 1 4#include <linux/types.h>
5#define CHSC_SEI_ACC_LINKADDR 2 5#include <linux/device.h>
6#define CHSC_SEI_ACC_FULLLINKADDR 3 6#include <asm/chpid.h>
7#include "schid.h"
7 8
8#define CHSC_SDA_OC_MSS 0x2 9#define CHSC_SDA_OC_MSS 0x2
9 10
@@ -33,23 +34,9 @@ struct channel_path_desc {
33 u8 chpp; 34 u8 chpp;
34} __attribute__ ((packed)); 35} __attribute__ ((packed));
35 36
36struct channel_path { 37struct channel_path;
37 int id;
38 int state;
39 struct channel_path_desc desc;
40 /* Channel-measurement related stuff: */
41 int cmg;
42 int shared;
43 void *cmg_chars;
44 struct device dev;
45};
46 38
47extern void s390_process_css( void ); 39extern void chsc_process_crw(void);
48extern void chsc_validate_chpids(struct subchannel *);
49extern void chpid_is_actually_online(int);
50extern int css_get_ssd_info(struct subchannel *);
51extern int chsc_process_crw(void);
52extern int chp_process_crw(int, int);
53 40
54struct css_general_char { 41struct css_general_char {
55 u64 : 41; 42 u64 : 41;
@@ -82,15 +69,26 @@ struct css_chsc_char {
82extern struct css_general_char css_general_characteristics; 69extern struct css_general_char css_general_characteristics;
83extern struct css_chsc_char css_chsc_characteristics; 70extern struct css_chsc_char css_chsc_characteristics;
84 71
72struct chsc_ssd_info {
73 u8 path_mask;
74 u8 fla_valid_mask;
75 struct chp_id chpid[8];
76 u16 fla[8];
77};
78extern int chsc_get_ssd_info(struct subchannel_id schid,
79 struct chsc_ssd_info *ssd);
85extern int chsc_determine_css_characteristics(void); 80extern int chsc_determine_css_characteristics(void);
86extern int css_characteristics_avail; 81extern int css_characteristics_avail;
87 82
88extern void *chsc_get_chp_desc(struct subchannel*, int);
89
90extern int chsc_enable_facility(int); 83extern int chsc_enable_facility(int);
91struct channel_subsystem; 84struct channel_subsystem;
92extern int chsc_secm(struct channel_subsystem *, int); 85extern int chsc_secm(struct channel_subsystem *, int);
93 86
94#define to_channelpath(device) container_of(device, struct channel_path, dev) 87int chsc_chp_vary(struct chp_id chpid, int on);
88int chsc_determine_channel_path_description(struct chp_id chpid,
89 struct channel_path_desc *desc);
90void chsc_chp_online(struct chp_id chpid);
91void chsc_chp_offline(struct chp_id chpid);
92int chsc_get_channel_measurement_chars(struct channel_path *chp);
95 93
96#endif 94#endif
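Editor's note: the reworked header replaces raw __u8 channel-path numbers with struct chp_id throughout its interface. A minimal caller sketch, assuming chp_id_init() from asm/chpid.h initializes the remaining fields as in the css_reset() hunk below; the helper name is made up:

#include <asm/chpid.h>
#include "chsc.h"

/* Hypothetical helper: vary a channel path offline and back online
 * through the chp_id based interface declared above. */
static int example_toggle_chpid(u8 id)
{
	struct chp_id chpid;
	int ret;

	chp_id_init(&chpid);		/* set up cssid and reserved fields */
	chpid.id = id;
	ret = chsc_chp_vary(chpid, 0);	/* vary offline */
	if (ret)
		return ret;
	return chsc_chp_vary(chpid, 1);	/* vary back online */
}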
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 9cb129ab5be5..ea1defba5693 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -22,6 +22,7 @@
22#include <asm/setup.h> 22#include <asm/setup.h>
23#include <asm/reset.h> 23#include <asm/reset.h>
24#include <asm/ipl.h> 24#include <asm/ipl.h>
25#include <asm/chpid.h>
25#include "airq.h" 26#include "airq.h"
26#include "cio.h" 27#include "cio.h"
27#include "css.h" 28#include "css.h"
@@ -29,6 +30,7 @@
29#include "ioasm.h" 30#include "ioasm.h"
30#include "blacklist.h" 31#include "blacklist.h"
31#include "cio_debug.h" 32#include "cio_debug.h"
33#include "chp.h"
32#include "../s390mach.h" 34#include "../s390mach.h"
33 35
34debug_info_t *cio_debug_msg_id; 36debug_info_t *cio_debug_msg_id;
@@ -592,9 +594,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
592 err = -ENODEV; 594 err = -ENODEV;
593 goto out; 595 goto out;
594 } 596 }
595 sch->opm = 0xff; 597 if (cio_is_console(sch->schid))
596 if (!cio_is_console(sch->schid)) 598 sch->opm = 0xff;
597 chsc_validate_chpids(sch); 599 else
600 sch->opm = chp_get_sch_opm(sch);
598 sch->lpm = sch->schib.pmcw.pam & sch->opm; 601 sch->lpm = sch->schib.pmcw.pam & sch->opm;
599 602
600 CIO_DEBUG(KERN_INFO, 0, 603 CIO_DEBUG(KERN_INFO, 0,
@@ -954,6 +957,7 @@ static void css_reset(void)
954{ 957{
955 int i, ret; 958 int i, ret;
956 unsigned long long timeout; 959 unsigned long long timeout;
960 struct chp_id chpid;
957 961
958 /* Reset subchannels. */ 962 /* Reset subchannels. */
959 for_each_subchannel(__shutdown_subchannel_easy, NULL); 963 for_each_subchannel(__shutdown_subchannel_easy, NULL);
@@ -963,8 +967,10 @@ static void css_reset(void)
963 __ctl_set_bit(14, 28); 967 __ctl_set_bit(14, 28);
964 /* Temporarily reenable machine checks. */ 968 /* Temporarily reenable machine checks. */
965 local_mcck_enable(); 969 local_mcck_enable();
970 chp_id_init(&chpid);
966 for (i = 0; i <= __MAX_CHPID; i++) { 971 for (i = 0; i <= __MAX_CHPID; i++) {
967 ret = rchp(i); 972 chpid.id = i;
973 ret = rchp(chpid);
968 if ((ret == 0) || (ret == 2)) 974 if ((ret == 0) || (ret == 2))
969 /* 975 /*
970 * rchp either succeeded, or another rchp is already 976 * rchp either succeeded, or another rchp is already
@@ -1048,37 +1054,19 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
1048 do_reipl_asm(*((__u32*)&schid)); 1054 do_reipl_asm(*((__u32*)&schid));
1049} 1055}
1050 1056
1051static struct schib __initdata ipl_schib; 1057int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1052
1053/*
1054 * ipl_save_parameters gets called very early. It is not allowed to access
1055 * anything in the bss section at all. The bss section is not cleared yet,
1056 * but may contain some ipl parameters written by the firmware.
1057 * These parameters (if present) are copied to 0x2000.
1058 * To avoid corruption of the ipl parameters, all variables used by this
1059 * function must reside on the stack or in the data section.
1060 */
1061void ipl_save_parameters(void)
1062{ 1058{
1063 struct subchannel_id schid; 1059 struct subchannel_id schid;
1064 unsigned int *ipl_ptr; 1060 struct schib schib;
1065 void *src, *dst;
1066 1061
1067 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; 1062 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
1068 if (!schid.one) 1063 if (!schid.one)
1069 return; 1064 return -ENODEV;
1070 if (stsch(schid, &ipl_schib)) 1065 if (stsch(schid, &schib))
1071 return; 1066 return -ENODEV;
1072 if (!ipl_schib.pmcw.dnv) 1067 if (!schib.pmcw.dnv)
1073 return; 1068 return -ENODEV;
1074 ipl_devno = ipl_schib.pmcw.dev; 1069 iplinfo->devno = schib.pmcw.dev;
1075 ipl_flags |= IPL_DEVNO_VALID; 1070 iplinfo->is_qdio = schib.pmcw.qf;
1076 if (!ipl_schib.pmcw.qf) 1071 return 0;
1077 return;
1078 ipl_flags |= IPL_PARMBLOCK_VALID;
1079 ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
1080 src = (void *)(unsigned long)*ipl_ptr;
1081 dst = (void *)IPL_PARMBLOCK_ORIGIN;
1082 memmove(dst, src, PAGE_SIZE);
1083 *ipl_ptr = IPL_PARMBLOCK_ORIGIN;
1084} 1072}
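Editor's note: cio_get_iplinfo() turns the old ipl_save_parameters() side effects into a plain query. A usage sketch, assuming struct cio_iplinfo carries just the devno and is_qdio fields filled in above:

static int example_show_ipl_device(void)
{
	struct cio_iplinfo iplinfo;

	if (cio_get_iplinfo(&iplinfo))
		return -ENODEV;	/* not IPLed from a valid CCW device */
	printk(KERN_INFO "IPL devno: %04x, qdio: %d\n",
	       iplinfo.devno, iplinfo.is_qdio);
	return 0;
}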
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 35154a210357..7446c39951a7 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -1,18 +1,11 @@
1#ifndef S390_CIO_H 1#ifndef S390_CIO_H
2#define S390_CIO_H 2#define S390_CIO_H
3 3
4#include "schid.h"
5#include <linux/mutex.h> 4#include <linux/mutex.h>
6 5#include <linux/device.h>
7/* 6#include <asm/chpid.h>
8 * where we put the ssd info 7#include "chsc.h"
9 */ 8#include "schid.h"
10struct ssd_info {
11 __u8 valid:1;
12 __u8 type:7; /* subchannel type */
13 __u8 chpid[8]; /* chpids */
14 __u16 fla[8]; /* full link addresses */
15} __attribute__ ((packed));
16 9
17/* 10/*
18 * path management control word 11 * path management control word
@@ -108,7 +101,7 @@ struct subchannel {
108 struct schib schib; /* subchannel information block */ 101 struct schib schib; /* subchannel information block */
109 struct orb orb; /* operation request block */ 102 struct orb orb; /* operation request block */
110 struct ccw1 sense_ccw; /* static ccw for sense command */ 103 struct ccw1 sense_ccw; /* static ccw for sense command */
111 struct ssd_info ssd_info; /* subchannel description */ 104 struct chsc_ssd_info ssd_info; /* subchannel description */
112 struct device dev; /* entry in device tree */ 105 struct device dev; /* entry in device tree */
113 struct css_driver *driver; 106 struct css_driver *driver;
114} __attribute__ ((aligned(8))); 107} __attribute__ ((aligned(8)));
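Editor's note: with struct ssd_info gone, path validity lives in chsc_ssd_info.path_mask rather than being implied by zeroed chpid slots. The iteration pattern, the same one chpids_show() uses in device.c further down:

static void example_walk_chpids(struct subchannel *sch)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(sch->ssd_info.path_mask & mask))
			continue;	/* no valid CHPID in this slot */
		printk(KERN_INFO "path %d: chpid %02x\n", i,
		       sch->ssd_info.chpid[i].id);
	}
}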
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 90b22faabbf7..28abd697be1a 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -476,7 +476,7 @@ struct cmb_area {
476}; 476};
477 477
478static struct cmb_area cmb_area = { 478static struct cmb_area cmb_area = {
479 .lock = SPIN_LOCK_UNLOCKED, 479 .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
480 .list = LIST_HEAD_INIT(cmb_area.list), 480 .list = LIST_HEAD_INIT(cmb_area.list),
481 .num_channels = 1024, 481 .num_channels = 1024,
482}; 482};
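Editor's note: SPIN_LOCK_UNLOCKED cannot tell lockdep which lock it initializes, so static initializers move to __SPIN_LOCK_UNLOCKED(name). The pattern in isolation, with an illustrative structure:

#include <linux/spinlock.h>
#include <linux/list.h>

struct example_area {
	spinlock_t lock;
	struct list_head list;
};

static struct example_area example_area = {
	/* naming the lock after its storage keeps lockdep reports readable */
	.lock = __SPIN_LOCK_UNLOCKED(example_area.lock),
	.list = LIST_HEAD_INIT(example_area.list),
};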
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fe0ace7aece8..27c6d9e55b23 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -20,8 +20,9 @@
20#include "ioasm.h" 20#include "ioasm.h"
21#include "chsc.h" 21#include "chsc.h"
22#include "device.h" 22#include "device.h"
23#include "idset.h"
24#include "chp.h"
23 25
24int need_rescan = 0;
25int css_init_done = 0; 26int css_init_done = 0;
26static int need_reprobe = 0; 27static int need_reprobe = 0;
27static int max_ssid = 0; 28static int max_ssid = 0;
@@ -125,8 +126,52 @@ void css_sch_device_unregister(struct subchannel *sch)
125 mutex_unlock(&sch->reg_mutex); 126 mutex_unlock(&sch->reg_mutex);
126} 127}
127 128
128static int 129static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
129css_register_subchannel(struct subchannel *sch) 130{
131 int i;
132 int mask;
133
134 memset(ssd, 0, sizeof(struct chsc_ssd_info));
135 ssd->path_mask = pmcw->pim;
136 for (i = 0; i < 8; i++) {
137 mask = 0x80 >> i;
138 if (pmcw->pim & mask) {
139 chp_id_init(&ssd->chpid[i]);
140 ssd->chpid[i].id = pmcw->chpid[i];
141 }
142 }
143}
144
145static void ssd_register_chpids(struct chsc_ssd_info *ssd)
146{
147 int i;
148 int mask;
149
150 for (i = 0; i < 8; i++) {
151 mask = 0x80 >> i;
152 if (ssd->path_mask & mask)
153 if (!chp_is_registered(ssd->chpid[i]))
154 chp_new(ssd->chpid[i]);
155 }
156}
157
158void css_update_ssd_info(struct subchannel *sch)
159{
160 int ret;
161
162 if (cio_is_console(sch->schid)) {
163 /* Console is initialized too early for functions requiring
164 * memory allocation. */
165 ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
166 } else {
167 ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
168 if (ret)
169 ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
170 ssd_register_chpids(&sch->ssd_info);
171 }
172}
173
174static int css_register_subchannel(struct subchannel *sch)
130{ 175{
131 int ret; 176 int ret;
132 177
@@ -135,9 +180,7 @@ css_register_subchannel(struct subchannel *sch)
135 sch->dev.bus = &css_bus_type; 180 sch->dev.bus = &css_bus_type;
136 sch->dev.release = &css_subchannel_release; 181 sch->dev.release = &css_subchannel_release;
137 sch->dev.groups = subch_attr_groups; 182 sch->dev.groups = subch_attr_groups;
138 183 css_update_ssd_info(sch);
139 css_get_ssd_info(sch);
140
141 /* make it known to the system */ 184 /* make it known to the system */
142 ret = css_sch_device_register(sch); 185 ret = css_sch_device_register(sch);
143 if (ret) { 186 if (ret) {
@@ -306,7 +349,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
306 return css_probe_device(schid); 349 return css_probe_device(schid);
307} 350}
308 351
309static int css_evaluate_subchannel(struct subchannel_id schid, int slow) 352static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
310{ 353{
311 struct subchannel *sch; 354 struct subchannel *sch;
312 int ret; 355 int ret;
@@ -317,53 +360,66 @@ static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
317 put_device(&sch->dev); 360 put_device(&sch->dev);
318 } else 361 } else
319 ret = css_evaluate_new_subchannel(schid, slow); 362 ret = css_evaluate_new_subchannel(schid, slow);
320 363 if (ret == -EAGAIN)
321 return ret; 364 css_schedule_eval(schid);
322} 365}
323 366
324static int 367static struct idset *slow_subchannel_set;
325css_rescan_devices(struct subchannel_id schid, void *data) 368static spinlock_t slow_subchannel_lock;
369
370static int __init slow_subchannel_init(void)
326{ 371{
327 return css_evaluate_subchannel(schid, 1); 372 spin_lock_init(&slow_subchannel_lock);
373 slow_subchannel_set = idset_sch_new();
374 if (!slow_subchannel_set) {
375 printk(KERN_WARNING "cio: could not allocate slow subchannel "
376 "set\n");
377 return -ENOMEM;
378 }
379 return 0;
328} 380}
329 381
330struct slow_subchannel { 382subsys_initcall(slow_subchannel_init);
331 struct list_head slow_list;
332 struct subchannel_id schid;
333};
334
335static LIST_HEAD(slow_subchannels_head);
336static DEFINE_SPINLOCK(slow_subchannel_lock);
337 383
338static void 384static void css_slow_path_func(struct work_struct *unused)
339css_trigger_slow_path(struct work_struct *unused)
340{ 385{
341 CIO_TRACE_EVENT(4, "slowpath"); 386 struct subchannel_id schid;
342
343 if (need_rescan) {
344 need_rescan = 0;
345 for_each_subchannel(css_rescan_devices, NULL);
346 return;
347 }
348 387
388 CIO_TRACE_EVENT(4, "slowpath");
349 spin_lock_irq(&slow_subchannel_lock); 389 spin_lock_irq(&slow_subchannel_lock);
350 while (!list_empty(&slow_subchannels_head)) { 390 init_subchannel_id(&schid);
351 struct slow_subchannel *slow_sch = 391 while (idset_sch_get_first(slow_subchannel_set, &schid)) {
352 list_entry(slow_subchannels_head.next, 392 idset_sch_del(slow_subchannel_set, schid);
353 struct slow_subchannel, slow_list);
354
355 list_del_init(slow_subchannels_head.next);
356 spin_unlock_irq(&slow_subchannel_lock); 393 spin_unlock_irq(&slow_subchannel_lock);
357 css_evaluate_subchannel(slow_sch->schid, 1); 394 css_evaluate_subchannel(schid, 1);
358 spin_lock_irq(&slow_subchannel_lock); 395 spin_lock_irq(&slow_subchannel_lock);
359 kfree(slow_sch);
360 } 396 }
361 spin_unlock_irq(&slow_subchannel_lock); 397 spin_unlock_irq(&slow_subchannel_lock);
362} 398}
363 399
364DECLARE_WORK(slow_path_work, css_trigger_slow_path); 400static DECLARE_WORK(slow_path_work, css_slow_path_func);
365struct workqueue_struct *slow_path_wq; 401struct workqueue_struct *slow_path_wq;
366 402
403void css_schedule_eval(struct subchannel_id schid)
404{
405 unsigned long flags;
406
407 spin_lock_irqsave(&slow_subchannel_lock, flags);
408 idset_sch_add(slow_subchannel_set, schid);
409 queue_work(slow_path_wq, &slow_path_work);
410 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
411}
412
413void css_schedule_eval_all(void)
414{
415 unsigned long flags;
416
417 spin_lock_irqsave(&slow_subchannel_lock, flags);
418 idset_fill(slow_subchannel_set);
419 queue_work(slow_path_wq, &slow_path_work);
420 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
421}
422
367/* Reprobe subchannel if unregistered. */ 423/* Reprobe subchannel if unregistered. */
368static int reprobe_subchannel(struct subchannel_id schid, void *data) 424static int reprobe_subchannel(struct subchannel_id schid, void *data)
369{ 425{
@@ -426,33 +482,14 @@ void css_schedule_reprobe(void)
426EXPORT_SYMBOL_GPL(css_schedule_reprobe); 482EXPORT_SYMBOL_GPL(css_schedule_reprobe);
427 483
428/* 484/*
429 * Rescan for new devices. FIXME: This is slow.
430 * This function is called when we have lost CRWs due to overflows and we have
431 * to do subchannel housekeeping.
432 */
433void
434css_reiterate_subchannels(void)
435{
436 css_clear_subchannel_slow_list();
437 need_rescan = 1;
438}
439
440/*
441 * Called from the machine check handler for subchannel report words. 485 * Called from the machine check handler for subchannel report words.
442 */ 486 */
443int 487void css_process_crw(int rsid1, int rsid2)
444css_process_crw(int rsid1, int rsid2)
445{ 488{
446 int ret;
447 struct subchannel_id mchk_schid; 489 struct subchannel_id mchk_schid;
448 490
449 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", 491 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
450 rsid1, rsid2); 492 rsid1, rsid2);
451
452 if (need_rescan)
453 /* We need to iterate all subchannels anyway. */
454 return -EAGAIN;
455
456 init_subchannel_id(&mchk_schid); 493 init_subchannel_id(&mchk_schid);
457 mchk_schid.sch_no = rsid1; 494 mchk_schid.sch_no = rsid1;
458 if (rsid2 != 0) 495 if (rsid2 != 0)
@@ -463,14 +500,7 @@ css_process_crw(int rsid1, int rsid2)
463 * use stsch() to find out if the subchannel in question has come 500 * use stsch() to find out if the subchannel in question has come
464 * or gone. 501 * or gone.
465 */ 502 */
466 ret = css_evaluate_subchannel(mchk_schid, 0); 503 css_evaluate_subchannel(mchk_schid, 0);
467 if (ret == -EAGAIN) {
468 if (css_enqueue_subchannel_slow(mchk_schid)) {
469 css_clear_subchannel_slow_list();
470 need_rescan = 1;
471 }
472 }
473 return ret;
474} 504}
475 505
476static int __init 506static int __init
@@ -745,47 +775,6 @@ struct bus_type css_bus_type = {
745 775
746subsys_initcall(init_channel_subsystem); 776subsys_initcall(init_channel_subsystem);
747 777
748int
749css_enqueue_subchannel_slow(struct subchannel_id schid)
750{
751 struct slow_subchannel *new_slow_sch;
752 unsigned long flags;
753
754 new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
755 if (!new_slow_sch)
756 return -ENOMEM;
757 new_slow_sch->schid = schid;
758 spin_lock_irqsave(&slow_subchannel_lock, flags);
759 list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
760 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
761 return 0;
762}
763
764void
765css_clear_subchannel_slow_list(void)
766{
767 unsigned long flags;
768
769 spin_lock_irqsave(&slow_subchannel_lock, flags);
770 while (!list_empty(&slow_subchannels_head)) {
771 struct slow_subchannel *slow_sch =
772 list_entry(slow_subchannels_head.next,
773 struct slow_subchannel, slow_list);
774
775 list_del_init(slow_subchannels_head.next);
776 kfree(slow_sch);
777 }
778 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
779}
780
781
782
783int
784css_slow_subchannels_exist(void)
785{
786 return (!list_empty(&slow_subchannels_head));
787}
788
789MODULE_LICENSE("GPL"); 778MODULE_LICENSE("GPL");
790EXPORT_SYMBOL(css_bus_type); 779EXPORT_SYMBOL(css_bus_type);
791EXPORT_SYMBOL_GPL(css_characteristics_avail); 780EXPORT_SYMBOL_GPL(css_characteristics_avail);
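Editor's note: the slow path now has exactly two entry points: css_schedule_eval() for one subchannel and css_schedule_eval_all() for the old need_rescan case. A sketch of how callers are expected to use them; both only take slow_subchannel_lock with irqsave and queue work, so calling from interrupt context should be fine:

static void example_defer_one(struct subchannel *sch)
{
	/* no allocation, no -EAGAIN plumbing: mark the bit, kick the worker */
	css_schedule_eval(sch->schid);
}

static void example_defer_everything(void)
{
	/* replacement for the old need_rescan = 1 / slow list dance */
	css_schedule_eval_all();
}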
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index ca2bab932a8a..71fcfdc42800 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -4,8 +4,11 @@
4#include <linux/mutex.h> 4#include <linux/mutex.h>
5#include <linux/wait.h> 5#include <linux/wait.h>
6#include <linux/workqueue.h> 6#include <linux/workqueue.h>
7#include <linux/device.h>
8#include <linux/types.h>
7 9
8#include <asm/cio.h> 10#include <asm/cio.h>
11#include <asm/chpid.h>
9 12
10#include "schid.h" 13#include "schid.h"
11 14
@@ -143,13 +146,12 @@ extern void css_sch_device_unregister(struct subchannel *);
143extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 146extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
144extern int css_init_done; 147extern int css_init_done;
145extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); 148extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
146extern int css_process_crw(int, int); 149extern void css_process_crw(int, int);
147extern void css_reiterate_subchannels(void); 150extern void css_reiterate_subchannels(void);
151void css_update_ssd_info(struct subchannel *sch);
148 152
149#define __MAX_SUBCHANNEL 65535 153#define __MAX_SUBCHANNEL 65535
150#define __MAX_SSID 3 154#define __MAX_SSID 3
151#define __MAX_CHPID 255
152#define __MAX_CSSID 0
153 155
154struct channel_subsystem { 156struct channel_subsystem {
155 u8 cssid; 157 u8 cssid;
@@ -185,16 +187,12 @@ int device_trigger_verify(struct subchannel *sch);
185void device_kill_pending_timer(struct subchannel *); 187void device_kill_pending_timer(struct subchannel *);
186 188
187/* Helper functions to build lists for the slow path. */ 189/* Helper functions to build lists for the slow path. */
188extern int css_enqueue_subchannel_slow(struct subchannel_id schid); 190void css_schedule_eval(struct subchannel_id schid);
189void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); 191void css_schedule_eval_all(void);
190void css_clear_subchannel_slow_list(void);
191int css_slow_subchannels_exist(void);
192extern int need_rescan;
193 192
194int sch_is_pseudo_sch(struct subchannel *); 193int sch_is_pseudo_sch(struct subchannel *);
195 194
196extern struct workqueue_struct *slow_path_wq; 195extern struct workqueue_struct *slow_path_wq;
197extern struct work_struct slow_path_work;
198 196
199int subchannel_add_files (struct device *); 197int subchannel_add_files (struct device *);
200extern struct attribute_group *subch_attr_groups[]; 198extern struct attribute_group *subch_attr_groups[];
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e322111fb369..03355902c582 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -56,13 +56,12 @@ ccw_bus_match (struct device * dev, struct device_driver * drv)
56/* Store modalias string delimited by prefix/suffix string into buffer with 56/* Store modalias string delimited by prefix/suffix string into buffer with
57 * specified size. Return length of resulting string (excluding trailing '\0') 57 * specified size. Return length of resulting string (excluding trailing '\0')
58 * even if string doesn't fit buffer (snprintf semantics). */ 58 * even if string doesn't fit buffer (snprintf semantics). */
59static int snprint_alias(char *buf, size_t size, const char *prefix, 59static int snprint_alias(char *buf, size_t size,
60 struct ccw_device_id *id, const char *suffix) 60 struct ccw_device_id *id, const char *suffix)
61{ 61{
62 int len; 62 int len;
63 63
64 len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type, 64 len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
65 id->cu_model);
66 if (len > size) 65 if (len > size)
67 return len; 66 return len;
68 buf += len; 67 buf += len;
@@ -85,53 +84,40 @@ static int ccw_uevent(struct device *dev, char **envp, int num_envp,
85 struct ccw_device *cdev = to_ccwdev(dev); 84 struct ccw_device *cdev = to_ccwdev(dev);
86 struct ccw_device_id *id = &(cdev->id); 85 struct ccw_device_id *id = &(cdev->id);
87 int i = 0; 86 int i = 0;
88 int len; 87 int len = 0;
88 int ret;
89 char modalias_buf[30];
89 90
90 /* CU_TYPE= */ 91 /* CU_TYPE= */
91 len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1; 92 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
92 if (len > buffer_size || i >= num_envp) 93 "CU_TYPE=%04X", id->cu_type);
93 return -ENOMEM; 94 if (ret)
94 envp[i++] = buffer; 95 return ret;
95 buffer += len;
96 buffer_size -= len;
97 96
98 /* CU_MODEL= */ 97 /* CU_MODEL= */
99 len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1; 98 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
100 if (len > buffer_size || i >= num_envp) 99 "CU_MODEL=%02X", id->cu_model);
101 return -ENOMEM; 100 if (ret)
102 envp[i++] = buffer; 101 return ret;
103 buffer += len;
104 buffer_size -= len;
105 102
106 /* The next two can be zero, that's ok for us */ 103 /* The next two can be zero, that's ok for us */
107 /* DEV_TYPE= */ 104 /* DEV_TYPE= */
108 len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1; 105 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
109 if (len > buffer_size || i >= num_envp) 106 "DEV_TYPE=%04X", id->dev_type);
110 return -ENOMEM; 107 if (ret)
111 envp[i++] = buffer; 108 return ret;
112 buffer += len;
113 buffer_size -= len;
114 109
115 /* DEV_MODEL= */ 110 /* DEV_MODEL= */
116 len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X", 111 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
117 (unsigned char) id->dev_model) + 1; 112 "DEV_MODEL=%02X", id->dev_model);
118 if (len > buffer_size || i >= num_envp) 113 if (ret)
119 return -ENOMEM; 114 return ret;
120 envp[i++] = buffer;
121 buffer += len;
122 buffer_size -= len;
123 115
124 /* MODALIAS= */ 116 /* MODALIAS= */
125 len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1; 117 snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
126 if (len > buffer_size || i >= num_envp) 118 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
127 return -ENOMEM; 119 "MODALIAS=%s", modalias_buf);
128 envp[i++] = buffer; 120 return ret;
129 buffer += len;
130 buffer_size -= len;
131
132 envp[i] = NULL;
133
134 return 0;
135} 121}
136 122
137struct bus_type ccw_bus_type; 123struct bus_type ccw_bus_type;
@@ -230,12 +216,18 @@ static ssize_t
230chpids_show (struct device * dev, struct device_attribute *attr, char * buf) 216chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
231{ 217{
232 struct subchannel *sch = to_subchannel(dev); 218 struct subchannel *sch = to_subchannel(dev);
233 struct ssd_info *ssd = &sch->ssd_info; 219 struct chsc_ssd_info *ssd = &sch->ssd_info;
234 ssize_t ret = 0; 220 ssize_t ret = 0;
235 int chp; 221 int chp;
222 int mask;
236 223
237 for (chp = 0; chp < 8; chp++) 224 for (chp = 0; chp < 8; chp++) {
238 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); 225 mask = 0x80 >> chp;
226 if (ssd->path_mask & mask)
227 ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
228 else
229 ret += sprintf(buf + ret, "00 ");
230 }
239 ret += sprintf (buf+ret, "\n"); 231 ret += sprintf (buf+ret, "\n");
240 return min((ssize_t)PAGE_SIZE, ret); 232 return min((ssize_t)PAGE_SIZE, ret);
241} 233}
@@ -280,7 +272,7 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
280 struct ccw_device_id *id = &(cdev->id); 272 struct ccw_device_id *id = &(cdev->id);
281 int len; 273 int len;
282 274
283 len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1; 275 len = snprint_alias(buf, PAGE_SIZE, id, "\n") + 1;
284 276
285 return len > PAGE_SIZE ? PAGE_SIZE : len; 277 return len > PAGE_SIZE ? PAGE_SIZE : len;
286} 278}
@@ -298,16 +290,10 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
298 return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); 290 return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
299} 291}
300 292
301static void ccw_device_unregister(struct work_struct *work) 293static void ccw_device_unregister(struct ccw_device *cdev)
302{ 294{
303 struct ccw_device_private *priv;
304 struct ccw_device *cdev;
305
306 priv = container_of(work, struct ccw_device_private, kick_work);
307 cdev = priv->cdev;
308 if (test_and_clear_bit(1, &cdev->private->registered)) 295 if (test_and_clear_bit(1, &cdev->private->registered))
309 device_unregister(&cdev->dev); 296 device_del(&cdev->dev);
310 put_device(&cdev->dev);
311} 297}
312 298
313static void 299static void
@@ -324,11 +310,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
324 spin_lock_irqsave(cdev->ccwlock, flags); 310 spin_lock_irqsave(cdev->ccwlock, flags);
325 cdev->private->state = DEV_STATE_NOT_OPER; 311 cdev->private->state = DEV_STATE_NOT_OPER;
326 spin_unlock_irqrestore(cdev->ccwlock, flags); 312 spin_unlock_irqrestore(cdev->ccwlock, flags);
327 if (get_device(&cdev->dev)) { 313 ccw_device_unregister(cdev);
328 PREPARE_WORK(&cdev->private->kick_work, 314 put_device(&cdev->dev);
329 ccw_device_unregister);
330 queue_work(ccw_device_work, &cdev->private->kick_work);
331 }
332 return ; 315 return ;
333 } 316 }
334 sch = to_subchannel(cdev->dev.parent); 317 sch = to_subchannel(cdev->dev.parent);
@@ -413,11 +396,60 @@ ccw_device_set_online(struct ccw_device *cdev)
413 return (ret == 0) ? -ENODEV : ret; 396 return (ret == 0) ? -ENODEV : ret;
414} 397}
415 398
416static ssize_t 399static void online_store_handle_offline(struct ccw_device *cdev)
417online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 400{
401 if (cdev->private->state == DEV_STATE_DISCONNECTED)
402 ccw_device_remove_disconnected(cdev);
403 else if (cdev->drv && cdev->drv->set_offline)
404 ccw_device_set_offline(cdev);
405}
406
407static int online_store_recog_and_online(struct ccw_device *cdev)
408{
409 int ret;
410
411 /* Do device recognition, if needed. */
412 if (cdev->id.cu_type == 0) {
413 ret = ccw_device_recognition(cdev);
414 if (ret) {
415 printk(KERN_WARNING"Couldn't start recognition "
416 "for device %s (ret=%d)\n",
417 cdev->dev.bus_id, ret);
418 return ret;
419 }
420 wait_event(cdev->private->wait_q,
421 cdev->private->flags.recog_done);
422 }
423 if (cdev->drv && cdev->drv->set_online)
424 ccw_device_set_online(cdev);
425 return 0;
426}
427static void online_store_handle_online(struct ccw_device *cdev, int force)
428{
429 int ret;
430
431 ret = online_store_recog_and_online(cdev);
432 if (ret)
433 return;
434 if (force && cdev->private->state == DEV_STATE_BOXED) {
435 ret = ccw_device_stlck(cdev);
436 if (ret) {
437 printk(KERN_WARNING"ccw_device_stlck for device %s "
438 "returned %d!\n", cdev->dev.bus_id, ret);
439 return;
440 }
441 if (cdev->id.cu_type == 0)
442 cdev->private->state = DEV_STATE_NOT_OPER;
443 online_store_recog_and_online(cdev);
444 }
445
446}
447
448static ssize_t online_store (struct device *dev, struct device_attribute *attr,
449 const char *buf, size_t count)
418{ 450{
419 struct ccw_device *cdev = to_ccwdev(dev); 451 struct ccw_device *cdev = to_ccwdev(dev);
420 int i, force, ret; 452 int i, force;
421 char *tmp; 453 char *tmp;
422 454
423 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) 455 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
@@ -434,51 +466,17 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf
434 force = 0; 466 force = 0;
435 i = simple_strtoul(buf, &tmp, 16); 467 i = simple_strtoul(buf, &tmp, 16);
436 } 468 }
437 if (i == 1) { 469
438 /* Do device recognition, if needed. */ 470 switch (i) {
439 if (cdev->id.cu_type == 0) { 471 case 0:
440 ret = ccw_device_recognition(cdev); 472 online_store_handle_offline(cdev);
441 if (ret) { 473 break;
442 printk(KERN_WARNING"Couldn't start recognition " 474 case 1:
443 "for device %s (ret=%d)\n", 475 online_store_handle_online(cdev, force);
444 cdev->dev.bus_id, ret); 476 break;
445 goto out; 477 default:
446 } 478 count = -EINVAL;
447 wait_event(cdev->private->wait_q,
448 cdev->private->flags.recog_done);
449 }
450 if (cdev->drv && cdev->drv->set_online)
451 ccw_device_set_online(cdev);
452 } else if (i == 0) {
453 if (cdev->private->state == DEV_STATE_DISCONNECTED)
454 ccw_device_remove_disconnected(cdev);
455 else if (cdev->drv && cdev->drv->set_offline)
456 ccw_device_set_offline(cdev);
457 }
458 if (force && cdev->private->state == DEV_STATE_BOXED) {
459 ret = ccw_device_stlck(cdev);
460 if (ret) {
461 printk(KERN_WARNING"ccw_device_stlck for device %s "
462 "returned %d!\n", cdev->dev.bus_id, ret);
463 goto out;
464 }
465 /* Do device recognition, if needed. */
466 if (cdev->id.cu_type == 0) {
467 cdev->private->state = DEV_STATE_NOT_OPER;
468 ret = ccw_device_recognition(cdev);
469 if (ret) {
470 printk(KERN_WARNING"Couldn't start recognition "
471 "for device %s (ret=%d)\n",
472 cdev->dev.bus_id, ret);
473 goto out;
474 }
475 wait_event(cdev->private->wait_q,
476 cdev->private->flags.recog_done);
477 }
478 if (cdev->drv && cdev->drv->set_online)
479 ccw_device_set_online(cdev);
480 } 479 }
481 out:
482 if (cdev->drv) 480 if (cdev->drv)
483 module_put(cdev->drv->owner); 481 module_put(cdev->drv->owner);
484 atomic_set(&cdev->private->onoff, 0); 482 atomic_set(&cdev->private->onoff, 0);
@@ -548,17 +546,10 @@ static struct attribute_group ccwdev_attr_group = {
548 .attrs = ccwdev_attrs, 546 .attrs = ccwdev_attrs,
549}; 547};
550 548
551static int 549struct attribute_group *ccwdev_attr_groups[] = {
552device_add_files (struct device *dev) 550 &ccwdev_attr_group,
553{ 551 NULL,
554 return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); 552};
555}
556
557static void
558device_remove_files(struct device *dev)
559{
560 sysfs_remove_group(&dev->kobj, &ccwdev_attr_group);
561}
562 553
563/* this is a simple abstraction for device_register that sets the 554/* this is a simple abstraction for device_register that sets the
564 * correct bus type and adds the bus specific files */ 555 * correct bus type and adds the bus specific files */
@@ -573,10 +564,6 @@ static int ccw_device_register(struct ccw_device *cdev)
573 return ret; 564 return ret;
574 565
575 set_bit(1, &cdev->private->registered); 566 set_bit(1, &cdev->private->registered);
576 if ((ret = device_add_files(dev))) {
577 if (test_and_clear_bit(1, &cdev->private->registered))
578 device_del(dev);
579 }
580 return ret; 567 return ret;
581} 568}
582 569
@@ -648,10 +635,6 @@ ccw_device_add_changed(struct work_struct *work)
648 return; 635 return;
649 } 636 }
650 set_bit(1, &cdev->private->registered); 637 set_bit(1, &cdev->private->registered);
651 if (device_add_files(&cdev->dev)) {
652 if (test_and_clear_bit(1, &cdev->private->registered))
653 device_unregister(&cdev->dev);
654 }
655} 638}
656 639
657void ccw_device_do_unreg_rereg(struct work_struct *work) 640void ccw_device_do_unreg_rereg(struct work_struct *work)
@@ -664,9 +647,7 @@ void ccw_device_do_unreg_rereg(struct work_struct *work)
664 cdev = priv->cdev; 647 cdev = priv->cdev;
665 sch = to_subchannel(cdev->dev.parent); 648 sch = to_subchannel(cdev->dev.parent);
666 649
667 device_remove_files(&cdev->dev); 650 ccw_device_unregister(cdev);
668 if (test_and_clear_bit(1, &cdev->private->registered))
669 device_del(&cdev->dev);
670 PREPARE_WORK(&cdev->private->kick_work, 651 PREPARE_WORK(&cdev->private->kick_work,
671 ccw_device_add_changed); 652 ccw_device_add_changed);
672 queue_work(ccw_device_work, &cdev->private->kick_work); 653 queue_work(ccw_device_work, &cdev->private->kick_work);
@@ -705,6 +686,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
705 cdev->dev.parent = &sch->dev; 686 cdev->dev.parent = &sch->dev;
706 cdev->dev.release = ccw_device_release; 687 cdev->dev.release = ccw_device_release;
707 INIT_LIST_HEAD(&cdev->private->kick_work.entry); 688 INIT_LIST_HEAD(&cdev->private->kick_work.entry);
689 cdev->dev.groups = ccwdev_attr_groups;
708 /* Do first half of device_register. */ 690 /* Do first half of device_register. */
709 device_initialize(&cdev->dev); 691 device_initialize(&cdev->dev);
710 if (!get_device(&sch->dev)) { 692 if (!get_device(&sch->dev)) {
@@ -736,6 +718,7 @@ static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
736static void sch_attach_device(struct subchannel *sch, 718static void sch_attach_device(struct subchannel *sch,
737 struct ccw_device *cdev) 719 struct ccw_device *cdev)
738{ 720{
721 css_update_ssd_info(sch);
739 spin_lock_irq(sch->lock); 722 spin_lock_irq(sch->lock);
740 sch->dev.driver_data = cdev; 723 sch->dev.driver_data = cdev;
741 cdev->private->schid = sch->schid; 724 cdev->private->schid = sch->schid;
@@ -871,7 +854,7 @@ io_subchannel_register(struct work_struct *work)
871 priv = container_of(work, struct ccw_device_private, kick_work); 854 priv = container_of(work, struct ccw_device_private, kick_work);
872 cdev = priv->cdev; 855 cdev = priv->cdev;
873 sch = to_subchannel(cdev->dev.parent); 856 sch = to_subchannel(cdev->dev.parent);
874 857 css_update_ssd_info(sch);
875 /* 858 /*
876 * io_subchannel_register() will also be called after device 859 * io_subchannel_register() will also be called after device
877 * recognition has been done for a boxed device (which will already 860 * recognition has been done for a boxed device (which will already
@@ -1133,15 +1116,8 @@ io_subchannel_remove (struct subchannel *sch)
1133 sch->dev.driver_data = NULL; 1116 sch->dev.driver_data = NULL;
1134 cdev->private->state = DEV_STATE_NOT_OPER; 1117 cdev->private->state = DEV_STATE_NOT_OPER;
1135 spin_unlock_irqrestore(cdev->ccwlock, flags); 1118 spin_unlock_irqrestore(cdev->ccwlock, flags);
1136 /* 1119 ccw_device_unregister(cdev);
1137 * Put unregistration on workqueue to avoid livelocks on the css bus 1120 put_device(&cdev->dev);
1138 * semaphore.
1139 */
1140 if (get_device(&cdev->dev)) {
1141 PREPARE_WORK(&cdev->private->kick_work,
1142 ccw_device_unregister);
1143 queue_work(ccw_device_work, &cdev->private->kick_work);
1144 }
1145 return 0; 1121 return 0;
1146} 1122}
1147 1123
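Editor's note: the uevent rework trades hand-rolled buffer arithmetic for add_uevent_var(), which tracks the slot index and remaining space itself and returns non-zero when either runs out. A minimal sketch of the calling convention, with made-up keys:

static int example_uevent(struct device *dev, char **envp, int num_envp,
			  char *buffer, int buffer_size)
{
	int i = 0;
	int len = 0;
	int ret;

	ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
			     "EXAMPLE_KEY=%04X", 0x1234);
	if (ret)
		return ret;	/* envp slots or buffer exhausted */
	return add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
			      "EXAMPLE_FLAG=%d", 1);
}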
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 089a3ddd6265..898ec3b2bebb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -15,6 +15,7 @@
15 15
16#include <asm/ccwdev.h> 16#include <asm/ccwdev.h>
17#include <asm/cio.h> 17#include <asm/cio.h>
18#include <asm/chpid.h>
18 19
19#include "cio.h" 20#include "cio.h"
20#include "cio_debug.h" 21#include "cio_debug.h"
@@ -22,6 +23,7 @@
22#include "device.h" 23#include "device.h"
23#include "chsc.h" 24#include "chsc.h"
24#include "ioasm.h" 25#include "ioasm.h"
26#include "chp.h"
25 27
26int 28int
27device_is_online(struct subchannel *sch) 29device_is_online(struct subchannel *sch)
@@ -210,14 +212,18 @@ static void
210__recover_lost_chpids(struct subchannel *sch, int old_lpm) 212__recover_lost_chpids(struct subchannel *sch, int old_lpm)
211{ 213{
212 int mask, i; 214 int mask, i;
215 struct chp_id chpid;
213 216
217 chp_id_init(&chpid);
214 for (i = 0; i<8; i++) { 218 for (i = 0; i<8; i++) {
215 mask = 0x80 >> i; 219 mask = 0x80 >> i;
216 if (!(sch->lpm & mask)) 220 if (!(sch->lpm & mask))
217 continue; 221 continue;
218 if (old_lpm & mask) 222 if (old_lpm & mask)
219 continue; 223 continue;
220 chpid_is_actually_online(sch->schib.pmcw.chpid[i]); 224 chpid.id = sch->schib.pmcw.chpid[i];
225 if (!chp_is_registered(chpid))
226 css_schedule_eval_all();
221 } 227 }
222} 228}
223 229
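Editor's note: the recovery rule in __recover_lost_chpids() reduces to: a path returning on a CHPID the channel subsystem has not registered forces a full evaluation pass. The same logic distilled into a hypothetical helper:

static void example_note_returned_path(u8 id)
{
	struct chp_id chpid;

	chp_id_init(&chpid);
	chpid.id = id;
	if (!chp_is_registered(chpid))
		css_schedule_eval_all();	/* unknown CHPID: rescan everything */
}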
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 7c7775aae38a..16f59fcb66b1 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -16,12 +16,14 @@
16 16
17#include <asm/ccwdev.h> 17#include <asm/ccwdev.h>
18#include <asm/idals.h> 18#include <asm/idals.h>
19#include <asm/chpid.h>
19 20
20#include "cio.h" 21#include "cio.h"
21#include "cio_debug.h" 22#include "cio_debug.h"
22#include "css.h" 23#include "css.h"
23#include "chsc.h" 24#include "chsc.h"
24#include "device.h" 25#include "device.h"
26#include "chp.h"
25 27
26int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags) 28int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
27{ 29{
@@ -606,9 +608,12 @@ void *
606ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) 608ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
607{ 609{
608 struct subchannel *sch; 610 struct subchannel *sch;
611 struct chp_id chpid;
609 612
610 sch = to_subchannel(cdev->dev.parent); 613 sch = to_subchannel(cdev->dev.parent);
611 return chsc_get_chp_desc(sch, chp_no); 614 chp_id_init(&chpid);
615 chpid.id = sch->schib.pmcw.chpid[chp_no];
616 return chp_get_chp_desc(chpid);
612} 617}
613 618
614// FIXME: these have to go: 619// FIXME: these have to go:
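Editor's note: chp_get_chp_desc() hands back a kmalloc()ed copy of the descriptor (see the removed chsc_get_chp_desc() above for the original semantics), so the caller owns the memory. A usage sketch; the printed byte is the desc field of struct channel_path_desc:

static void example_show_chp_type(struct ccw_device *cdev)
{
	struct channel_path_desc *desc;

	desc = ccw_device_get_chp_desc(cdev, 0);
	if (!desc)
		return;
	printk(KERN_INFO "chpid type: %x\n", desc->desc);
	kfree(desc);	/* caller owns the copied descriptor */
}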
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
new file mode 100644
index 000000000000..16ea828e99f7
--- /dev/null
+++ b/drivers/s390/cio/idset.c
@@ -0,0 +1,112 @@
1/*
2 * drivers/s390/cio/idset.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/slab.h>
9#include <asm/bitops.h>
10#include "idset.h"
11#include "css.h"
12
13struct idset {
14 int num_ssid;
15 int num_id;
16 unsigned long bitmap[0];
17};
18
19static inline unsigned long bitmap_size(int num_ssid, int num_id)
20{
21 return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
22}
23
24static struct idset *idset_new(int num_ssid, int num_id)
25{
26 struct idset *set;
27
28 set = kzalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id),
29 GFP_KERNEL);
30 if (set) {
31 set->num_ssid = num_ssid;
32 set->num_id = num_id;
33 }
34 return set;
35}
36
37void idset_free(struct idset *set)
38{
39 kfree(set);
40}
41
42void idset_clear(struct idset *set)
43{
44 memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id));
45}
46
47void idset_fill(struct idset *set)
48{
49 memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
50}
51
52static inline void idset_add(struct idset *set, int ssid, int id)
53{
54 set_bit(ssid * set->num_id + id, set->bitmap);
55}
56
57static inline void idset_del(struct idset *set, int ssid, int id)
58{
59 clear_bit(ssid * set->num_id + id, set->bitmap);
60}
61
62static inline int idset_contains(struct idset *set, int ssid, int id)
63{
64 return test_bit(ssid * set->num_id + id, set->bitmap);
65}
66
67static inline int idset_get_first(struct idset *set, int *ssid, int *id)
68{
69 int bitnum;
70
71 bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
72 if (bitnum >= set->num_ssid * set->num_id)
73 return 0;
74 *ssid = bitnum / set->num_id;
75 *id = bitnum % set->num_id;
76 return 1;
77}
78
79struct idset *idset_sch_new(void)
80{
81 return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1);
82}
83
84void idset_sch_add(struct idset *set, struct subchannel_id schid)
85{
86 idset_add(set, schid.ssid, schid.sch_no);
87}
88
89void idset_sch_del(struct idset *set, struct subchannel_id schid)
90{
91 idset_del(set, schid.ssid, schid.sch_no);
92}
93
94int idset_sch_contains(struct idset *set, struct subchannel_id schid)
95{
96 return idset_contains(set, schid.ssid, schid.sch_no);
97}
98
99int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
100{
101 int ssid = 0;
102 int id = 0;
103 int rc;
104
105 rc = idset_get_first(set, &ssid, &id);
106 if (rc) {
107 init_subchannel_id(schid);
108 schid->ssid = ssid;
109 schid->sch_no = id;
110 }
111 return rc;
112}
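Editor's note: a usage sketch for the subchannel-ID wrappers, mirroring the drain loop in css_slow_path_func(): mark IDs, then repeatedly fetch and clear the first set bit. Process context is assumed, since idset_sch_new() allocates with GFP_KERNEL:

static int example_idset_drain(struct subchannel_id a, struct subchannel_id b)
{
	struct idset *set;
	struct subchannel_id schid;

	set = idset_sch_new();
	if (!set)
		return -ENOMEM;
	idset_sch_add(set, a);
	idset_sch_add(set, b);
	while (idset_sch_get_first(set, &schid)) {
		idset_sch_del(set, schid);
		/* ... evaluate schid here ... */
	}
	idset_free(set);
	return 0;
}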
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
new file mode 100644
index 000000000000..144466ab8c15
--- /dev/null
+++ b/drivers/s390/cio/idset.h
@@ -0,0 +1,25 @@
1/*
2 * drivers/s390/cio/idset.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#ifndef S390_IDSET_H
9#define S390_IDSET_H S390_IDSET_H
10
11#include "schid.h"
12
13struct idset;
14
15void idset_free(struct idset *set);
16void idset_clear(struct idset *set);
17void idset_fill(struct idset *set);
18
19struct idset *idset_sch_new(void);
20void idset_sch_add(struct idset *set, struct subchannel_id id);
21void idset_sch_del(struct idset *set, struct subchannel_id id);
22int idset_sch_contains(struct idset *set, struct subchannel_id id);
23int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
24
25#endif /* S390_IDSET_H */
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index ad6d82940069..7153dd959082 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -1,6 +1,7 @@
1#ifndef S390_CIO_IOASM_H 1#ifndef S390_CIO_IOASM_H
2#define S390_CIO_IOASM_H 2#define S390_CIO_IOASM_H
3 3
4#include <asm/chpid.h>
4#include "schid.h" 5#include "schid.h"
5 6
6/* 7/*
@@ -189,9 +190,9 @@ static inline int chsc(void *chsc_area)
189 return cc; 190 return cc;
190} 191}
191 192
192static inline int rchp(int chpid) 193static inline int rchp(struct chp_id chpid)
193{ 194{
194 register unsigned int reg1 asm ("1") = chpid; 195 register struct chp_id reg1 asm ("1") = chpid;
195 int ccode; 196 int ccode;
196 197
197 asm volatile( 198 asm volatile(
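Editor's note: rchp() now takes the CHPID as a structure passed in general register 1, which presumes struct chp_id fits a single register word (an asm/chpid.h detail not shown here). Driving it over the full CHPID range, as css_reset() does above:

static void example_reset_all_chpids(void)
{
	struct chp_id chpid;
	int i;

	chp_id_init(&chpid);
	for (i = 0; i <= __MAX_CHPID; i++) {
		chpid.id = i;
		rchp(chpid);	/* 0 or 2 means the reset is underway */
	}
}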
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index b0f813e6f48e..b20fd0681733 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -1642,21 +1642,19 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
1642 struct channel *ch; 1642 struct channel *ch;
1643 1643
1644 DBF_TEXT(trace, 2, __FUNCTION__); 1644 DBF_TEXT(trace, 2, __FUNCTION__);
1645 if ((ch = 1645 ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1646 (struct channel *) kmalloc(sizeof (struct channel), 1646 if (!ch) {
1647 GFP_KERNEL)) == NULL) {
1648 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1647 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1649 return -1; 1648 return -1;
1650 } 1649 }
1651 memset(ch, 0, sizeof (struct channel)); 1650 /* assure all flags and counters are reset */
1652 if ((ch->ccw = kmalloc(8*sizeof(struct ccw1), 1651 ch->ccw = kzalloc(8 * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1653 GFP_KERNEL | GFP_DMA)) == NULL) { 1652 if (!ch->ccw) {
1654 kfree(ch); 1653 kfree(ch);
1655 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1654 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1656 return -1; 1655 return -1;
1657 } 1656 }
1658 1657
1659 memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // assure all flags and counters are reset
1660 1658
1661 /** 1659 /**
1662 * "static" ccws are used in the following way: 1660 * "static" ccws are used in the following way:
@@ -1696,15 +1694,14 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
1696 return -1; 1694 return -1;
1697 } 1695 }
1698 fsm_newstate(ch->fsm, CH_STATE_IDLE); 1696 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1699 if ((ch->irb = kmalloc(sizeof (struct irb), 1697 ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
1700 GFP_KERNEL)) == NULL) { 1698 if (!ch->irb) {
1701 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1699 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1702 kfree_fsm(ch->fsm); 1700 kfree_fsm(ch->fsm);
1703 kfree(ch->ccw); 1701 kfree(ch->ccw);
1704 kfree(ch); 1702 kfree(ch);
1705 return -1; 1703 return -1;
1706 } 1704 }
1707 memset(ch->irb, 0, sizeof (struct irb));
1708 while (*c && less_than((*c)->id, ch->id)) 1705 while (*c && less_than((*c)->id, ch->id))
1709 c = &(*c)->next; 1706 c = &(*c)->next;
1710 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) { 1707 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
@@ -2751,14 +2748,13 @@ ctc_probe_device(struct ccwgroup_device *cgdev)
2751 if (!get_device(&cgdev->dev)) 2748 if (!get_device(&cgdev->dev))
2752 return -ENODEV; 2749 return -ENODEV;
2753 2750
2754 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL); 2751 priv = kzalloc(sizeof(struct ctc_priv), GFP_KERNEL);
2755 if (!priv) { 2752 if (!priv) {
2756 ctc_pr_err("%s: Out of memory\n", __func__); 2753 ctc_pr_err("%s: Out of memory\n", __func__);
2757 put_device(&cgdev->dev); 2754 put_device(&cgdev->dev);
2758 return -ENOMEM; 2755 return -ENOMEM;
2759 } 2756 }
2760 2757
2761 memset(priv, 0, sizeof (struct ctc_priv));
2762 rc = ctc_add_files(&cgdev->dev); 2758 rc = ctc_add_files(&cgdev->dev);
2763 if (rc) { 2759 if (rc) {
2764 kfree(priv); 2760 kfree(priv);
@@ -2799,10 +2795,9 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device,
2799 DBF_TEXT(setup, 3, __FUNCTION__); 2795 DBF_TEXT(setup, 3, __FUNCTION__);
2800 2796
2801 if (alloc_device) { 2797 if (alloc_device) {
2802 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL); 2798 dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
2803 if (!dev) 2799 if (!dev)
2804 return NULL; 2800 return NULL;
2805 memset(dev, 0, sizeof (struct net_device));
2806 } 2801 }
2807 2802
2808 dev->priv = privptr; 2803 dev->priv = privptr;
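All three ctcmain.c hunks above apply the same conversion: kzalloc()
allocates and zeroes in a single call, so the explicit memset() after a
successful kmalloc() can be dropped. In minimal form (names taken from
add_channel() above):

	/* before: allocate, check for failure, then zero explicitly */
	ch = kmalloc(sizeof(struct channel), GFP_KERNEL);
	if (!ch)
		return -1;
	memset(ch, 0, sizeof(struct channel));

	/* after: kzalloc() hands back memory that is already zeroed */
	ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
	if (!ch)
		return -1;

The ch->ccw buffer keeps GFP_DMA in its allocation flags because channel
programs must remain 31-bit addressable; only the zeroing idiom changes.
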
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 806bb1a921eb..644a06eba828 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -21,6 +21,7 @@
21#include "cio/cio.h" 21#include "cio/cio.h"
22#include "cio/chsc.h" 22#include "cio/chsc.h"
23#include "cio/css.h" 23#include "cio/css.h"
24#include "cio/chp.h"
24#include "s390mach.h" 25#include "s390mach.h"
25 26
26static struct semaphore m_sem; 27static struct semaphore m_sem;
@@ -44,14 +45,13 @@ static int
44s390_collect_crw_info(void *param) 45s390_collect_crw_info(void *param)
45{ 46{
46 struct crw crw[2]; 47 struct crw crw[2];
47 int ccode, ret, slow; 48 int ccode;
48 struct semaphore *sem; 49 struct semaphore *sem;
49 unsigned int chain; 50 unsigned int chain;
50 51
51 sem = (struct semaphore *)param; 52 sem = (struct semaphore *)param;
52repeat: 53repeat:
53 down_interruptible(sem); 54 down_interruptible(sem);
54 slow = 0;
55 chain = 0; 55 chain = 0;
56 while (1) { 56 while (1) {
57 if (unlikely(chain > 1)) { 57 if (unlikely(chain > 1)) {
@@ -84,9 +84,8 @@ repeat:
84 /* Check for overflows. */ 84 /* Check for overflows. */
85 if (crw[chain].oflw) { 85 if (crw[chain].oflw) {
86 pr_debug("%s: crw overflow detected!\n", __FUNCTION__); 86 pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
87 css_reiterate_subchannels(); 87 css_schedule_eval_all();
88 chain = 0; 88 chain = 0;
89 slow = 1;
90 continue; 89 continue;
91 } 90 }
92 switch (crw[chain].rsc) { 91 switch (crw[chain].rsc) {
@@ -94,10 +93,7 @@ repeat:
94 if (crw[0].chn && !chain) 93 if (crw[0].chn && !chain)
95 break; 94 break;
96 pr_debug("source is subchannel %04X\n", crw[0].rsid); 95 pr_debug("source is subchannel %04X\n", crw[0].rsid);
97 ret = css_process_crw (crw[0].rsid, 96 css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0);
98 chain ? crw[1].rsid : 0);
99 if (ret == -EAGAIN)
100 slow = 1;
101 break; 97 break;
102 case CRW_RSC_MONITOR: 98 case CRW_RSC_MONITOR:
103 pr_debug("source is monitoring facility\n"); 99 pr_debug("source is monitoring facility\n");
@@ -116,28 +112,23 @@ repeat:
116 } 112 }
117 switch (crw[0].erc) { 113 switch (crw[0].erc) {
118 case CRW_ERC_IPARM: /* Path has come. */ 114 case CRW_ERC_IPARM: /* Path has come. */
119 ret = chp_process_crw(crw[0].rsid, 1); 115 chp_process_crw(crw[0].rsid, 1);
120 break; 116 break;
121 case CRW_ERC_PERRI: /* Path has gone. */ 117 case CRW_ERC_PERRI: /* Path has gone. */
122 case CRW_ERC_PERRN: 118 case CRW_ERC_PERRN:
123 ret = chp_process_crw(crw[0].rsid, 0); 119 chp_process_crw(crw[0].rsid, 0);
124 break; 120 break;
125 default: 121 default:
126 pr_debug("Don't know how to handle erc=%x\n", 122 pr_debug("Don't know how to handle erc=%x\n",
127 crw[0].erc); 123 crw[0].erc);
128 ret = 0;
129 } 124 }
130 if (ret == -EAGAIN)
131 slow = 1;
132 break; 125 break;
133 case CRW_RSC_CONFIG: 126 case CRW_RSC_CONFIG:
134 pr_debug("source is configuration-alert facility\n"); 127 pr_debug("source is configuration-alert facility\n");
135 break; 128 break;
136 case CRW_RSC_CSS: 129 case CRW_RSC_CSS:
137 pr_debug("source is channel subsystem\n"); 130 pr_debug("source is channel subsystem\n");
138 ret = chsc_process_crw(); 131 chsc_process_crw();
139 if (ret == -EAGAIN)
140 slow = 1;
141 break; 132 break;
142 default: 133 default:
143 pr_debug("unknown source\n"); 134 pr_debug("unknown source\n");
@@ -146,8 +137,6 @@ repeat:
146 /* chain is always 0 or 1 here. */ 137 /* chain is always 0 or 1 here. */
147 chain = crw[chain].chn ? chain + 1 : 0; 138 chain = crw[chain].chn ? chain + 1 : 0;
148 } 139 }
149 if (slow)
150 queue_work(slow_path_wq, &slow_path_work);
151 goto repeat; 140 goto repeat;
152 return 0; 141 return 0;
153} 142}
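The CRW collector above no longer tracks a local "slow" flag and no
longer queues slow_path_work itself; scheduling of the slow-path
evaluation moves into the css layer. A sketch of what the new
css_schedule_eval_all() is expected to do, assuming the idset helpers
added elsewhere in this series (see idset.c/idset.h in the diffstat);
this is a reconstruction, not a quote from the patch:

	/* Mark every subchannel for re-evaluation and kick the
	 * slow-path worker. Callers such as the CRW overflow branch
	 * need no private bookkeeping any more. */
	void css_schedule_eval_all(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&slow_subchannel_lock, flags);
		idset_fill(slow_subchannel_set);
		queue_work(slow_path_wq, &slow_path_work);
		spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	}

Likewise css_process_crw(), chp_process_crw() and chsc_process_crw() now
arrange any required evaluation internally, which is why their return
values are no longer checked here.
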
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 090743d2f914..19343f9675c3 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -357,6 +357,24 @@ static __init int create_proc_sysinfo(void)
357 357
358__initcall(create_proc_sysinfo); 358__initcall(create_proc_sysinfo);
359 359
360int get_cpu_capability(unsigned int *capability)
361{
362 struct sysinfo_1_2_2 *info;
363 int rc;
364
365 info = (void *) get_zeroed_page(GFP_KERNEL);
366 if (!info)
367 return -ENOMEM;
368 rc = stsi(info, 1, 2, 2);
369 if (rc == -ENOSYS)
370 goto out;
371 rc = 0;
372 *capability = info->capability;
373out:
374 free_page((unsigned long) info);
375 return rc;
376}
377
360/* 378/*
361 * CPU capability might have changed. Therefore recalculate loops_per_jiffy. 379 * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
362 */ 380 */
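
The new get_cpu_capability() returns 0 on success, -ENOMEM if no page
could be allocated for the SYSIB, and -ENOSYS when STSI 1.2.2 is not
implemented on the machine. A hypothetical caller, for illustration only
(not part of this patch):

	/* Read the machine's CPU capability rating; tolerate machines
	 * on which STSI 1.2.2 is unavailable. */
	unsigned int cap;

	if (get_cpu_capability(&cap) == 0)
		printk(KERN_INFO "cpu capability rating: %u\n", cap);
	else
		printk(KERN_WARNING "cpu capability not available\n");

The context comment closing the hunk above ("CPU capability might have
changed. Therefore recalculate loops_per_jiffy.") names the intended
consumer of this helper.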