Diffstat (limited to 'drivers/s390/cio')
 -rw-r--r--  drivers/s390/cio/Makefile      |    2
 -rw-r--r--  drivers/s390/cio/ccwgroup.c    |   33
 -rw-r--r--  drivers/s390/cio/chp.c         |  683
 -rw-r--r--  drivers/s390/cio/chp.h         |   53
 -rw-r--r--  drivers/s390/cio/chsc.c        | 1024
 -rw-r--r--  drivers/s390/cio/chsc.h        |   42
 -rw-r--r--  drivers/s390/cio/cio.c         |   52
 -rw-r--r--  drivers/s390/cio/cio.h         |   17
 -rw-r--r--  drivers/s390/cio/cmf.c         |    2
 -rw-r--r--  drivers/s390/cio/css.c         |  201
 -rw-r--r--  drivers/s390/cio/css.h         |   16
 -rw-r--r--  drivers/s390/cio/device.c      |  246
 -rw-r--r--  drivers/s390/cio/device_fsm.c  |    8
 -rw-r--r--  drivers/s390/cio/device_ops.c  |    7
 -rw-r--r--  drivers/s390/cio/idset.c       |  112
 -rw-r--r--  drivers/s390/cio/idset.h       |   25
 -rw-r--r--  drivers/s390/cio/ioasm.h       |    5
17 files changed, 1434 insertions, 1094 deletions
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index c490c2a1c2fc..cfaf77b320f5 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,7 +2,7 @@
2 | # Makefile for the S/390 common i/o drivers | 2 | # Makefile for the S/390 common i/o drivers |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += airq.o blacklist.o chsc.o cio.o css.o | 5 | obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o |
6 | ccw_device-objs += device.o device_fsm.o device_ops.o | 6 | ccw_device-objs += device.o device_fsm.o device_ops.o |
7 | ccw_device-objs += device_id.o device_pgid.o device_status.o | 7 | ccw_device-objs += device_id.o device_pgid.o device_status.o |
8 | obj-y += ccw_device.o cmf.o | 8 | obj-y += ccw_device.o cmf.o |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5aeb68e732b0..e5ccda63e883 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev)
75 | { | 75 | { |
76 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); | 76 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); |
77 | 77 | ||
78 | mutex_lock(&gdev->reg_mutex); | ||
78 | __ccwgroup_remove_symlinks(gdev); | 79 | __ccwgroup_remove_symlinks(gdev); |
79 | device_unregister(dev); | 80 | device_unregister(dev); |
81 | mutex_unlock(&gdev->reg_mutex); | ||
80 | } | 82 | } |
81 | 83 | ||
82 | static ssize_t | 84 | static ssize_t |
@@ -173,7 +175,8 @@ ccwgroup_create(struct device *root,
173 | return -ENOMEM; | 175 | return -ENOMEM; |
174 | 176 | ||
175 | atomic_set(&gdev->onoff, 0); | 177 | atomic_set(&gdev->onoff, 0); |
176 | 178 | mutex_init(&gdev->reg_mutex); | |
179 | mutex_lock(&gdev->reg_mutex); | ||
177 | for (i = 0; i < argc; i++) { | 180 | for (i = 0; i < argc; i++) { |
178 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); | 181 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); |
179 | 182 | ||
@@ -183,12 +186,12 @@ ccwgroup_create(struct device *root,
183 | || gdev->cdev[i]->id.driver_info != | 186 | || gdev->cdev[i]->id.driver_info != |
184 | gdev->cdev[0]->id.driver_info) { | 187 | gdev->cdev[0]->id.driver_info) { |
185 | rc = -EINVAL; | 188 | rc = -EINVAL; |
186 | goto free_dev; | 189 | goto error; |
187 | } | 190 | } |
188 | /* Don't allow a device to belong to more than one group. */ | 191 | /* Don't allow a device to belong to more than one group. */ |
189 | if (gdev->cdev[i]->dev.driver_data) { | 192 | if (gdev->cdev[i]->dev.driver_data) { |
190 | rc = -EINVAL; | 193 | rc = -EINVAL; |
191 | goto free_dev; | 194 | goto error; |
192 | } | 195 | } |
193 | gdev->cdev[i]->dev.driver_data = gdev; | 196 | gdev->cdev[i]->dev.driver_data = gdev; |
194 | } | 197 | } |
@@ -203,9 +206,8 @@ ccwgroup_create(struct device *root,
203 | gdev->cdev[0]->dev.bus_id); | 206 | gdev->cdev[0]->dev.bus_id); |
204 | 207 | ||
205 | rc = device_register(&gdev->dev); | 208 | rc = device_register(&gdev->dev); |
206 | |||
207 | if (rc) | 209 | if (rc) |
208 | goto free_dev; | 210 | goto error; |
209 | get_device(&gdev->dev); | 211 | get_device(&gdev->dev); |
210 | rc = device_create_file(&gdev->dev, &dev_attr_ungroup); | 212 | rc = device_create_file(&gdev->dev, &dev_attr_ungroup); |
211 | 213 | ||
@@ -216,6 +218,7 @@ ccwgroup_create(struct device *root,
216 | 218 | ||
217 | rc = __ccwgroup_create_symlinks(gdev); | 219 | rc = __ccwgroup_create_symlinks(gdev); |
218 | if (!rc) { | 220 | if (!rc) { |
221 | mutex_unlock(&gdev->reg_mutex); | ||
219 | put_device(&gdev->dev); | 222 | put_device(&gdev->dev); |
220 | return 0; | 223 | return 0; |
221 | } | 224 | } |
@@ -224,19 +227,12 @@ ccwgroup_create(struct device *root,
224 | error: | 227 | error: |
225 | for (i = 0; i < argc; i++) | 228 | for (i = 0; i < argc; i++) |
226 | if (gdev->cdev[i]) { | 229 | if (gdev->cdev[i]) { |
227 | put_device(&gdev->cdev[i]->dev); | ||
228 | gdev->cdev[i]->dev.driver_data = NULL; | ||
229 | } | ||
230 | put_device(&gdev->dev); | ||
231 | return rc; | ||
232 | free_dev: | ||
233 | for (i = 0; i < argc; i++) | ||
234 | if (gdev->cdev[i]) { | ||
235 | if (gdev->cdev[i]->dev.driver_data == gdev) | 230 | if (gdev->cdev[i]->dev.driver_data == gdev) |
236 | gdev->cdev[i]->dev.driver_data = NULL; | 231 | gdev->cdev[i]->dev.driver_data = NULL; |
237 | put_device(&gdev->cdev[i]->dev); | 232 | put_device(&gdev->cdev[i]->dev); |
238 | } | 233 | } |
239 | kfree(gdev); | 234 | mutex_unlock(&gdev->reg_mutex); |
235 | put_device(&gdev->dev); | ||
240 | return rc; | 236 | return rc; |
241 | } | 237 | } |
242 | 238 | ||
@@ -422,8 +418,12 @@ ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
422 | get_driver(&cdriver->driver); | 418 | get_driver(&cdriver->driver); |
423 | while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, | 419 | while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, |
424 | __ccwgroup_match_all))) { | 420 | __ccwgroup_match_all))) { |
425 | __ccwgroup_remove_symlinks(to_ccwgroupdev(dev)); | 421 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); |
422 | |||
423 | mutex_lock(&gdev->reg_mutex); | ||
424 | __ccwgroup_remove_symlinks(gdev); | ||
426 | device_unregister(dev); | 425 | device_unregister(dev); |
426 | mutex_unlock(&gdev->reg_mutex); | ||
427 | put_device(dev); | 427 | put_device(dev); |
428 | } | 428 | } |
429 | put_driver(&cdriver->driver); | 429 | put_driver(&cdriver->driver); |
@@ -444,8 +444,10 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
444 | if (cdev->dev.driver_data) { | 444 | if (cdev->dev.driver_data) { |
445 | gdev = (struct ccwgroup_device *)cdev->dev.driver_data; | 445 | gdev = (struct ccwgroup_device *)cdev->dev.driver_data; |
446 | if (get_device(&gdev->dev)) { | 446 | if (get_device(&gdev->dev)) { |
447 | mutex_lock(&gdev->reg_mutex); | ||
447 | if (device_is_registered(&gdev->dev)) | 448 | if (device_is_registered(&gdev->dev)) |
448 | return gdev; | 449 | return gdev; |
450 | mutex_unlock(&gdev->reg_mutex); | ||
449 | put_device(&gdev->dev); | 451 | put_device(&gdev->dev); |
450 | } | 452 | } |
451 | return NULL; | 453 | return NULL; |
@@ -465,6 +467,7 @@ ccwgroup_remove_ccwdev(struct ccw_device *cdev)
465 | if (gdev) { | 467 | if (gdev) { |
466 | __ccwgroup_remove_symlinks(gdev); | 468 | __ccwgroup_remove_symlinks(gdev); |
467 | device_unregister(&gdev->dev); | 469 | device_unregister(&gdev->dev); |
470 | mutex_unlock(&gdev->reg_mutex); | ||
468 | put_device(&gdev->dev); | 471 | put_device(&gdev->dev); |
469 | } | 472 | } |
470 | } | 473 | } |
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
new file mode 100644
index 000000000000..ac289e6eadfe
--- /dev/null
+++ b/drivers/s390/cio/chp.c
@@ -0,0 +1,683 @@
1 | /* | ||
2 | * drivers/s390/cio/chp.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 1999,2007 | ||
5 | * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) | ||
6 | * Arnd Bergmann (arndb@de.ibm.com) | ||
7 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/bug.h> | ||
11 | #include <linux/workqueue.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/jiffies.h> | ||
15 | #include <linux/wait.h> | ||
16 | #include <linux/mutex.h> | ||
17 | #include <asm/errno.h> | ||
18 | #include <asm/chpid.h> | ||
19 | #include <asm/sclp.h> | ||
20 | |||
21 | #include "cio.h" | ||
22 | #include "css.h" | ||
23 | #include "ioasm.h" | ||
24 | #include "cio_debug.h" | ||
25 | #include "chp.h" | ||
26 | |||
27 | #define to_channelpath(device) container_of(device, struct channel_path, dev) | ||
28 | #define CHP_INFO_UPDATE_INTERVAL 1*HZ | ||
29 | |||
30 | enum cfg_task_t { | ||
31 | cfg_none, | ||
32 | cfg_configure, | ||
33 | cfg_deconfigure | ||
34 | }; | ||
35 | |||
36 | /* Map for pending configure tasks. */ | ||
37 | static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1]; | ||
38 | static DEFINE_MUTEX(cfg_lock); | ||
39 | static int cfg_busy; | ||
40 | |||
41 | /* Map for channel-path status. */ | ||
42 | static struct sclp_chp_info chp_info; | ||
43 | static DEFINE_MUTEX(info_lock); | ||
44 | |||
45 | /* Time after which channel-path status may be outdated. */ | ||
46 | static unsigned long chp_info_expires; | ||
47 | |||
48 | /* Workqueue to perform pending configure tasks. */ | ||
49 | static struct workqueue_struct *chp_wq; | ||
50 | static struct work_struct cfg_work; | ||
51 | |||
52 | /* Wait queue for configure completion events. */ | ||
53 | static wait_queue_head_t cfg_wait_queue; | ||
54 | |||
55 | /* Return channel_path struct for given chpid. */ | ||
56 | static inline struct channel_path *chpid_to_chp(struct chp_id chpid) | ||
57 | { | ||
58 | return css[chpid.cssid]->chps[chpid.id]; | ||
59 | } | ||
60 | |||
61 | /* Set vary state for given chpid. */ | ||
62 | static void set_chp_logically_online(struct chp_id chpid, int onoff) | ||
63 | { | ||
64 | chpid_to_chp(chpid)->state = onoff; | ||
65 | } | ||
66 | |||
67 | /* On succes return 0 if channel-path is varied offline, 1 if it is varied | ||
68 | * online. Return -ENODEV if channel-path is not registered. */ | ||
69 | int chp_get_status(struct chp_id chpid) | ||
70 | { | ||
71 | return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV); | ||
72 | } | ||
73 | |||
74 | /** | ||
75 | * chp_get_sch_opm - return opm for subchannel | ||
76 | * @sch: subchannel | ||
77 | * | ||
78 | * Calculate and return the operational path mask (opm) based on the chpids | ||
79 | * used by the subchannel and the status of the associated channel-paths. | ||
80 | */ | ||
81 | u8 chp_get_sch_opm(struct subchannel *sch) | ||
82 | { | ||
83 | struct chp_id chpid; | ||
84 | int opm; | ||
85 | int i; | ||
86 | |||
87 | opm = 0; | ||
88 | chp_id_init(&chpid); | ||
89 | for (i=0; i < 8; i++) { | ||
90 | opm <<= 1; | ||
91 | chpid.id = sch->schib.pmcw.chpid[i]; | ||
92 | if (chp_get_status(chpid) != 0) | ||
93 | opm |= 1; | ||
94 | } | ||
95 | return opm; | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * chp_is_registered - check if a channel-path is registered | ||
100 | * @chpid: channel-path ID | ||
101 | * | ||
102 | * Return non-zero if a channel-path with the given chpid is registered, | ||
103 | * zero otherwise. | ||
104 | */ | ||
105 | int chp_is_registered(struct chp_id chpid) | ||
106 | { | ||
107 | return chpid_to_chp(chpid) != NULL; | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * Function: s390_vary_chpid | ||
112 | * Varies the specified chpid online or offline | ||
113 | */ | ||
114 | static int s390_vary_chpid(struct chp_id chpid, int on) | ||
115 | { | ||
116 | char dbf_text[15]; | ||
117 | int status; | ||
118 | |||
119 | sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid, | ||
120 | chpid.id); | ||
121 | CIO_TRACE_EVENT( 2, dbf_text); | ||
122 | |||
123 | status = chp_get_status(chpid); | ||
124 | if (status < 0) { | ||
125 | printk(KERN_ERR "Can't vary unknown chpid %x.%02x\n", | ||
126 | chpid.cssid, chpid.id); | ||
127 | return -EINVAL; | ||
128 | } | ||
129 | |||
130 | if (!on && !status) { | ||
131 | printk(KERN_ERR "chpid %x.%02x is already offline\n", | ||
132 | chpid.cssid, chpid.id); | ||
133 | return -EINVAL; | ||
134 | } | ||
135 | |||
136 | set_chp_logically_online(chpid, on); | ||
137 | chsc_chp_vary(chpid, on); | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * Channel measurement related functions | ||
143 | */ | ||
144 | static ssize_t chp_measurement_chars_read(struct kobject *kobj, char *buf, | ||
145 | loff_t off, size_t count) | ||
146 | { | ||
147 | struct channel_path *chp; | ||
148 | unsigned int size; | ||
149 | |||
150 | chp = to_channelpath(container_of(kobj, struct device, kobj)); | ||
151 | if (!chp->cmg_chars) | ||
152 | return 0; | ||
153 | |||
154 | size = sizeof(struct cmg_chars); | ||
155 | |||
156 | if (off > size) | ||
157 | return 0; | ||
158 | if (off + count > size) | ||
159 | count = size - off; | ||
160 | memcpy(buf, chp->cmg_chars + off, count); | ||
161 | return count; | ||
162 | } | ||
163 | |||
164 | static struct bin_attribute chp_measurement_chars_attr = { | ||
165 | .attr = { | ||
166 | .name = "measurement_chars", | ||
167 | .mode = S_IRUSR, | ||
168 | .owner = THIS_MODULE, | ||
169 | }, | ||
170 | .size = sizeof(struct cmg_chars), | ||
171 | .read = chp_measurement_chars_read, | ||
172 | }; | ||
173 | |||
174 | static void chp_measurement_copy_block(struct cmg_entry *buf, | ||
175 | struct channel_subsystem *css, | ||
176 | struct chp_id chpid) | ||
177 | { | ||
178 | void *area; | ||
179 | struct cmg_entry *entry, reference_buf; | ||
180 | int idx; | ||
181 | |||
182 | if (chpid.id < 128) { | ||
183 | area = css->cub_addr1; | ||
184 | idx = chpid.id; | ||
185 | } else { | ||
186 | area = css->cub_addr2; | ||
187 | idx = chpid.id - 128; | ||
188 | } | ||
189 | entry = area + (idx * sizeof(struct cmg_entry)); | ||
190 | do { | ||
191 | memcpy(buf, entry, sizeof(*entry)); | ||
192 | memcpy(&reference_buf, entry, sizeof(*entry)); | ||
193 | } while (reference_buf.values[0] != buf->values[0]); | ||
194 | } | ||
195 | |||
196 | static ssize_t chp_measurement_read(struct kobject *kobj, char *buf, | ||
197 | loff_t off, size_t count) | ||
198 | { | ||
199 | struct channel_path *chp; | ||
200 | struct channel_subsystem *css; | ||
201 | unsigned int size; | ||
202 | |||
203 | chp = to_channelpath(container_of(kobj, struct device, kobj)); | ||
204 | css = to_css(chp->dev.parent); | ||
205 | |||
206 | size = sizeof(struct cmg_entry); | ||
207 | |||
208 | /* Only allow single reads. */ | ||
209 | if (off || count < size) | ||
210 | return 0; | ||
211 | chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid); | ||
212 | count = size; | ||
213 | return count; | ||
214 | } | ||
215 | |||
216 | static struct bin_attribute chp_measurement_attr = { | ||
217 | .attr = { | ||
218 | .name = "measurement", | ||
219 | .mode = S_IRUSR, | ||
220 | .owner = THIS_MODULE, | ||
221 | }, | ||
222 | .size = sizeof(struct cmg_entry), | ||
223 | .read = chp_measurement_read, | ||
224 | }; | ||
225 | |||
226 | void chp_remove_cmg_attr(struct channel_path *chp) | ||
227 | { | ||
228 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
229 | device_remove_bin_file(&chp->dev, &chp_measurement_attr); | ||
230 | } | ||
231 | |||
232 | int chp_add_cmg_attr(struct channel_path *chp) | ||
233 | { | ||
234 | int ret; | ||
235 | |||
236 | ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
237 | if (ret) | ||
238 | return ret; | ||
239 | ret = device_create_bin_file(&chp->dev, &chp_measurement_attr); | ||
240 | if (ret) | ||
241 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
242 | return ret; | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * Files for the channel path entries. | ||
247 | */ | ||
248 | static ssize_t chp_status_show(struct device *dev, | ||
249 | struct device_attribute *attr, char *buf) | ||
250 | { | ||
251 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | ||
252 | |||
253 | if (!chp) | ||
254 | return 0; | ||
255 | return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") : | ||
256 | sprintf(buf, "offline\n")); | ||
257 | } | ||
258 | |||
259 | static ssize_t chp_status_write(struct device *dev, | ||
260 | struct device_attribute *attr, | ||
261 | const char *buf, size_t count) | ||
262 | { | ||
263 | struct channel_path *cp = container_of(dev, struct channel_path, dev); | ||
264 | char cmd[10]; | ||
265 | int num_args; | ||
266 | int error; | ||
267 | |||
268 | num_args = sscanf(buf, "%5s", cmd); | ||
269 | if (!num_args) | ||
270 | return count; | ||
271 | |||
272 | if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) | ||
273 | error = s390_vary_chpid(cp->chpid, 1); | ||
274 | else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) | ||
275 | error = s390_vary_chpid(cp->chpid, 0); | ||
276 | else | ||
277 | error = -EINVAL; | ||
278 | |||
279 | return error < 0 ? error : count; | ||
280 | |||
281 | } | ||
282 | |||
283 | static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); | ||
284 | |||
285 | static ssize_t chp_configure_show(struct device *dev, | ||
286 | struct device_attribute *attr, char *buf) | ||
287 | { | ||
288 | struct channel_path *cp; | ||
289 | int status; | ||
290 | |||
291 | cp = container_of(dev, struct channel_path, dev); | ||
292 | status = chp_info_get_status(cp->chpid); | ||
293 | if (status < 0) | ||
294 | return status; | ||
295 | |||
296 | return snprintf(buf, PAGE_SIZE, "%d\n", status); | ||
297 | } | ||
298 | |||
299 | static int cfg_wait_idle(void); | ||
300 | |||
301 | static ssize_t chp_configure_write(struct device *dev, | ||
302 | struct device_attribute *attr, | ||
303 | const char *buf, size_t count) | ||
304 | { | ||
305 | struct channel_path *cp; | ||
306 | int val; | ||
307 | char delim; | ||
308 | |||
309 | if (sscanf(buf, "%d %c", &val, &delim) != 1) | ||
310 | return -EINVAL; | ||
311 | if (val != 0 && val != 1) | ||
312 | return -EINVAL; | ||
313 | cp = container_of(dev, struct channel_path, dev); | ||
314 | chp_cfg_schedule(cp->chpid, val); | ||
315 | cfg_wait_idle(); | ||
316 | |||
317 | return count; | ||
318 | } | ||
319 | |||
320 | static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write); | ||
321 | |||
322 | static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr, | ||
323 | char *buf) | ||
324 | { | ||
325 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | ||
326 | |||
327 | if (!chp) | ||
328 | return 0; | ||
329 | return sprintf(buf, "%x\n", chp->desc.desc); | ||
330 | } | ||
331 | |||
332 | static DEVICE_ATTR(type, 0444, chp_type_show, NULL); | ||
333 | |||
334 | static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr, | ||
335 | char *buf) | ||
336 | { | ||
337 | struct channel_path *chp = to_channelpath(dev); | ||
338 | |||
339 | if (!chp) | ||
340 | return 0; | ||
341 | if (chp->cmg == -1) /* channel measurements not available */ | ||
342 | return sprintf(buf, "unknown\n"); | ||
343 | return sprintf(buf, "%x\n", chp->cmg); | ||
344 | } | ||
345 | |||
346 | static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL); | ||
347 | |||
348 | static ssize_t chp_shared_show(struct device *dev, | ||
349 | struct device_attribute *attr, char *buf) | ||
350 | { | ||
351 | struct channel_path *chp = to_channelpath(dev); | ||
352 | |||
353 | if (!chp) | ||
354 | return 0; | ||
355 | if (chp->shared == -1) /* channel measurements not available */ | ||
356 | return sprintf(buf, "unknown\n"); | ||
357 | return sprintf(buf, "%x\n", chp->shared); | ||
358 | } | ||
359 | |||
360 | static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); | ||
361 | |||
362 | static struct attribute * chp_attrs[] = { | ||
363 | &dev_attr_status.attr, | ||
364 | &dev_attr_configure.attr, | ||
365 | &dev_attr_type.attr, | ||
366 | &dev_attr_cmg.attr, | ||
367 | &dev_attr_shared.attr, | ||
368 | NULL, | ||
369 | }; | ||
370 | |||
371 | static struct attribute_group chp_attr_group = { | ||
372 | .attrs = chp_attrs, | ||
373 | }; | ||
374 | |||
375 | static void chp_release(struct device *dev) | ||
376 | { | ||
377 | struct channel_path *cp; | ||
378 | |||
379 | cp = container_of(dev, struct channel_path, dev); | ||
380 | kfree(cp); | ||
381 | } | ||
382 | |||
383 | /** | ||
384 | * chp_new - register a new channel-path | ||
385 | * @chpid - channel-path ID | ||
386 | * | ||
387 | * Create and register data structure representing new channel-path. Return | ||
388 | * zero on success, non-zero otherwise. | ||
389 | */ | ||
390 | int chp_new(struct chp_id chpid) | ||
391 | { | ||
392 | struct channel_path *chp; | ||
393 | int ret; | ||
394 | |||
395 | if (chp_is_registered(chpid)) | ||
396 | return 0; | ||
397 | chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL); | ||
398 | if (!chp) | ||
399 | return -ENOMEM; | ||
400 | |||
401 | /* fill in status, etc. */ | ||
402 | chp->chpid = chpid; | ||
403 | chp->state = 1; | ||
404 | chp->dev.parent = &css[chpid.cssid]->device; | ||
405 | chp->dev.release = chp_release; | ||
406 | snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid, | ||
407 | chpid.id); | ||
408 | |||
409 | /* Obtain channel path description and fill it in. */ | ||
410 | ret = chsc_determine_channel_path_description(chpid, &chp->desc); | ||
411 | if (ret) | ||
412 | goto out_free; | ||
413 | if ((chp->desc.flags & 0x80) == 0) { | ||
414 | ret = -ENODEV; | ||
415 | goto out_free; | ||
416 | } | ||
417 | /* Get channel-measurement characteristics. */ | ||
418 | if (css_characteristics_avail && css_chsc_characteristics.scmc | ||
419 | && css_chsc_characteristics.secm) { | ||
420 | ret = chsc_get_channel_measurement_chars(chp); | ||
421 | if (ret) | ||
422 | goto out_free; | ||
423 | } else { | ||
424 | static int msg_done; | ||
425 | |||
426 | if (!msg_done) { | ||
427 | printk(KERN_WARNING "cio: Channel measurements not " | ||
428 | "available, continuing.\n"); | ||
429 | msg_done = 1; | ||
430 | } | ||
431 | chp->cmg = -1; | ||
432 | } | ||
433 | |||
434 | /* make it known to the system */ | ||
435 | ret = device_register(&chp->dev); | ||
436 | if (ret) { | ||
437 | printk(KERN_WARNING "%s: could not register %x.%02x\n", | ||
438 | __func__, chpid.cssid, chpid.id); | ||
439 | goto out_free; | ||
440 | } | ||
441 | ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); | ||
442 | if (ret) { | ||
443 | device_unregister(&chp->dev); | ||
444 | goto out_free; | ||
445 | } | ||
446 | mutex_lock(&css[chpid.cssid]->mutex); | ||
447 | if (css[chpid.cssid]->cm_enabled) { | ||
448 | ret = chp_add_cmg_attr(chp); | ||
449 | if (ret) { | ||
450 | sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); | ||
451 | device_unregister(&chp->dev); | ||
452 | mutex_unlock(&css[chpid.cssid]->mutex); | ||
453 | goto out_free; | ||
454 | } | ||
455 | } | ||
456 | css[chpid.cssid]->chps[chpid.id] = chp; | ||
457 | mutex_unlock(&css[chpid.cssid]->mutex); | ||
458 | return ret; | ||
459 | out_free: | ||
460 | kfree(chp); | ||
461 | return ret; | ||
462 | } | ||
463 | |||
464 | /** | ||
465 | * chp_get_chp_desc - return newly allocated channel-path description | ||
466 | * @chpid: channel-path ID | ||
467 | * | ||
468 | * On success return a newly allocated copy of the channel-path description | ||
469 | * data associated with the given channel-path ID. Return %NULL on error. | ||
470 | */ | ||
471 | void *chp_get_chp_desc(struct chp_id chpid) | ||
472 | { | ||
473 | struct channel_path *chp; | ||
474 | struct channel_path_desc *desc; | ||
475 | |||
476 | chp = chpid_to_chp(chpid); | ||
477 | if (!chp) | ||
478 | return NULL; | ||
479 | desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); | ||
480 | if (!desc) | ||
481 | return NULL; | ||
482 | memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); | ||
483 | return desc; | ||
484 | } | ||
485 | |||
486 | /** | ||
487 | * chp_process_crw - process channel-path status change | ||
488 | * @id: channel-path ID number | ||
489 | * @status: non-zero if channel-path has become available, zero otherwise | ||
490 | * | ||
491 | * Handle channel-report-words indicating that the status of a channel-path | ||
492 | * has changed. | ||
493 | */ | ||
494 | void chp_process_crw(int id, int status) | ||
495 | { | ||
496 | struct chp_id chpid; | ||
497 | |||
498 | chp_id_init(&chpid); | ||
499 | chpid.id = id; | ||
500 | if (status) { | ||
501 | if (!chp_is_registered(chpid)) | ||
502 | chp_new(chpid); | ||
503 | chsc_chp_online(chpid); | ||
504 | } else | ||
505 | chsc_chp_offline(chpid); | ||
506 | } | ||
507 | |||
508 | static inline int info_bit_num(struct chp_id id) | ||
509 | { | ||
510 | return id.id + id.cssid * (__MAX_CHPID + 1); | ||
511 | } | ||
512 | |||
513 | /* Force chp_info refresh on next call to info_validate(). */ | ||
514 | static void info_expire(void) | ||
515 | { | ||
516 | mutex_lock(&info_lock); | ||
517 | chp_info_expires = jiffies - 1; | ||
518 | mutex_unlock(&info_lock); | ||
519 | } | ||
520 | |||
521 | /* Ensure that chp_info is up-to-date. */ | ||
522 | static int info_update(void) | ||
523 | { | ||
524 | int rc; | ||
525 | |||
526 | mutex_lock(&info_lock); | ||
527 | rc = 0; | ||
528 | if (time_after(jiffies, chp_info_expires)) { | ||
529 | /* Data is too old, update. */ | ||
530 | rc = sclp_chp_read_info(&chp_info); | ||
531 | chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL ; | ||
532 | } | ||
533 | mutex_unlock(&info_lock); | ||
534 | |||
535 | return rc; | ||
536 | } | ||
537 | |||
538 | /** | ||
539 | * chp_info_get_status - retrieve configure status of a channel-path | ||
540 | * @chpid: channel-path ID | ||
541 | * | ||
542 | * On success, return 0 for standby, 1 for configured, 2 for reserved, | ||
543 | * 3 for not recognized. Return negative error code on error. | ||
544 | */ | ||
545 | int chp_info_get_status(struct chp_id chpid) | ||
546 | { | ||
547 | int rc; | ||
548 | int bit; | ||
549 | |||
550 | rc = info_update(); | ||
551 | if (rc) | ||
552 | return rc; | ||
553 | |||
554 | bit = info_bit_num(chpid); | ||
555 | mutex_lock(&info_lock); | ||
556 | if (!chp_test_bit(chp_info.recognized, bit)) | ||
557 | rc = CHP_STATUS_NOT_RECOGNIZED; | ||
558 | else if (chp_test_bit(chp_info.configured, bit)) | ||
559 | rc = CHP_STATUS_CONFIGURED; | ||
560 | else if (chp_test_bit(chp_info.standby, bit)) | ||
561 | rc = CHP_STATUS_STANDBY; | ||
562 | else | ||
563 | rc = CHP_STATUS_RESERVED; | ||
564 | mutex_unlock(&info_lock); | ||
565 | |||
566 | return rc; | ||
567 | } | ||
568 | |||
569 | /* Return configure task for chpid. */ | ||
570 | static enum cfg_task_t cfg_get_task(struct chp_id chpid) | ||
571 | { | ||
572 | return chp_cfg_task[chpid.cssid][chpid.id]; | ||
573 | } | ||
574 | |||
575 | /* Set configure task for chpid. */ | ||
576 | static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg) | ||
577 | { | ||
578 | chp_cfg_task[chpid.cssid][chpid.id] = cfg; | ||
579 | } | ||
580 | |||
581 | /* Perform one configure/deconfigure request. Reschedule work function until | ||
582 | * last request. */ | ||
583 | static void cfg_func(struct work_struct *work) | ||
584 | { | ||
585 | struct chp_id chpid; | ||
586 | enum cfg_task_t t; | ||
587 | |||
588 | mutex_lock(&cfg_lock); | ||
589 | t = cfg_none; | ||
590 | chp_id_for_each(&chpid) { | ||
591 | t = cfg_get_task(chpid); | ||
592 | if (t != cfg_none) { | ||
593 | cfg_set_task(chpid, cfg_none); | ||
594 | break; | ||
595 | } | ||
596 | } | ||
597 | mutex_unlock(&cfg_lock); | ||
598 | |||
599 | switch (t) { | ||
600 | case cfg_configure: | ||
601 | sclp_chp_configure(chpid); | ||
602 | info_expire(); | ||
603 | chsc_chp_online(chpid); | ||
604 | break; | ||
605 | case cfg_deconfigure: | ||
606 | sclp_chp_deconfigure(chpid); | ||
607 | info_expire(); | ||
608 | chsc_chp_offline(chpid); | ||
609 | break; | ||
610 | case cfg_none: | ||
611 | /* Get updated information after last change. */ | ||
612 | info_update(); | ||
613 | mutex_lock(&cfg_lock); | ||
614 | cfg_busy = 0; | ||
615 | mutex_unlock(&cfg_lock); | ||
616 | wake_up_interruptible(&cfg_wait_queue); | ||
617 | return; | ||
618 | } | ||
619 | queue_work(chp_wq, &cfg_work); | ||
620 | } | ||
621 | |||
622 | /** | ||
623 | * chp_cfg_schedule - schedule chpid configuration request | ||
624 | * @chpid - channel-path ID | ||
625 | * @configure - Non-zero for configure, zero for deconfigure | ||
626 | * | ||
627 | * Schedule a channel-path configuration/deconfiguration request. | ||
628 | */ | ||
629 | void chp_cfg_schedule(struct chp_id chpid, int configure) | ||
630 | { | ||
631 | CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id, | ||
632 | configure); | ||
633 | mutex_lock(&cfg_lock); | ||
634 | cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure); | ||
635 | cfg_busy = 1; | ||
636 | mutex_unlock(&cfg_lock); | ||
637 | queue_work(chp_wq, &cfg_work); | ||
638 | } | ||
639 | |||
640 | /** | ||
641 | * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request | ||
642 | * @chpid - channel-path ID | ||
643 | * | ||
644 | * Cancel an active channel-path deconfiguration request if it has not yet | ||
645 | * been performed. | ||
646 | */ | ||
647 | void chp_cfg_cancel_deconfigure(struct chp_id chpid) | ||
648 | { | ||
649 | CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id); | ||
650 | mutex_lock(&cfg_lock); | ||
651 | if (cfg_get_task(chpid) == cfg_deconfigure) | ||
652 | cfg_set_task(chpid, cfg_none); | ||
653 | mutex_unlock(&cfg_lock); | ||
654 | } | ||
655 | |||
656 | static int cfg_wait_idle(void) | ||
657 | { | ||
658 | if (wait_event_interruptible(cfg_wait_queue, !cfg_busy)) | ||
659 | return -ERESTARTSYS; | ||
660 | return 0; | ||
661 | } | ||
662 | |||
663 | static int __init chp_init(void) | ||
664 | { | ||
665 | struct chp_id chpid; | ||
666 | |||
667 | chp_wq = create_singlethread_workqueue("cio_chp"); | ||
668 | if (!chp_wq) | ||
669 | return -ENOMEM; | ||
670 | INIT_WORK(&cfg_work, cfg_func); | ||
671 | init_waitqueue_head(&cfg_wait_queue); | ||
672 | if (info_update()) | ||
673 | return 0; | ||
674 | /* Register available channel-paths. */ | ||
675 | chp_id_for_each(&chpid) { | ||
676 | if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED) | ||
677 | chp_new(chpid); | ||
678 | } | ||
679 | |||
680 | return 0; | ||
681 | } | ||
682 | |||
683 | subsys_initcall(chp_init); | ||
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
new file mode 100644
index 000000000000..65286563c592
--- /dev/null
+++ b/drivers/s390/cio/chp.h
@@ -0,0 +1,53 @@
1 | /* | ||
2 | * drivers/s390/cio/chp.h | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef S390_CHP_H | ||
9 | #define S390_CHP_H S390_CHP_H | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | #include <linux/device.h> | ||
13 | #include <asm/chpid.h> | ||
14 | #include "chsc.h" | ||
15 | |||
16 | #define CHP_STATUS_STANDBY 0 | ||
17 | #define CHP_STATUS_CONFIGURED 1 | ||
18 | #define CHP_STATUS_RESERVED 2 | ||
19 | #define CHP_STATUS_NOT_RECOGNIZED 3 | ||
20 | |||
21 | static inline int chp_test_bit(u8 *bitmap, int num) | ||
22 | { | ||
23 | int byte = num >> 3; | ||
24 | int mask = 128 >> (num & 7); | ||
25 | |||
26 | return (bitmap[byte] & mask) ? 1 : 0; | ||
27 | } | ||
28 | |||
29 | |||
30 | struct channel_path { | ||
31 | struct chp_id chpid; | ||
32 | int state; | ||
33 | struct channel_path_desc desc; | ||
34 | /* Channel-measurement related stuff: */ | ||
35 | int cmg; | ||
36 | int shared; | ||
37 | void *cmg_chars; | ||
38 | struct device dev; | ||
39 | }; | ||
40 | |||
41 | int chp_get_status(struct chp_id chpid); | ||
42 | u8 chp_get_sch_opm(struct subchannel *sch); | ||
43 | int chp_is_registered(struct chp_id chpid); | ||
44 | void *chp_get_chp_desc(struct chp_id chpid); | ||
45 | void chp_process_crw(int id, int available); | ||
46 | void chp_remove_cmg_attr(struct channel_path *chp); | ||
47 | int chp_add_cmg_attr(struct channel_path *chp); | ||
48 | int chp_new(struct chp_id chpid); | ||
49 | void chp_cfg_schedule(struct chp_id chpid, int configure); | ||
50 | void chp_cfg_cancel_deconfigure(struct chp_id chpid); | ||
51 | int chp_info_get_status(struct chp_id chpid); | ||
52 | |||
53 | #endif /* S390_CHP_H */ | ||
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6f05a44e3817..ea92ac4d6577 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -15,202 +15,124 @@
15 | #include <linux/device.h> | 15 | #include <linux/device.h> |
16 | 16 | ||
17 | #include <asm/cio.h> | 17 | #include <asm/cio.h> |
18 | #include <asm/chpid.h> | ||
18 | 19 | ||
19 | #include "css.h" | 20 | #include "css.h" |
20 | #include "cio.h" | 21 | #include "cio.h" |
21 | #include "cio_debug.h" | 22 | #include "cio_debug.h" |
22 | #include "ioasm.h" | 23 | #include "ioasm.h" |
24 | #include "chp.h" | ||
23 | #include "chsc.h" | 25 | #include "chsc.h" |
24 | 26 | ||
25 | static void *sei_page; | 27 | static void *sei_page; |
26 | 28 | ||
27 | static int new_channel_path(int chpid); | 29 | struct chsc_ssd_area { |
28 | 30 | struct chsc_header request; | |
29 | static inline void | 31 | u16 :10; |
30 | set_chp_logically_online(int chp, int onoff) | 32 | u16 ssid:2; |
31 | { | 33 | u16 :4; |
32 | css[0]->chps[chp]->state = onoff; | 34 | u16 f_sch; /* first subchannel */ |
33 | } | 35 | u16 :16; |
34 | 36 | u16 l_sch; /* last subchannel */ | |
35 | static int | 37 | u32 :32; |
36 | get_chp_status(int chp) | 38 | struct chsc_header response; |
37 | { | 39 | u32 :32; |
38 | return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV); | 40 | u8 sch_valid : 1; |
39 | } | 41 | u8 dev_valid : 1; |
40 | 42 | u8 st : 3; /* subchannel type */ | |
41 | void | 43 | u8 zeroes : 3; |
42 | chsc_validate_chpids(struct subchannel *sch) | 44 | u8 unit_addr; /* unit address */ |
43 | { | 45 | u16 devno; /* device number */ |
44 | int mask, chp; | 46 | u8 path_mask; |
45 | 47 | u8 fla_valid_mask; | |
46 | for (chp = 0; chp <= 7; chp++) { | 48 | u16 sch; /* subchannel */ |
47 | mask = 0x80 >> chp; | 49 | u8 chpid[8]; /* chpids 0-7 */ |
48 | if (!get_chp_status(sch->schib.pmcw.chpid[chp])) | 50 | u16 fla[8]; /* full link addresses 0-7 */ |
49 | /* disable using this path */ | 51 | } __attribute__ ((packed)); |
50 | sch->opm &= ~mask; | ||
51 | } | ||
52 | } | ||
53 | |||
54 | void | ||
55 | chpid_is_actually_online(int chp) | ||
56 | { | ||
57 | int state; | ||
58 | |||
59 | state = get_chp_status(chp); | ||
60 | if (state < 0) { | ||
61 | need_rescan = 1; | ||
62 | queue_work(slow_path_wq, &slow_path_work); | ||
63 | } else | ||
64 | WARN_ON(!state); | ||
65 | } | ||
66 | 52 | ||
67 | /* FIXME: this is _always_ called for every subchannel. shouldn't we | 53 | int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) |
68 | * process more than one at a time? */ | ||
69 | static int | ||
70 | chsc_get_sch_desc_irq(struct subchannel *sch, void *page) | ||
71 | { | 54 | { |
72 | int ccode, j; | 55 | unsigned long page; |
73 | 56 | struct chsc_ssd_area *ssd_area; | |
74 | struct { | 57 | int ccode; |
75 | struct chsc_header request; | 58 | int ret; |
76 | u16 reserved1a:10; | 59 | int i; |
77 | u16 ssid:2; | 60 | int mask; |
78 | u16 reserved1b:4; | ||
79 | u16 f_sch; /* first subchannel */ | ||
80 | u16 reserved2; | ||
81 | u16 l_sch; /* last subchannel */ | ||
82 | u32 reserved3; | ||
83 | struct chsc_header response; | ||
84 | u32 reserved4; | ||
85 | u8 sch_valid : 1; | ||
86 | u8 dev_valid : 1; | ||
87 | u8 st : 3; /* subchannel type */ | ||
88 | u8 zeroes : 3; | ||
89 | u8 unit_addr; /* unit address */ | ||
90 | u16 devno; /* device number */ | ||
91 | u8 path_mask; | ||
92 | u8 fla_valid_mask; | ||
93 | u16 sch; /* subchannel */ | ||
94 | u8 chpid[8]; /* chpids 0-7 */ | ||
95 | u16 fla[8]; /* full link addresses 0-7 */ | ||
96 | } __attribute__ ((packed)) *ssd_area; | ||
97 | |||
98 | ssd_area = page; | ||
99 | 61 | ||
62 | page = get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
63 | if (!page) | ||
64 | return -ENOMEM; | ||
65 | ssd_area = (struct chsc_ssd_area *) page; | ||
100 | ssd_area->request.length = 0x0010; | 66 | ssd_area->request.length = 0x0010; |
101 | ssd_area->request.code = 0x0004; | 67 | ssd_area->request.code = 0x0004; |
102 | 68 | ssd_area->ssid = schid.ssid; | |
103 | ssd_area->ssid = sch->schid.ssid; | 69 | ssd_area->f_sch = schid.sch_no; |
104 | ssd_area->f_sch = sch->schid.sch_no; | 70 | ssd_area->l_sch = schid.sch_no; |
105 | ssd_area->l_sch = sch->schid.sch_no; | ||
106 | 71 | ||
107 | ccode = chsc(ssd_area); | 72 | ccode = chsc(ssd_area); |
73 | /* Check response. */ | ||
108 | if (ccode > 0) { | 74 | if (ccode > 0) { |
109 | pr_debug("chsc returned with ccode = %d\n", ccode); | 75 | ret = (ccode == 3) ? -ENODEV : -EBUSY; |
110 | return (ccode == 3) ? -ENODEV : -EBUSY; | 76 | goto out_free; |
111 | } | 77 | } |
112 | 78 | if (ssd_area->response.code != 0x0001) { | |
113 | switch (ssd_area->response.code) { | 79 | CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", |
114 | case 0x0001: /* everything ok */ | 80 | schid.ssid, schid.sch_no, |
115 | break; | ||
116 | case 0x0002: | ||
117 | CIO_CRW_EVENT(2, "Invalid command!\n"); | ||
118 | return -EINVAL; | ||
119 | case 0x0003: | ||
120 | CIO_CRW_EVENT(2, "Error in chsc request block!\n"); | ||
121 | return -EINVAL; | ||
122 | case 0x0004: | ||
123 | CIO_CRW_EVENT(2, "Model does not provide ssd\n"); | ||
124 | return -EOPNOTSUPP; | ||
125 | default: | ||
126 | CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", | ||
127 | ssd_area->response.code); | 81 | ssd_area->response.code); |
128 | return -EIO; | 82 | ret = -EIO; |
83 | goto out_free; | ||
129 | } | 84 | } |
130 | 85 | if (!ssd_area->sch_valid) { | |
131 | /* | 86 | ret = -ENODEV; |
132 | * ssd_area->st stores the type of the detected | 87 | goto out_free; |
133 | * subchannel, with the following definitions: | ||
134 | * | ||
135 | * 0: I/O subchannel: All fields have meaning | ||
136 | * 1: CHSC subchannel: Only sch_val, st and sch | ||
137 | * have meaning | ||
138 | * 2: Message subchannel: All fields except unit_addr | ||
139 | * have meaning | ||
140 | * 3: ADM subchannel: Only sch_val, st and sch | ||
141 | * have meaning | ||
142 | * | ||
143 | * Other types are currently undefined. | ||
144 | */ | ||
145 | if (ssd_area->st > 3) { /* uhm, that looks strange... */ | ||
146 | CIO_CRW_EVENT(0, "Strange subchannel type %d" | ||
147 | " for sch 0.%x.%04x\n", ssd_area->st, | ||
148 | sch->schid.ssid, sch->schid.sch_no); | ||
149 | /* | ||
150 | * There may have been a new subchannel type defined in the | ||
151 | * time since this code was written; since we don't know which | ||
152 | * fields have meaning and what to do with it we just jump out | ||
153 | */ | ||
154 | return 0; | ||
155 | } else { | ||
156 | const char *type[4] = {"I/O", "chsc", "message", "ADM"}; | ||
157 | CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n", | ||
158 | sch->schid.ssid, sch->schid.sch_no, | ||
159 | type[ssd_area->st]); | ||
160 | |||
161 | sch->ssd_info.valid = 1; | ||
162 | sch->ssd_info.type = ssd_area->st; | ||
163 | } | 88 | } |
164 | 89 | /* Copy data */ | |
165 | if (ssd_area->st == 0 || ssd_area->st == 2) { | 90 | ret = 0; |
166 | for (j = 0; j < 8; j++) { | 91 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); |
167 | if (!((0x80 >> j) & ssd_area->path_mask & | 92 | if ((ssd_area->st != 0) && (ssd_area->st != 2)) |
168 | ssd_area->fla_valid_mask)) | 93 | goto out_free; |
169 | continue; | 94 | ssd->path_mask = ssd_area->path_mask; |
170 | sch->ssd_info.chpid[j] = ssd_area->chpid[j]; | 95 | ssd->fla_valid_mask = ssd_area->fla_valid_mask; |
171 | sch->ssd_info.fla[j] = ssd_area->fla[j]; | 96 | for (i = 0; i < 8; i++) { |
97 | mask = 0x80 >> i; | ||
98 | if (ssd_area->path_mask & mask) { | ||
99 | chp_id_init(&ssd->chpid[i]); | ||
100 | ssd->chpid[i].id = ssd_area->chpid[i]; | ||
172 | } | 101 | } |
102 | if (ssd_area->fla_valid_mask & mask) | ||
103 | ssd->fla[i] = ssd_area->fla[i]; | ||
173 | } | 104 | } |
174 | return 0; | 105 | out_free: |
106 | free_page(page); | ||
107 | return ret; | ||
175 | } | 108 | } |
176 | 109 | ||
177 | int | 110 | static int check_for_io_on_path(struct subchannel *sch, int mask) |
178 | css_get_ssd_info(struct subchannel *sch) | ||
179 | { | 111 | { |
180 | int ret; | 112 | int cc; |
181 | void *page; | ||
182 | 113 | ||
183 | page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 114 | cc = stsch(sch->schid, &sch->schib); |
184 | if (!page) | 115 | if (cc) |
185 | return -ENOMEM; | 116 | return 0; |
186 | spin_lock_irq(sch->lock); | 117 | if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask) |
187 | ret = chsc_get_sch_desc_irq(sch, page); | 118 | return 1; |
188 | if (ret) { | 119 | return 0; |
189 | static int cio_chsc_err_msg; | 120 | } |
190 | 121 | ||
191 | if (!cio_chsc_err_msg) { | 122 | static void terminate_internal_io(struct subchannel *sch) |
192 | printk(KERN_ERR | 123 | { |
193 | "chsc_get_sch_descriptions:" | 124 | if (cio_clear(sch)) { |
194 | " Error %d while doing chsc; " | 125 | /* Recheck device in case clear failed. */ |
195 | "processing some machine checks may " | 126 | sch->lpm = 0; |
196 | "not work\n", ret); | 127 | if (device_trigger_verify(sch) != 0) |
197 | cio_chsc_err_msg = 1; | 128 | css_schedule_eval(sch->schid); |
198 | } | 129 | return; |
199 | } | ||
200 | spin_unlock_irq(sch->lock); | ||
201 | free_page((unsigned long)page); | ||
202 | if (!ret) { | ||
203 | int j, chpid, mask; | ||
204 | /* Allocate channel path structures, if needed. */ | ||
205 | for (j = 0; j < 8; j++) { | ||
206 | mask = 0x80 >> j; | ||
207 | chpid = sch->ssd_info.chpid[j]; | ||
208 | if ((sch->schib.pmcw.pim & mask) && | ||
209 | (get_chp_status(chpid) < 0)) | ||
210 | new_channel_path(chpid); | ||
211 | } | ||
212 | } | 130 | } |
213 | return ret; | 131 | /* Request retry of internal operation. */ |
132 | device_set_intretry(sch); | ||
133 | /* Call handler. */ | ||
134 | if (sch->driver && sch->driver->termination) | ||
135 | sch->driver->termination(&sch->dev); | ||
214 | } | 136 | } |
215 | 137 | ||
216 | static int | 138 | static int |
@@ -219,7 +141,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
219 | int j; | 141 | int j; |
220 | int mask; | 142 | int mask; |
221 | struct subchannel *sch; | 143 | struct subchannel *sch; |
222 | struct channel_path *chpid; | 144 | struct chp_id *chpid; |
223 | struct schib schib; | 145 | struct schib schib; |
224 | 146 | ||
225 | sch = to_subchannel(dev); | 147 | sch = to_subchannel(dev); |
@@ -243,106 +165,50 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
243 | if (sch->schib.pmcw.pim == 0x80) | 165 | if (sch->schib.pmcw.pim == 0x80) |
244 | goto out_unreg; | 166 | goto out_unreg; |
245 | 167 | ||
246 | if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) && | 168 | if (check_for_io_on_path(sch, mask)) { |
247 | (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) && | 169 | if (device_is_online(sch)) |
248 | (sch->schib.pmcw.lpum == mask)) { | 170 | device_kill_io(sch); |
249 | int cc; | 171 | else { |
250 | 172 | terminate_internal_io(sch); | |
251 | cc = cio_clear(sch); | 173 | /* Re-start path verification. */ |
252 | if (cc == -ENODEV) | 174 | if (sch->driver && sch->driver->verify) |
175 | sch->driver->verify(&sch->dev); | ||
176 | } | ||
177 | } else { | ||
178 | /* trigger path verification. */ | ||
179 | if (sch->driver && sch->driver->verify) | ||
180 | sch->driver->verify(&sch->dev); | ||
181 | else if (sch->lpm == mask) | ||
253 | goto out_unreg; | 182 | goto out_unreg; |
254 | /* Request retry of internal operation. */ | ||
255 | device_set_intretry(sch); | ||
256 | /* Call handler. */ | ||
257 | if (sch->driver && sch->driver->termination) | ||
258 | sch->driver->termination(&sch->dev); | ||
259 | goto out_unlock; | ||
260 | } | 183 | } |
261 | 184 | ||
262 | /* trigger path verification. */ | ||
263 | if (sch->driver && sch->driver->verify) | ||
264 | sch->driver->verify(&sch->dev); | ||
265 | else if (sch->lpm == mask) | ||
266 | goto out_unreg; | ||
267 | out_unlock: | ||
268 | spin_unlock_irq(sch->lock); | 185 | spin_unlock_irq(sch->lock); |
269 | return 0; | 186 | return 0; |
187 | |||
270 | out_unreg: | 188 | out_unreg: |
271 | spin_unlock_irq(sch->lock); | ||
272 | sch->lpm = 0; | 189 | sch->lpm = 0; |
273 | if (css_enqueue_subchannel_slow(sch->schid)) { | 190 | spin_unlock_irq(sch->lock); |
274 | css_clear_subchannel_slow_list(); | 191 | css_schedule_eval(sch->schid); |
275 | need_rescan = 1; | ||
276 | } | ||
277 | return 0; | 192 | return 0; |
278 | } | 193 | } |
279 | 194 | ||
280 | static void | 195 | void chsc_chp_offline(struct chp_id chpid) |
281 | s390_set_chpid_offline( __u8 chpid) | ||
282 | { | 196 | { |
283 | char dbf_txt[15]; | 197 | char dbf_txt[15]; |
284 | struct device *dev; | ||
285 | 198 | ||
286 | sprintf(dbf_txt, "chpr%x", chpid); | 199 | sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); |
287 | CIO_TRACE_EVENT(2, dbf_txt); | 200 | CIO_TRACE_EVENT(2, dbf_txt); |
288 | 201 | ||
289 | if (get_chp_status(chpid) <= 0) | 202 | if (chp_get_status(chpid) <= 0) |
290 | return; | 203 | return; |
291 | dev = get_device(&css[0]->chps[chpid]->dev); | 204 | bus_for_each_dev(&css_bus_type, NULL, &chpid, |
292 | bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev), | ||
293 | s390_subchannel_remove_chpid); | 205 | s390_subchannel_remove_chpid); |
294 | |||
295 | if (need_rescan || css_slow_subchannels_exist()) | ||
296 | queue_work(slow_path_wq, &slow_path_work); | ||
297 | put_device(dev); | ||
298 | } | ||
299 | |||
300 | struct res_acc_data { | ||
301 | struct channel_path *chp; | ||
302 | u32 fla_mask; | ||
303 | u16 fla; | ||
304 | }; | ||
305 | |||
306 | static int | ||
307 | s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch) | ||
308 | { | ||
309 | int found; | ||
310 | int chp; | ||
311 | int ccode; | ||
312 | |||
313 | found = 0; | ||
314 | for (chp = 0; chp <= 7; chp++) | ||
315 | /* | ||
316 | * check if chpid is in information updated by ssd | ||
317 | */ | ||
318 | if (sch->ssd_info.valid && | ||
319 | sch->ssd_info.chpid[chp] == res_data->chp->id && | ||
320 | (sch->ssd_info.fla[chp] & res_data->fla_mask) | ||
321 | == res_data->fla) { | ||
322 | found = 1; | ||
323 | break; | ||
324 | } | ||
325 | |||
326 | if (found == 0) | ||
327 | return 0; | ||
328 | |||
329 | /* | ||
330 | * Do a stsch to update our subchannel structure with the | ||
331 | * new path information and eventually check for logically | ||
332 | * offline chpids. | ||
333 | */ | ||
334 | ccode = stsch(sch->schid, &sch->schib); | ||
335 | if (ccode > 0) | ||
336 | return 0; | ||
337 | |||
338 | return 0x80 >> chp; | ||
339 | } | 206 | } |
340 | 207 | ||
341 | static int | 208 | static int |
342 | s390_process_res_acc_new_sch(struct subchannel_id schid) | 209 | s390_process_res_acc_new_sch(struct subchannel_id schid) |
343 | { | 210 | { |
344 | struct schib schib; | 211 | struct schib schib; |
345 | int ret; | ||
346 | /* | 212 | /* |
347 | * We don't know the device yet, but since a path | 213 | * We don't know the device yet, but since a path |
348 | * may be available now to the device we'll have | 214 | * may be available now to the device we'll have |
@@ -353,14 +219,35 @@ s390_process_res_acc_new_sch(struct subchannel_id schid)
353 | */ | 219 | */ |
354 | if (stsch_err(schid, &schib)) | 220 | if (stsch_err(schid, &schib)) |
355 | /* We're through */ | 221 | /* We're through */ |
356 | return need_rescan ? -EAGAIN : -ENXIO; | 222 | return -ENXIO; |
357 | 223 | ||
358 | /* Put it on the slow path. */ | 224 | /* Put it on the slow path. */ |
359 | ret = css_enqueue_subchannel_slow(schid); | 225 | css_schedule_eval(schid); |
360 | if (ret) { | 226 | return 0; |
361 | css_clear_subchannel_slow_list(); | 227 | } |
362 | need_rescan = 1; | 228 | |
363 | return -EAGAIN; | 229 | struct res_acc_data { |
230 | struct chp_id chpid; | ||
231 | u32 fla_mask; | ||
232 | u16 fla; | ||
233 | }; | ||
234 | |||
235 | static int get_res_chpid_mask(struct chsc_ssd_info *ssd, | ||
236 | struct res_acc_data *data) | ||
237 | { | ||
238 | int i; | ||
239 | int mask; | ||
240 | |||
241 | for (i = 0; i < 8; i++) { | ||
242 | mask = 0x80 >> i; | ||
243 | if (!(ssd->path_mask & mask)) | ||
244 | continue; | ||
245 | if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid)) | ||
246 | continue; | ||
247 | if ((ssd->fla_valid_mask & mask) && | ||
248 | ((ssd->fla[i] & data->fla_mask) != data->fla)) | ||
249 | continue; | ||
250 | return mask; | ||
364 | } | 251 | } |
365 | return 0; | 252 | return 0; |
366 | } | 253 | } |
@@ -379,14 +266,11 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
379 | return s390_process_res_acc_new_sch(schid); | 266 | return s390_process_res_acc_new_sch(schid); |
380 | 267 | ||
381 | spin_lock_irq(sch->lock); | 268 | spin_lock_irq(sch->lock); |
382 | 269 | chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data); | |
383 | chp_mask = s390_process_res_acc_sch(res_data, sch); | 270 | if (chp_mask == 0) |
384 | 271 | goto out; | |
385 | if (chp_mask == 0) { | 272 | if (stsch(sch->schid, &sch->schib)) |
386 | spin_unlock_irq(sch->lock); | 273 | goto out; |
387 | put_device(&sch->dev); | ||
388 | return 0; | ||
389 | } | ||
390 | old_lpm = sch->lpm; | 274 | old_lpm = sch->lpm; |
391 | sch->lpm = ((sch->schib.pmcw.pim & | 275 | sch->lpm = ((sch->schib.pmcw.pim & |
392 | sch->schib.pmcw.pam & | 276 | sch->schib.pmcw.pam & |
@@ -396,20 +280,18 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
396 | device_trigger_reprobe(sch); | 280 | device_trigger_reprobe(sch); |
397 | else if (sch->driver && sch->driver->verify) | 281 | else if (sch->driver && sch->driver->verify) |
398 | sch->driver->verify(&sch->dev); | 282 | sch->driver->verify(&sch->dev); |
399 | 283 | out: | |
400 | spin_unlock_irq(sch->lock); | 284 | spin_unlock_irq(sch->lock); |
401 | put_device(&sch->dev); | 285 | put_device(&sch->dev); |
402 | return 0; | 286 | return 0; |
403 | } | 287 | } |
404 | 288 | ||
405 | 289 | static void s390_process_res_acc (struct res_acc_data *res_data) | |
406 | static int | ||
407 | s390_process_res_acc (struct res_acc_data *res_data) | ||
408 | { | 290 | { |
409 | int rc; | ||
410 | char dbf_txt[15]; | 291 | char dbf_txt[15]; |
411 | 292 | ||
412 | sprintf(dbf_txt, "accpr%x", res_data->chp->id); | 293 | sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid, |
294 | res_data->chpid.id); | ||
413 | CIO_TRACE_EVENT( 2, dbf_txt); | 295 | CIO_TRACE_EVENT( 2, dbf_txt); |
414 | if (res_data->fla != 0) { | 296 | if (res_data->fla != 0) { |
415 | sprintf(dbf_txt, "fla%x", res_data->fla); | 297 | sprintf(dbf_txt, "fla%x", res_data->fla); |
@@ -423,12 +305,7 @@ s390_process_res_acc (struct res_acc_data *res_data)
423 | * The more information we have (info), the less scanning | 305 | * The more information we have (info), the less scanning |
424 | * will we have to do. | 306 | * will we have to do. |
425 | */ | 307 | */ |
426 | rc = for_each_subchannel(__s390_process_res_acc, res_data); | 308 | for_each_subchannel(__s390_process_res_acc, res_data); |
427 | if (css_slow_subchannels_exist()) | ||
428 | rc = -EAGAIN; | ||
429 | else if (rc != -EAGAIN) | ||
430 | rc = 0; | ||
431 | return rc; | ||
432 | } | 309 | } |
433 | 310 | ||
434 | static int | 311 | static int |
@@ -480,43 +357,45 @@ struct chsc_sei_area {
480 | /* ccdf has to be big enough for a link-incident record */ | 357 | /* ccdf has to be big enough for a link-incident record */ |
481 | } __attribute__ ((packed)); | 358 | } __attribute__ ((packed)); |
482 | 359 | ||
483 | static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) | 360 | static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) |
484 | { | 361 | { |
485 | int chpid; | 362 | struct chp_id chpid; |
363 | int id; | ||
486 | 364 | ||
487 | CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", | 365 | CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", |
488 | sei_area->rs, sei_area->rsid); | 366 | sei_area->rs, sei_area->rsid); |
489 | if (sei_area->rs != 4) | 367 | if (sei_area->rs != 4) |
490 | return 0; | 368 | return; |
491 | chpid = __get_chpid_from_lir(sei_area->ccdf); | 369 | id = __get_chpid_from_lir(sei_area->ccdf); |
492 | if (chpid < 0) | 370 | if (id < 0) |
493 | CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); | 371 | CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); |
494 | else | 372 | else { |
495 | s390_set_chpid_offline(chpid); | 373 | chp_id_init(&chpid); |
496 | 374 | chpid.id = id; | |
497 | return 0; | 375 | chsc_chp_offline(chpid); |
376 | } | ||
498 | } | 377 | } |
499 | 378 | ||
500 | static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) | 379 | static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) |
501 | { | 380 | { |
502 | struct res_acc_data res_data; | 381 | struct res_acc_data res_data; |
503 | struct device *dev; | 382 | struct chp_id chpid; |
504 | int status; | 383 | int status; |
505 | int rc; | ||
506 | 384 | ||
507 | CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " | 385 | CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " |
508 | "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); | 386 | "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); |
509 | if (sei_area->rs != 4) | 387 | if (sei_area->rs != 4) |
510 | return 0; | 388 | return; |
389 | chp_id_init(&chpid); | ||
390 | chpid.id = sei_area->rsid; | ||
511 | /* allocate a new channel path structure, if needed */ | 391 | /* allocate a new channel path structure, if needed */ |
512 | status = get_chp_status(sei_area->rsid); | 392 | status = chp_get_status(chpid); |
513 | if (status < 0) | 393 | if (status < 0) |
514 | new_channel_path(sei_area->rsid); | 394 | chp_new(chpid); |
515 | else if (!status) | 395 | else if (!status) |
516 | return 0; | 396 | return; |
517 | dev = get_device(&css[0]->chps[sei_area->rsid]->dev); | ||
518 | memset(&res_data, 0, sizeof(struct res_acc_data)); | 397 | memset(&res_data, 0, sizeof(struct res_acc_data)); |
519 | res_data.chp = to_channelpath(dev); | 398 | res_data.chpid = chpid; |
520 | if ((sei_area->vf & 0xc0) != 0) { | 399 | if ((sei_area->vf & 0xc0) != 0) { |
521 | res_data.fla = sei_area->fla; | 400 | res_data.fla = sei_area->fla; |
522 | if ((sei_area->vf & 0xc0) == 0xc0) | 401 | if ((sei_area->vf & 0xc0) == 0xc0) |
@@ -526,51 +405,82 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
526 | /* link address */ | 405 | /* link address */ |
527 | res_data.fla_mask = 0xff00; | 406 | res_data.fla_mask = 0xff00; |
528 | } | 407 | } |
529 | rc = s390_process_res_acc(&res_data); | 408 | s390_process_res_acc(&res_data); |
530 | put_device(dev); | ||
531 | |||
532 | return rc; | ||
533 | } | 409 | } |
534 | 410 | ||
535 | static int chsc_process_sei(struct chsc_sei_area *sei_area) | 411 | struct chp_config_data { |
412 | u8 map[32]; | ||
413 | u8 op; | ||
414 | u8 pc; | ||
415 | }; | ||
416 | |||
417 | static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) | ||
536 | { | 418 | { |
537 | int rc; | 419 | struct chp_config_data *data; |
420 | struct chp_id chpid; | ||
421 | int num; | ||
422 | |||
423 | CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); | ||
424 | if (sei_area->rs != 0) | ||
425 | return; | ||
426 | data = (struct chp_config_data *) &(sei_area->ccdf); | ||
427 | chp_id_init(&chpid); | ||
428 | for (num = 0; num <= __MAX_CHPID; num++) { | ||
429 | if (!chp_test_bit(data->map, num)) | ||
430 | continue; | ||
431 | chpid.id = num; | ||
432 | printk(KERN_WARNING "cio: processing configure event %d for " | ||
433 | "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id); | ||
434 | switch (data->op) { | ||
435 | case 0: | ||
436 | chp_cfg_schedule(chpid, 1); | ||
437 | break; | ||
438 | case 1: | ||
439 | chp_cfg_schedule(chpid, 0); | ||
440 | break; | ||
441 | case 2: | ||
442 | chp_cfg_cancel_deconfigure(chpid); | ||
443 | break; | ||
444 | } | ||
445 | } | ||
446 | } | ||
538 | 447 | ||
448 | static void chsc_process_sei(struct chsc_sei_area *sei_area) | ||
449 | { | ||
539 | /* Check if we might have lost some information. */ | 450 | /* Check if we might have lost some information. */ |
540 | if (sei_area->flags & 0x40) | 451 | if (sei_area->flags & 0x40) { |
541 | CIO_CRW_EVENT(2, "chsc: event overflow\n"); | 452 | CIO_CRW_EVENT(2, "chsc: event overflow\n"); |
453 | css_schedule_eval_all(); | ||
454 | } | ||
542 | /* which kind of information was stored? */ | 455 | /* which kind of information was stored? */ |
543 | rc = 0; | ||
544 | switch (sei_area->cc) { | 456 | switch (sei_area->cc) { |
545 | case 1: /* link incident*/ | 457 | case 1: /* link incident*/ |
546 | rc = chsc_process_sei_link_incident(sei_area); | 458 | chsc_process_sei_link_incident(sei_area); |
547 | break; | 459 | break; |
548 | case 2: /* i/o resource accessibility */ | 460 | chsc_process_sei_res_acc(sei_area); |
549 | rc = chsc_process_sei_res_acc(sei_area); | 461 | chsc_process_sei_res_acc(sei_area); |
462 | break; | ||
463 | case 8: /* channel-path-configuration notification */ | ||
464 | chsc_process_sei_chp_config(sei_area); | ||
550 | break; | 465 | break; |
551 | default: /* other stuff */ | 466 | default: /* other stuff */ |
552 | CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", | 467 | CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", |
553 | sei_area->cc); | 468 | sei_area->cc); |
554 | break; | 469 | break; |
555 | } | 470 | } |
556 | |||
557 | return rc; | ||
558 | } | 471 | } |
559 | 472 | ||
560 | int chsc_process_crw(void) | 473 | void chsc_process_crw(void) |
561 | { | 474 | { |
562 | struct chsc_sei_area *sei_area; | 475 | struct chsc_sei_area *sei_area; |
563 | int ret; | ||
564 | int rc; | ||
565 | 476 | ||
566 | if (!sei_page) | 477 | if (!sei_page) |
567 | return 0; | 478 | return; |
568 | /* Access to sei_page is serialized through machine check handler | 479 | /* Access to sei_page is serialized through machine check handler |
569 | * thread, so no need for locking. */ | 480 | * thread, so no need for locking. */ |
570 | sei_area = sei_page; | 481 | sei_area = sei_page; |
571 | 482 | ||
572 | CIO_TRACE_EVENT( 2, "prcss"); | 483 | CIO_TRACE_EVENT( 2, "prcss"); |
573 | ret = 0; | ||
574 | do { | 484 | do { |
575 | memset(sei_area, 0, sizeof(*sei_area)); | 485 | memset(sei_area, 0, sizeof(*sei_area)); |
576 | sei_area->request.length = 0x0010; | 486 | sei_area->request.length = 0x0010; |
@@ -580,37 +490,26 @@ int chsc_process_crw(void) | |||
580 | 490 | ||
581 | if (sei_area->response.code == 0x0001) { | 491 | if (sei_area->response.code == 0x0001) { |
582 | CIO_CRW_EVENT(4, "chsc: sei successful\n"); | 492 | CIO_CRW_EVENT(4, "chsc: sei successful\n"); |
583 | rc = chsc_process_sei(sei_area); | 493 | chsc_process_sei(sei_area); |
584 | if (rc) | ||
585 | ret = rc; | ||
586 | } else { | 494 | } else { |
587 | CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", | 495 | CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", |
588 | sei_area->response.code); | 496 | sei_area->response.code); |
589 | ret = 0; | ||
590 | break; | 497 | break; |
591 | } | 498 | } |
592 | } while (sei_area->flags & 0x80); | 499 | } while (sei_area->flags & 0x80); |
593 | |||
594 | return ret; | ||
595 | } | 500 | } |
596 | 501 | ||
597 | static int | 502 | static int |
598 | __chp_add_new_sch(struct subchannel_id schid) | 503 | __chp_add_new_sch(struct subchannel_id schid) |
599 | { | 504 | { |
600 | struct schib schib; | 505 | struct schib schib; |
601 | int ret; | ||
602 | 506 | ||
603 | if (stsch_err(schid, &schib)) | 507 | if (stsch_err(schid, &schib)) |
604 | /* We're through */ | 508 | /* We're through */ |
605 | return need_rescan ? -EAGAIN : -ENXIO; | 509 | return -ENXIO; |
606 | 510 | ||
607 | /* Put it on the slow path. */ | 511 | /* Put it on the slow path. */ |
608 | ret = css_enqueue_subchannel_slow(schid); | 512 | css_schedule_eval(schid); |
609 | if (ret) { | ||
610 | css_clear_subchannel_slow_list(); | ||
611 | need_rescan = 1; | ||
612 | return -EAGAIN; | ||
613 | } | ||
614 | return 0; | 513 | return 0; |
615 | } | 514 | } |
616 | 515 | ||
@@ -619,10 +518,10 @@ static int | |||
619 | __chp_add(struct subchannel_id schid, void *data) | 518 | __chp_add(struct subchannel_id schid, void *data) |
620 | { | 519 | { |
621 | int i, mask; | 520 | int i, mask; |
622 | struct channel_path *chp; | 521 | struct chp_id *chpid; |
623 | struct subchannel *sch; | 522 | struct subchannel *sch; |
624 | 523 | ||
625 | chp = data; | 524 | chpid = data; |
626 | sch = get_subchannel_by_schid(schid); | 525 | sch = get_subchannel_by_schid(schid); |
627 | if (!sch) | 526 | if (!sch) |
628 | /* Check if the subchannel is now available. */ | 527 | /* Check if the subchannel is now available. */ |
@@ -631,7 +530,7 @@ __chp_add(struct subchannel_id schid, void *data) | |||
631 | for (i=0; i<8; i++) { | 530 | for (i=0; i<8; i++) { |
632 | mask = 0x80 >> i; | 531 | mask = 0x80 >> i; |
633 | if ((sch->schib.pmcw.pim & mask) && | 532 | if ((sch->schib.pmcw.pim & mask) && |
634 | (sch->schib.pmcw.chpid[i] == chp->id)) { | 533 | (sch->schib.pmcw.chpid[i] == chpid->id)) { |
635 | if (stsch(sch->schid, &sch->schib) != 0) { | 534 | if (stsch(sch->schid, &sch->schib) != 0) { |
636 | /* Endgame. */ | 535 | /* Endgame. */ |
637 | spin_unlock_irq(sch->lock); | 536 | spin_unlock_irq(sch->lock); |
@@ -657,122 +556,58 @@ __chp_add(struct subchannel_id schid, void *data) | |||
657 | return 0; | 556 | return 0; |
658 | } | 557 | } |
659 | 558 | ||
660 | static int | 559 | void chsc_chp_online(struct chp_id chpid) |
661 | chp_add(int chpid) | ||
662 | { | 560 | { |
663 | int rc; | ||
664 | char dbf_txt[15]; | 561 | char dbf_txt[15]; |
665 | struct device *dev; | ||
666 | 562 | ||
667 | if (!get_chp_status(chpid)) | 563 | sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); |
668 | return 0; /* no need to do the rest */ | ||
669 | |||
670 | sprintf(dbf_txt, "cadd%x", chpid); | ||
671 | CIO_TRACE_EVENT(2, dbf_txt); | 564 | CIO_TRACE_EVENT(2, dbf_txt); |
672 | 565 | ||
673 | dev = get_device(&css[0]->chps[chpid]->dev); | 566 | if (chp_get_status(chpid) != 0) |
674 | rc = for_each_subchannel(__chp_add, to_channelpath(dev)); | 567 | for_each_subchannel(__chp_add, &chpid); |
675 | if (css_slow_subchannels_exist()) | ||
676 | rc = -EAGAIN; | ||
677 | if (rc != -EAGAIN) | ||
678 | rc = 0; | ||
679 | put_device(dev); | ||
680 | return rc; | ||
681 | } | 568 | } |
682 | 569 | ||
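chsc_chp_online() above is a typical user of the for_each_subchannel() iterator declared in css.h: the caller passes a callback taking a subchannel_id plus an opaque data pointer, and iteration stops once the callback returns non-zero. A hedged sketch of that calling convention; count_visited() is a made-up callback, not part of the driver:

/* Sketch only: demonstrates the iterator contract used by
 * chsc_chp_online(); count_visited() is a hypothetical callback. */
static int count_visited(struct subchannel_id schid, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* a non-zero return would stop the iteration */
}

static void example_walk(void)
{
	int count = 0;

	for_each_subchannel(count_visited, &count);
	/* count now holds the number of subchannel ids visited */
}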
683 | /* | 570 | static void __s390_subchannel_vary_chpid(struct subchannel *sch, |
684 | * Handling of crw machine checks with channel path source. | 571 | struct chp_id chpid, int on) |
685 | */ | ||
686 | int | ||
687 | chp_process_crw(int chpid, int on) | ||
688 | { | ||
689 | if (on == 0) { | ||
690 | /* Path has gone. We use the link incident routine.*/ | ||
691 | s390_set_chpid_offline(chpid); | ||
692 | return 0; /* De-register is async anyway. */ | ||
693 | } | ||
694 | /* | ||
695 | * Path has come. Allocate a new channel path structure, | ||
696 | * if needed. | ||
697 | */ | ||
698 | if (get_chp_status(chpid) < 0) | ||
699 | new_channel_path(chpid); | ||
700 | /* Avoid the extra overhead in process_rec_acc. */ | ||
701 | return chp_add(chpid); | ||
702 | } | ||
703 | |||
704 | static int check_for_io_on_path(struct subchannel *sch, int index) | ||
705 | { | ||
706 | int cc; | ||
707 | |||
708 | cc = stsch(sch->schid, &sch->schib); | ||
709 | if (cc) | ||
710 | return 0; | ||
711 | if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) | ||
712 | return 1; | ||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | static void terminate_internal_io(struct subchannel *sch) | ||
717 | { | ||
718 | if (cio_clear(sch)) { | ||
719 | /* Recheck device in case clear failed. */ | ||
720 | sch->lpm = 0; | ||
721 | if (device_trigger_verify(sch) != 0) { | ||
722 | if(css_enqueue_subchannel_slow(sch->schid)) { | ||
723 | css_clear_subchannel_slow_list(); | ||
724 | need_rescan = 1; | ||
725 | } | ||
726 | } | ||
727 | return; | ||
728 | } | ||
729 | /* Request retry of internal operation. */ | ||
730 | device_set_intretry(sch); | ||
731 | /* Call handler. */ | ||
732 | if (sch->driver && sch->driver->termination) | ||
733 | sch->driver->termination(&sch->dev); | ||
734 | } | ||
735 | |||
736 | static void | ||
737 | __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) | ||
738 | { | 572 | { |
739 | int chp, old_lpm; | 573 | int chp, old_lpm; |
574 | int mask; | ||
740 | unsigned long flags; | 575 | unsigned long flags; |
741 | 576 | ||
742 | if (!sch->ssd_info.valid) | ||
743 | return; | ||
744 | |||
745 | spin_lock_irqsave(sch->lock, flags); | 577 | spin_lock_irqsave(sch->lock, flags); |
746 | old_lpm = sch->lpm; | 578 | old_lpm = sch->lpm; |
747 | for (chp = 0; chp < 8; chp++) { | 579 | for (chp = 0; chp < 8; chp++) { |
748 | if (sch->ssd_info.chpid[chp] != chpid) | 580 | mask = 0x80 >> chp; |
581 | if (!(sch->ssd_info.path_mask & mask)) | ||
582 | continue; | ||
583 | if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid)) | ||
749 | continue; | 584 | continue; |
750 | 585 | ||
751 | if (on) { | 586 | if (on) { |
752 | sch->opm |= (0x80 >> chp); | 587 | sch->opm |= mask; |
753 | sch->lpm |= (0x80 >> chp); | 588 | sch->lpm |= mask; |
754 | if (!old_lpm) | 589 | if (!old_lpm) |
755 | device_trigger_reprobe(sch); | 590 | device_trigger_reprobe(sch); |
756 | else if (sch->driver && sch->driver->verify) | 591 | else if (sch->driver && sch->driver->verify) |
757 | sch->driver->verify(&sch->dev); | 592 | sch->driver->verify(&sch->dev); |
758 | break; | 593 | break; |
759 | } | 594 | } |
760 | sch->opm &= ~(0x80 >> chp); | 595 | sch->opm &= ~mask; |
761 | sch->lpm &= ~(0x80 >> chp); | 596 | sch->lpm &= ~mask; |
762 | if (check_for_io_on_path(sch, chp)) { | 597 | if (check_for_io_on_path(sch, mask)) { |
763 | if (device_is_online(sch)) | 598 | if (device_is_online(sch)) |
764 | /* Path verification is done after killing. */ | 599 | /* Path verification is done after killing. */ |
765 | device_kill_io(sch); | 600 | device_kill_io(sch); |
766 | else | 601 | else { |
767 | /* Kill and retry internal I/O. */ | 602 | /* Kill and retry internal I/O. */ |
768 | terminate_internal_io(sch); | 603 | terminate_internal_io(sch); |
769 | } else if (!sch->lpm) { | 604 | /* Re-start path verification. */ |
770 | if (device_trigger_verify(sch) != 0) { | 605 | if (sch->driver && sch->driver->verify) |
771 | if (css_enqueue_subchannel_slow(sch->schid)) { | 606 | sch->driver->verify(&sch->dev); |
772 | css_clear_subchannel_slow_list(); | ||
773 | need_rescan = 1; | ||
774 | } | ||
775 | } | 607 | } |
608 | } else if (!sch->lpm) { | ||
609 | if (device_trigger_verify(sch) != 0) | ||
610 | css_schedule_eval(sch->schid); | ||
776 | } else if (sch->driver && sch->driver->verify) | 611 | } else if (sch->driver && sch->driver->verify) |
777 | sch->driver->verify(&sch->dev); | 612 | sch->driver->verify(&sch->dev); |
778 | break; | 613 | break; |
@@ -780,11 +615,10 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) | |||
780 | spin_unlock_irqrestore(sch->lock, flags); | 615 | spin_unlock_irqrestore(sch->lock, flags); |
781 | } | 616 | } |
782 | 617 | ||
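__s390_subchannel_vary_chpid() above derives one bit per PMCW slot with mask = 0x80 >> chp, so slot 0 corresponds to 0x80 and slot 7 to 0x01, then sets or clears that bit in both opm (operational path mask) and lpm (logical path mask). A standalone arithmetic sketch of the same update, with illustrative starting values:

#include <stdio.h>

int main(void)
{
	unsigned char opm = 0xc0;	/* paths in slots 0 and 1 operational */
	unsigned char lpm = 0xc0;
	int chp = 1;			/* vary the path in PMCW slot 1 */
	int mask = 0x80 >> chp;		/* 0x40 */

	/* vary off: clear the bit in both masks */
	opm &= ~mask;
	lpm &= ~mask;
	printf("after vary off: opm=%02x lpm=%02x\n", opm, lpm);

	/* vary on: set the bit again */
	opm |= mask;
	lpm |= mask;
	printf("after vary on:  opm=%02x lpm=%02x\n", opm, lpm);
	return 0;
}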
783 | static int | 618 | static int s390_subchannel_vary_chpid_off(struct device *dev, void *data) |
784 | s390_subchannel_vary_chpid_off(struct device *dev, void *data) | ||
785 | { | 619 | { |
786 | struct subchannel *sch; | 620 | struct subchannel *sch; |
787 | __u8 *chpid; | 621 | struct chp_id *chpid; |
788 | 622 | ||
789 | sch = to_subchannel(dev); | 623 | sch = to_subchannel(dev); |
790 | chpid = data; | 624 | chpid = data; |
@@ -793,11 +627,10 @@ s390_subchannel_vary_chpid_off(struct device *dev, void *data) | |||
793 | return 0; | 627 | return 0; |
794 | } | 628 | } |
795 | 629 | ||
796 | static int | 630 | static int s390_subchannel_vary_chpid_on(struct device *dev, void *data) |
797 | s390_subchannel_vary_chpid_on(struct device *dev, void *data) | ||
798 | { | 631 | { |
799 | struct subchannel *sch; | 632 | struct subchannel *sch; |
800 | __u8 *chpid; | 633 | struct chp_id *chpid; |
801 | 634 | ||
802 | sch = to_subchannel(dev); | 635 | sch = to_subchannel(dev); |
803 | chpid = data; | 636 | chpid = data; |
@@ -821,40 +654,17 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data) | |||
821 | /* We're through */ | 654 | /* We're through */ |
822 | return -ENXIO; | 655 | return -ENXIO; |
823 | /* Put it on the slow path. */ | 656 | /* Put it on the slow path. */ |
824 | if (css_enqueue_subchannel_slow(schid)) { | 657 | css_schedule_eval(schid); |
825 | css_clear_subchannel_slow_list(); | ||
826 | need_rescan = 1; | ||
827 | return -EAGAIN; | ||
828 | } | ||
829 | return 0; | 658 | return 0; |
830 | } | 659 | } |
831 | 660 | ||
832 | /* | 661 | /** |
833 | * Function: s390_vary_chpid | 662 | * chsc_chp_vary - propagate channel-path vary operation to subchannels |
834 | * Varies the specified chpid online or offline | 663 | * @chpid: channel-path ID ||
664 | * @on: non-zero for vary online, zero for vary offline | ||
835 | */ | 665 | */ |
836 | static int | 666 | int chsc_chp_vary(struct chp_id chpid, int on) |
837 | s390_vary_chpid( __u8 chpid, int on) | ||
838 | { | 667 | { |
839 | char dbf_text[15]; | ||
840 | int status; | ||
841 | |||
842 | sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid); | ||
843 | CIO_TRACE_EVENT( 2, dbf_text); | ||
844 | |||
845 | status = get_chp_status(chpid); | ||
846 | if (status < 0) { | ||
847 | printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid); | ||
848 | return -EINVAL; | ||
849 | } | ||
850 | |||
851 | if (!on && !status) { | ||
852 | printk(KERN_ERR "chpid %x is already offline\n", chpid); | ||
853 | return -EINVAL; | ||
854 | } | ||
855 | |||
856 | set_chp_logically_online(chpid, on); | ||
857 | |||
858 | /* | 668 | /* |
859 | * Redo PathVerification on the devices the chpid connects to | 669 | * Redo PathVerification on the devices the chpid connects to |
860 | */ | 670 | */ |
@@ -865,118 +675,9 @@ s390_vary_chpid( __u8 chpid, int on) | |||
865 | if (on) | 675 | if (on) |
866 | /* Scan for new devices on varied on path. */ | 676 | /* Scan for new devices on varied on path. */ |
867 | for_each_subchannel(__s390_vary_chpid_on, NULL); | 677 | for_each_subchannel(__s390_vary_chpid_on, NULL); |
868 | if (need_rescan || css_slow_subchannels_exist()) | ||
869 | queue_work(slow_path_wq, &slow_path_work); | ||
870 | return 0; | 678 | return 0; |
871 | } | 679 | } |
872 | 680 | ||
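The body of chsc_chp_vary() is mostly elided from this hunk (new lines 669-677); going by the two bus callbacks defined above, the propagation presumably walks the css bus once per request. A sketch of that shape, assuming the standard bus_for_each_dev() call; the exact call sites are not shown here:

/* Illustrative only: how a vary request could fan out to the
 * subchannels; the real call sites are outside this hunk. */
static void vary_sketch(struct chp_id chpid, int on)
{
	if (on)
		bus_for_each_dev(&css_bus_type, NULL, &chpid,
				 s390_subchannel_vary_chpid_on);
	else
		bus_for_each_dev(&css_bus_type, NULL, &chpid,
				 s390_subchannel_vary_chpid_off);
}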
873 | /* | ||
874 | * Channel measurement related functions | ||
875 | */ | ||
876 | static ssize_t | ||
877 | chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off, | ||
878 | size_t count) | ||
879 | { | ||
880 | struct channel_path *chp; | ||
881 | unsigned int size; | ||
882 | |||
883 | chp = to_channelpath(container_of(kobj, struct device, kobj)); | ||
884 | if (!chp->cmg_chars) | ||
885 | return 0; | ||
886 | |||
887 | size = sizeof(struct cmg_chars); | ||
888 | |||
889 | if (off > size) | ||
890 | return 0; | ||
891 | if (off + count > size) | ||
892 | count = size - off; | ||
893 | memcpy(buf, chp->cmg_chars + off, count); | ||
894 | return count; | ||
895 | } | ||
896 | |||
897 | static struct bin_attribute chp_measurement_chars_attr = { | ||
898 | .attr = { | ||
899 | .name = "measurement_chars", | ||
900 | .mode = S_IRUSR, | ||
901 | .owner = THIS_MODULE, | ||
902 | }, | ||
903 | .size = sizeof(struct cmg_chars), | ||
904 | .read = chp_measurement_chars_read, | ||
905 | }; | ||
906 | |||
907 | static void | ||
908 | chp_measurement_copy_block(struct cmg_entry *buf, | ||
909 | struct channel_subsystem *css, int chpid) | ||
910 | { | ||
911 | void *area; | ||
912 | struct cmg_entry *entry, reference_buf; | ||
913 | int idx; | ||
914 | |||
915 | if (chpid < 128) { | ||
916 | area = css->cub_addr1; | ||
917 | idx = chpid; | ||
918 | } else { | ||
919 | area = css->cub_addr2; | ||
920 | idx = chpid - 128; | ||
921 | } | ||
922 | entry = area + (idx * sizeof(struct cmg_entry)); | ||
923 | do { | ||
924 | memcpy(buf, entry, sizeof(*entry)); | ||
925 | memcpy(&reference_buf, entry, sizeof(*entry)); | ||
926 | } while (reference_buf.values[0] != buf->values[0]); | ||
927 | } | ||
928 | |||
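chp_measurement_copy_block() (removed here, moved to the new chp.c) copies a measurement entry that the hardware may update concurrently: it reads the entry twice and retries until both reads agree on values[0], a seqlock-like pattern without taking any lock. A standalone sketch of the same retry-until-stable read, with a toy type standing in for struct cmg_entry:

#include <string.h>

/* Toy stand-in for struct cmg_entry; only the retry pattern matters. */
struct toy_entry {
	unsigned int values[8];
};

/* Copy *src into *dst, retrying until two consecutive reads agree on
 * values[0]; mirrors the loop in chp_measurement_copy_block(). */
static void copy_stable(struct toy_entry *dst, const struct toy_entry *src)
{
	struct toy_entry reference;

	do {
		memcpy(dst, src, sizeof(*dst));
		memcpy(&reference, src, sizeof(reference));
	} while (reference.values[0] != dst->values[0]);
}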
929 | static ssize_t | ||
930 | chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count) | ||
931 | { | ||
932 | struct channel_path *chp; | ||
933 | struct channel_subsystem *css; | ||
934 | unsigned int size; | ||
935 | |||
936 | chp = to_channelpath(container_of(kobj, struct device, kobj)); | ||
937 | css = to_css(chp->dev.parent); | ||
938 | |||
939 | size = sizeof(struct cmg_entry); | ||
940 | |||
941 | /* Only allow single reads. */ | ||
942 | if (off || count < size) | ||
943 | return 0; | ||
944 | chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id); | ||
945 | count = size; | ||
946 | return count; | ||
947 | } | ||
948 | |||
949 | static struct bin_attribute chp_measurement_attr = { | ||
950 | .attr = { | ||
951 | .name = "measurement", | ||
952 | .mode = S_IRUSR, | ||
953 | .owner = THIS_MODULE, | ||
954 | }, | ||
955 | .size = sizeof(struct cmg_entry), | ||
956 | .read = chp_measurement_read, | ||
957 | }; | ||
958 | |||
959 | static void | ||
960 | chsc_remove_chp_cmg_attr(struct channel_path *chp) | ||
961 | { | ||
962 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
963 | device_remove_bin_file(&chp->dev, &chp_measurement_attr); | ||
964 | } | ||
965 | |||
966 | static int | ||
967 | chsc_add_chp_cmg_attr(struct channel_path *chp) | ||
968 | { | ||
969 | int ret; | ||
970 | |||
971 | ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
972 | if (ret) | ||
973 | return ret; | ||
974 | ret = device_create_bin_file(&chp->dev, &chp_measurement_attr); | ||
975 | if (ret) | ||
976 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); | ||
977 | return ret; | ||
978 | } | ||
979 | |||
980 | static void | 681 | static void |
981 | chsc_remove_cmg_attr(struct channel_subsystem *css) | 682 | chsc_remove_cmg_attr(struct channel_subsystem *css) |
982 | { | 683 | { |
@@ -985,7 +686,7 @@ chsc_remove_cmg_attr(struct channel_subsystem *css) | |||
985 | for (i = 0; i <= __MAX_CHPID; i++) { | 686 | for (i = 0; i <= __MAX_CHPID; i++) { |
986 | if (!css->chps[i]) | 687 | if (!css->chps[i]) |
987 | continue; | 688 | continue; |
988 | chsc_remove_chp_cmg_attr(css->chps[i]); | 689 | chp_remove_cmg_attr(css->chps[i]); |
989 | } | 690 | } |
990 | } | 691 | } |
991 | 692 | ||
@@ -998,7 +699,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css) | |||
998 | for (i = 0; i <= __MAX_CHPID; i++) { | 699 | for (i = 0; i <= __MAX_CHPID; i++) { |
999 | if (!css->chps[i]) | 700 | if (!css->chps[i]) |
1000 | continue; | 701 | continue; |
1001 | ret = chsc_add_chp_cmg_attr(css->chps[i]); | 702 | ret = chp_add_cmg_attr(css->chps[i]); |
1002 | if (ret) | 703 | if (ret) |
1003 | goto cleanup; | 704 | goto cleanup; |
1004 | } | 705 | } |
@@ -1007,12 +708,11 @@ cleanup: | |||
1007 | for (--i; i >= 0; i--) { | 708 | for (--i; i >= 0; i--) { |
1008 | if (!css->chps[i]) | 709 | if (!css->chps[i]) |
1009 | continue; | 710 | continue; |
1010 | chsc_remove_chp_cmg_attr(css->chps[i]); | 711 | chp_remove_cmg_attr(css->chps[i]); |
1011 | } | 712 | } |
1012 | return ret; | 713 | return ret; |
1013 | } | 714 | } |
1014 | 715 | ||
1015 | |||
1016 | static int | 716 | static int |
1017 | __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | 717 | __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) |
1018 | { | 718 | { |
@@ -1118,7 +818,7 @@ chsc_secm(struct channel_subsystem *css, int enable) | |||
1118 | } else | 818 | } else |
1119 | chsc_remove_cmg_attr(css); | 819 | chsc_remove_cmg_attr(css); |
1120 | } | 820 | } |
1121 | if (enable && !css->cm_enabled) { | 821 | if (!css->cm_enabled) { |
1122 | free_page((unsigned long)css->cub_addr1); | 822 | free_page((unsigned long)css->cub_addr1); |
1123 | free_page((unsigned long)css->cub_addr2); | 823 | free_page((unsigned long)css->cub_addr2); |
1124 | } | 824 | } |
@@ -1127,109 +827,8 @@ chsc_secm(struct channel_subsystem *css, int enable) | |||
1127 | return ret; | 827 | return ret; |
1128 | } | 828 | } |
1129 | 829 | ||
1130 | /* | 830 | int chsc_determine_channel_path_description(struct chp_id chpid, |
1131 | * Files for the channel path entries. | 831 | struct channel_path_desc *desc) |
1132 | */ | ||
1133 | static ssize_t | ||
1134 | chp_status_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1135 | { | ||
1136 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | ||
1137 | |||
1138 | if (!chp) | ||
1139 | return 0; | ||
1140 | return (get_chp_status(chp->id) ? sprintf(buf, "online\n") : | ||
1141 | sprintf(buf, "offline\n")); | ||
1142 | } | ||
1143 | |||
1144 | static ssize_t | ||
1145 | chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | ||
1146 | { | ||
1147 | struct channel_path *cp = container_of(dev, struct channel_path, dev); | ||
1148 | char cmd[10]; | ||
1149 | int num_args; | ||
1150 | int error; | ||
1151 | |||
1152 | num_args = sscanf(buf, "%5s", cmd); | ||
1153 | if (!num_args) | ||
1154 | return count; | ||
1155 | |||
1156 | if (!strnicmp(cmd, "on", 2)) | ||
1157 | error = s390_vary_chpid(cp->id, 1); | ||
1158 | else if (!strnicmp(cmd, "off", 3)) | ||
1159 | error = s390_vary_chpid(cp->id, 0); | ||
1160 | else | ||
1161 | error = -EINVAL; | ||
1162 | |||
1163 | return error < 0 ? error : count; | ||
1164 | |||
1165 | } | ||
1166 | |||
1167 | static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); | ||
1168 | |||
1169 | static ssize_t | ||
1170 | chp_type_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1171 | { | ||
1172 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | ||
1173 | |||
1174 | if (!chp) | ||
1175 | return 0; | ||
1176 | return sprintf(buf, "%x\n", chp->desc.desc); | ||
1177 | } | ||
1178 | |||
1179 | static DEVICE_ATTR(type, 0444, chp_type_show, NULL); | ||
1180 | |||
1181 | static ssize_t | ||
1182 | chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1183 | { | ||
1184 | struct channel_path *chp = to_channelpath(dev); | ||
1185 | |||
1186 | if (!chp) | ||
1187 | return 0; | ||
1188 | if (chp->cmg == -1) /* channel measurements not available */ | ||
1189 | return sprintf(buf, "unknown\n"); | ||
1190 | return sprintf(buf, "%x\n", chp->cmg); | ||
1191 | } | ||
1192 | |||
1193 | static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL); | ||
1194 | |||
1195 | static ssize_t | ||
1196 | chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1197 | { | ||
1198 | struct channel_path *chp = to_channelpath(dev); | ||
1199 | |||
1200 | if (!chp) | ||
1201 | return 0; | ||
1202 | if (chp->shared == -1) /* channel measurements not available */ | ||
1203 | return sprintf(buf, "unknown\n"); | ||
1204 | return sprintf(buf, "%x\n", chp->shared); | ||
1205 | } | ||
1206 | |||
1207 | static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); | ||
1208 | |||
1209 | static struct attribute * chp_attrs[] = { | ||
1210 | &dev_attr_status.attr, | ||
1211 | &dev_attr_type.attr, | ||
1212 | &dev_attr_cmg.attr, | ||
1213 | &dev_attr_shared.attr, | ||
1214 | NULL, | ||
1215 | }; | ||
1216 | |||
1217 | static struct attribute_group chp_attr_group = { | ||
1218 | .attrs = chp_attrs, | ||
1219 | }; | ||
1220 | |||
1221 | static void | ||
1222 | chp_release(struct device *dev) | ||
1223 | { | ||
1224 | struct channel_path *cp; | ||
1225 | |||
1226 | cp = container_of(dev, struct channel_path, dev); | ||
1227 | kfree(cp); | ||
1228 | } | ||
1229 | |||
1230 | static int | ||
1231 | chsc_determine_channel_path_description(int chpid, | ||
1232 | struct channel_path_desc *desc) | ||
1233 | { | 832 | { |
1234 | int ccode, ret; | 833 | int ccode, ret; |
1235 | 834 | ||
@@ -1252,8 +851,8 @@ chsc_determine_channel_path_description(int chpid, | |||
1252 | scpd_area->request.length = 0x0010; | 851 | scpd_area->request.length = 0x0010; |
1253 | scpd_area->request.code = 0x0002; | 852 | scpd_area->request.code = 0x0002; |
1254 | 853 | ||
1255 | scpd_area->first_chpid = chpid; | 854 | scpd_area->first_chpid = chpid.id; |
1256 | scpd_area->last_chpid = chpid; | 855 | scpd_area->last_chpid = chpid.id; |
1257 | 856 | ||
1258 | ccode = chsc(scpd_area); | 857 | ccode = chsc(scpd_area); |
1259 | if (ccode > 0) { | 858 | if (ccode > 0) { |
@@ -1316,8 +915,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, | |||
1316 | } | 915 | } |
1317 | } | 916 | } |
1318 | 917 | ||
1319 | static int | 918 | int chsc_get_channel_measurement_chars(struct channel_path *chp) |
1320 | chsc_get_channel_measurement_chars(struct channel_path *chp) | ||
1321 | { | 919 | { |
1322 | int ccode, ret; | 920 | int ccode, ret; |
1323 | 921 | ||
@@ -1349,8 +947,8 @@ chsc_get_channel_measurement_chars(struct channel_path *chp) | |||
1349 | scmc_area->request.length = 0x0010; | 947 | scmc_area->request.length = 0x0010; |
1350 | scmc_area->request.code = 0x0022; | 948 | scmc_area->request.code = 0x0022; |
1351 | 949 | ||
1352 | scmc_area->first_chpid = chp->id; | 950 | scmc_area->first_chpid = chp->chpid.id; |
1353 | scmc_area->last_chpid = chp->id; | 951 | scmc_area->last_chpid = chp->chpid.id; |
1354 | 952 | ||
1355 | ccode = chsc(scmc_area); | 953 | ccode = chsc(scmc_area); |
1356 | if (ccode > 0) { | 954 | if (ccode > 0) { |
@@ -1392,94 +990,6 @@ out: | |||
1392 | return ret; | 990 | return ret; |
1393 | } | 991 | } |
1394 | 992 | ||
1395 | /* | ||
1396 | * Entries for chpids on the system bus. | ||
1397 | * This replaces /proc/chpids. | ||
1398 | */ | ||
1399 | static int | ||
1400 | new_channel_path(int chpid) | ||
1401 | { | ||
1402 | struct channel_path *chp; | ||
1403 | int ret; | ||
1404 | |||
1405 | chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL); | ||
1406 | if (!chp) | ||
1407 | return -ENOMEM; | ||
1408 | |||
1409 | /* fill in status, etc. */ | ||
1410 | chp->id = chpid; | ||
1411 | chp->state = 1; | ||
1412 | chp->dev.parent = &css[0]->device; | ||
1413 | chp->dev.release = chp_release; | ||
1414 | snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); | ||
1415 | |||
1416 | /* Obtain channel path description and fill it in. */ | ||
1417 | ret = chsc_determine_channel_path_description(chpid, &chp->desc); | ||
1418 | if (ret) | ||
1419 | goto out_free; | ||
1420 | /* Get channel-measurement characteristics. */ | ||
1421 | if (css_characteristics_avail && css_chsc_characteristics.scmc | ||
1422 | && css_chsc_characteristics.secm) { | ||
1423 | ret = chsc_get_channel_measurement_chars(chp); | ||
1424 | if (ret) | ||
1425 | goto out_free; | ||
1426 | } else { | ||
1427 | static int msg_done; | ||
1428 | |||
1429 | if (!msg_done) { | ||
1430 | printk(KERN_WARNING "cio: Channel measurements not " | ||
1431 | "available, continuing.\n"); | ||
1432 | msg_done = 1; | ||
1433 | } | ||
1434 | chp->cmg = -1; | ||
1435 | } | ||
1436 | |||
1437 | /* make it known to the system */ | ||
1438 | ret = device_register(&chp->dev); | ||
1439 | if (ret) { | ||
1440 | printk(KERN_WARNING "%s: could not register %02x\n", | ||
1441 | __func__, chpid); | ||
1442 | goto out_free; | ||
1443 | } | ||
1444 | ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); | ||
1445 | if (ret) { | ||
1446 | device_unregister(&chp->dev); | ||
1447 | goto out_free; | ||
1448 | } | ||
1449 | mutex_lock(&css[0]->mutex); | ||
1450 | if (css[0]->cm_enabled) { | ||
1451 | ret = chsc_add_chp_cmg_attr(chp); | ||
1452 | if (ret) { | ||
1453 | sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); | ||
1454 | device_unregister(&chp->dev); | ||
1455 | mutex_unlock(&css[0]->mutex); | ||
1456 | goto out_free; | ||
1457 | } | ||
1458 | } | ||
1459 | css[0]->chps[chpid] = chp; | ||
1460 | mutex_unlock(&css[0]->mutex); | ||
1461 | return ret; | ||
1462 | out_free: | ||
1463 | kfree(chp); | ||
1464 | return ret; | ||
1465 | } | ||
1466 | |||
1467 | void * | ||
1468 | chsc_get_chp_desc(struct subchannel *sch, int chp_no) | ||
1469 | { | ||
1470 | struct channel_path *chp; | ||
1471 | struct channel_path_desc *desc; | ||
1472 | |||
1473 | chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]]; | ||
1474 | if (!chp) | ||
1475 | return NULL; | ||
1476 | desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); | ||
1477 | if (!desc) | ||
1478 | return NULL; | ||
1479 | memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); | ||
1480 | return desc; | ||
1481 | } | ||
1482 | |||
1483 | static int __init | 993 | static int __init |
1484 | chsc_alloc_sei_area(void) | 994 | chsc_alloc_sei_area(void) |
1485 | { | 995 | { |
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 0fb2b024208f..2ad81d11cf7b 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
@@ -1,9 +1,10 @@ | |||
1 | #ifndef S390_CHSC_H | 1 | #ifndef S390_CHSC_H |
2 | #define S390_CHSC_H | 2 | #define S390_CHSC_H |
3 | 3 | ||
4 | #define CHSC_SEI_ACC_CHPID 1 | 4 | #include <linux/types.h> |
5 | #define CHSC_SEI_ACC_LINKADDR 2 | 5 | #include <linux/device.h> |
6 | #define CHSC_SEI_ACC_FULLLINKADDR 3 | 6 | #include <asm/chpid.h> |
7 | #include "schid.h" | ||
7 | 8 | ||
8 | #define CHSC_SDA_OC_MSS 0x2 | 9 | #define CHSC_SDA_OC_MSS 0x2 |
9 | 10 | ||
@@ -33,23 +34,9 @@ struct channel_path_desc { | |||
33 | u8 chpp; | 34 | u8 chpp; |
34 | } __attribute__ ((packed)); | 35 | } __attribute__ ((packed)); |
35 | 36 | ||
36 | struct channel_path { | 37 | struct channel_path; |
37 | int id; | ||
38 | int state; | ||
39 | struct channel_path_desc desc; | ||
40 | /* Channel-measurement related stuff: */ | ||
41 | int cmg; | ||
42 | int shared; | ||
43 | void *cmg_chars; | ||
44 | struct device dev; | ||
45 | }; | ||
46 | 38 | ||
47 | extern void s390_process_css( void ); | 39 | extern void chsc_process_crw(void); |
48 | extern void chsc_validate_chpids(struct subchannel *); | ||
49 | extern void chpid_is_actually_online(int); | ||
50 | extern int css_get_ssd_info(struct subchannel *); | ||
51 | extern int chsc_process_crw(void); | ||
52 | extern int chp_process_crw(int, int); | ||
53 | 40 | ||
54 | struct css_general_char { | 41 | struct css_general_char { |
55 | u64 : 41; | 42 | u64 : 41; |
@@ -82,15 +69,26 @@ struct css_chsc_char { | |||
82 | extern struct css_general_char css_general_characteristics; | 69 | extern struct css_general_char css_general_characteristics; |
83 | extern struct css_chsc_char css_chsc_characteristics; | 70 | extern struct css_chsc_char css_chsc_characteristics; |
84 | 71 | ||
72 | struct chsc_ssd_info { | ||
73 | u8 path_mask; | ||
74 | u8 fla_valid_mask; | ||
75 | struct chp_id chpid[8]; | ||
76 | u16 fla[8]; | ||
77 | }; | ||
78 | extern int chsc_get_ssd_info(struct subchannel_id schid, | ||
79 | struct chsc_ssd_info *ssd); | ||
85 | extern int chsc_determine_css_characteristics(void); | 80 | extern int chsc_determine_css_characteristics(void); |
86 | extern int css_characteristics_avail; | 81 | extern int css_characteristics_avail; |
87 | 82 | ||
88 | extern void *chsc_get_chp_desc(struct subchannel*, int); | ||
89 | |||
90 | extern int chsc_enable_facility(int); | 83 | extern int chsc_enable_facility(int); |
91 | struct channel_subsystem; | 84 | struct channel_subsystem; |
92 | extern int chsc_secm(struct channel_subsystem *, int); | 85 | extern int chsc_secm(struct channel_subsystem *, int); |
93 | 86 | ||
94 | #define to_channelpath(device) container_of(device, struct channel_path, dev) | 87 | int chsc_chp_vary(struct chp_id chpid, int on); |
88 | int chsc_determine_channel_path_description(struct chp_id chpid, | ||
89 | struct channel_path_desc *desc); | ||
90 | void chsc_chp_online(struct chp_id chpid); | ||
91 | void chsc_chp_offline(struct chp_id chpid); | ||
92 | int chsc_get_channel_measurement_chars(struct channel_path *chp); | ||
95 | 93 | ||
96 | #endif | 94 | #endif |
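The header now deals in struct chp_id from asm/chpid.h (added alongside this patch) instead of a bare int chpid. A hedged usage sketch of the interfaces declared above; the chpid value 0x50 is arbitrary and a zero return from chsc_determine_channel_path_description() is assumed to mean success:

/* Sketch only: exercising the chp_id based declarations above. */
static void chpid_example(void)
{
	struct chp_id chpid;
	struct channel_path_desc desc;

	chp_id_init(&chpid);	/* start from a zero-filled id, cssid 0 */
	chpid.id = 0x50;	/* arbitrary example chpid */

	if (chsc_determine_channel_path_description(chpid, &desc) == 0)
		chsc_chp_online(chpid);
}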
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 9cb129ab5be5..ea1defba5693 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/setup.h> | 22 | #include <asm/setup.h> |
23 | #include <asm/reset.h> | 23 | #include <asm/reset.h> |
24 | #include <asm/ipl.h> | 24 | #include <asm/ipl.h> |
25 | #include <asm/chpid.h> | ||
25 | #include "airq.h" | 26 | #include "airq.h" |
26 | #include "cio.h" | 27 | #include "cio.h" |
27 | #include "css.h" | 28 | #include "css.h" |
@@ -29,6 +30,7 @@ | |||
29 | #include "ioasm.h" | 30 | #include "ioasm.h" |
30 | #include "blacklist.h" | 31 | #include "blacklist.h" |
31 | #include "cio_debug.h" | 32 | #include "cio_debug.h" |
33 | #include "chp.h" | ||
32 | #include "../s390mach.h" | 34 | #include "../s390mach.h" |
33 | 35 | ||
34 | debug_info_t *cio_debug_msg_id; | 36 | debug_info_t *cio_debug_msg_id; |
@@ -592,9 +594,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | |||
592 | err = -ENODEV; | 594 | err = -ENODEV; |
593 | goto out; | 595 | goto out; |
594 | } | 596 | } |
595 | sch->opm = 0xff; | 597 | if (cio_is_console(sch->schid)) |
596 | if (!cio_is_console(sch->schid)) | 598 | sch->opm = 0xff; |
597 | chsc_validate_chpids(sch); | 599 | else |
600 | sch->opm = chp_get_sch_opm(sch); | ||
598 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | 601 | sch->lpm = sch->schib.pmcw.pam & sch->opm; |
599 | 602 | ||
600 | CIO_DEBUG(KERN_INFO, 0, | 603 | CIO_DEBUG(KERN_INFO, 0, |
@@ -954,6 +957,7 @@ static void css_reset(void) | |||
954 | { | 957 | { |
955 | int i, ret; | 958 | int i, ret; |
956 | unsigned long long timeout; | 959 | unsigned long long timeout; |
960 | struct chp_id chpid; | ||
957 | 961 | ||
958 | /* Reset subchannels. */ | 962 | /* Reset subchannels. */ |
959 | for_each_subchannel(__shutdown_subchannel_easy, NULL); | 963 | for_each_subchannel(__shutdown_subchannel_easy, NULL); |
@@ -963,8 +967,10 @@ static void css_reset(void) | |||
963 | __ctl_set_bit(14, 28); | 967 | __ctl_set_bit(14, 28); |
964 | /* Temporarily reenable machine checks. */ | 968 | /* Temporarily reenable machine checks. */ |
965 | local_mcck_enable(); | 969 | local_mcck_enable(); |
970 | chp_id_init(&chpid); | ||
966 | for (i = 0; i <= __MAX_CHPID; i++) { | 971 | for (i = 0; i <= __MAX_CHPID; i++) { |
967 | ret = rchp(i); | 972 | chpid.id = i; |
973 | ret = rchp(chpid); | ||
968 | if ((ret == 0) || (ret == 2)) | 974 | if ((ret == 0) || (ret == 2)) |
969 | /* | 975 | /* |
970 | * rchp either succeeded, or another rchp is already | 976 | * rchp either succeeded, or another rchp is already |
@@ -1048,37 +1054,19 @@ void reipl_ccw_dev(struct ccw_dev_id *devid) | |||
1048 | do_reipl_asm(*((__u32*)&schid)); | 1054 | do_reipl_asm(*((__u32*)&schid)); |
1049 | } | 1055 | } |
1050 | 1056 | ||
1051 | static struct schib __initdata ipl_schib; | 1057 | int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) |
1052 | |||
1053 | /* | ||
1054 | * ipl_save_parameters gets called very early. It is not allowed to access | ||
1055 | * anything in the bss section at all. The bss section is not cleared yet, | ||
1056 | * but may contain some ipl parameters written by the firmware. | ||
1057 | * These parameters (if present) are copied to 0x2000. | ||
1058 | * To avoid corruption of the ipl parameters, all variables used by this | ||
1059 | * function must reside on the stack or in the data section. | ||
1060 | */ | ||
1061 | void ipl_save_parameters(void) | ||
1062 | { | 1058 | { |
1063 | struct subchannel_id schid; | 1059 | struct subchannel_id schid; |
1064 | unsigned int *ipl_ptr; | 1060 | struct schib schib; |
1065 | void *src, *dst; | ||
1066 | 1061 | ||
1067 | schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; | 1062 | schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; |
1068 | if (!schid.one) | 1063 | if (!schid.one) |
1069 | return; | 1064 | return -ENODEV; |
1070 | if (stsch(schid, &ipl_schib)) | 1065 | if (stsch(schid, &schib)) |
1071 | return; | 1066 | return -ENODEV; |
1072 | if (!ipl_schib.pmcw.dnv) | 1067 | if (!schib.pmcw.dnv) |
1073 | return; | 1068 | return -ENODEV; |
1074 | ipl_devno = ipl_schib.pmcw.dev; | 1069 | iplinfo->devno = schib.pmcw.dev; |
1075 | ipl_flags |= IPL_DEVNO_VALID; | 1070 | iplinfo->is_qdio = schib.pmcw.qf; |
1076 | if (!ipl_schib.pmcw.qf) | 1071 | return 0; |
1077 | return; | ||
1078 | ipl_flags |= IPL_PARMBLOCK_VALID; | ||
1079 | ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; | ||
1080 | src = (void *)(unsigned long)*ipl_ptr; | ||
1081 | dst = (void *)IPL_PARMBLOCK_ORIGIN; | ||
1082 | memmove(dst, src, PAGE_SIZE); | ||
1083 | *ipl_ptr = IPL_PARMBLOCK_ORIGIN; | ||
1084 | } | 1072 | } |
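cio_get_iplinfo() replaces the old ipl_save_parameters() early-boot copying: the caller passes a struct cio_iplinfo (assumed to be declared in asm/cio.h as part of this series) and receives the IPL device number and QDIO flag, or -ENODEV. A sketch of a caller; the field names devno and is_qdio are taken from the assignments above, and the printed bus-id prefix is illustrative:

/* Illustrative caller of cio_get_iplinfo(). */
static void report_ipl_device(void)
{
	struct cio_iplinfo iplinfo;

	if (cio_get_iplinfo(&iplinfo))
		return;		/* no valid IPL subchannel */
	printk(KERN_INFO "IPL device 0.0.%04x%s\n", iplinfo.devno,
	       iplinfo.is_qdio ? " (QDIO)" : "");
}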
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 35154a210357..7446c39951a7 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h | |||
@@ -1,18 +1,11 @@ | |||
1 | #ifndef S390_CIO_H | 1 | #ifndef S390_CIO_H |
2 | #define S390_CIO_H | 2 | #define S390_CIO_H |
3 | 3 | ||
4 | #include "schid.h" | ||
5 | #include <linux/mutex.h> | 4 | #include <linux/mutex.h> |
6 | 5 | #include <linux/device.h> | |
7 | /* | 6 | #include <asm/chpid.h> |
8 | * where we put the ssd info | 7 | #include "chsc.h" |
9 | */ | 8 | #include "schid.h" |
10 | struct ssd_info { | ||
11 | __u8 valid:1; | ||
12 | __u8 type:7; /* subchannel type */ | ||
13 | __u8 chpid[8]; /* chpids */ | ||
14 | __u16 fla[8]; /* full link addresses */ | ||
15 | } __attribute__ ((packed)); | ||
16 | 9 | ||
17 | /* | 10 | /* |
18 | * path management control word | 11 | * path management control word |
@@ -108,7 +101,7 @@ struct subchannel { | |||
108 | struct schib schib; /* subchannel information block */ | 101 | struct schib schib; /* subchannel information block */ |
109 | struct orb orb; /* operation request block */ | 102 | struct orb orb; /* operation request block */ |
110 | struct ccw1 sense_ccw; /* static ccw for sense command */ | 103 | struct ccw1 sense_ccw; /* static ccw for sense command */ |
111 | struct ssd_info ssd_info; /* subchannel description */ | 104 | struct chsc_ssd_info ssd_info; /* subchannel description */ |
112 | struct device dev; /* entry in device tree */ | 105 | struct device dev; /* entry in device tree */ |
113 | struct css_driver *driver; | 106 | struct css_driver *driver; |
114 | } __attribute__ ((aligned(8))); | 107 | } __attribute__ ((aligned(8))); |
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 90b22faabbf7..28abd697be1a 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c | |||
@@ -476,7 +476,7 @@ struct cmb_area { | |||
476 | }; | 476 | }; |
477 | 477 | ||
478 | static struct cmb_area cmb_area = { | 478 | static struct cmb_area cmb_area = { |
479 | .lock = SPIN_LOCK_UNLOCKED, | 479 | .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock), |
480 | .list = LIST_HEAD_INIT(cmb_area.list), | 480 | .list = LIST_HEAD_INIT(cmb_area.list), |
481 | .num_channels = 1024, | 481 | .num_channels = 1024, |
482 | }; | 482 | }; |
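The cmf.c hunk replaces the deprecated SPIN_LOCK_UNLOCKED initializer with __SPIN_LOCK_UNLOCKED(name), which takes the lock's name so lockdep can identify it. For a stand-alone lock the same effect is normally written with DEFINE_SPINLOCK(); a minimal sketch of both forms:

#include <linux/spinlock.h>
#include <linux/list.h>

/* Stand-alone lock: DEFINE_SPINLOCK() covers the common case. */
static DEFINE_SPINLOCK(example_lock);

/* Lock embedded in a static struct, as in cmb_area above: the
 * __SPIN_LOCK_UNLOCKED() argument names the lock for lockdep. */
static struct {
	spinlock_t lock;
	struct list_head list;
} example_area = {
	.lock = __SPIN_LOCK_UNLOCKED(example_area.lock),
	.list = LIST_HEAD_INIT(example_area.list),
};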
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index fe0ace7aece8..27c6d9e55b23 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -20,8 +20,9 @@ | |||
20 | #include "ioasm.h" | 20 | #include "ioasm.h" |
21 | #include "chsc.h" | 21 | #include "chsc.h" |
22 | #include "device.h" | 22 | #include "device.h" |
23 | #include "idset.h" | ||
24 | #include "chp.h" | ||
23 | 25 | ||
24 | int need_rescan = 0; | ||
25 | int css_init_done = 0; | 26 | int css_init_done = 0; |
26 | static int need_reprobe = 0; | 27 | static int need_reprobe = 0; |
27 | static int max_ssid = 0; | 28 | static int max_ssid = 0; |
@@ -125,8 +126,52 @@ void css_sch_device_unregister(struct subchannel *sch) | |||
125 | mutex_unlock(&sch->reg_mutex); | 126 | mutex_unlock(&sch->reg_mutex); |
126 | } | 127 | } |
127 | 128 | ||
128 | static int | 129 | static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) |
129 | css_register_subchannel(struct subchannel *sch) | 130 | { |
131 | int i; | ||
132 | int mask; | ||
133 | |||
134 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); | ||
135 | ssd->path_mask = pmcw->pim; | ||
136 | for (i = 0; i < 8; i++) { | ||
137 | mask = 0x80 >> i; | ||
138 | if (pmcw->pim & mask) { | ||
139 | chp_id_init(&ssd->chpid[i]); | ||
140 | ssd->chpid[i].id = pmcw->chpid[i]; | ||
141 | } | ||
142 | } | ||
143 | } | ||
144 | |||
145 | static void ssd_register_chpids(struct chsc_ssd_info *ssd) | ||
146 | { | ||
147 | int i; | ||
148 | int mask; | ||
149 | |||
150 | for (i = 0; i < 8; i++) { | ||
151 | mask = 0x80 >> i; | ||
152 | if (ssd->path_mask & mask) | ||
153 | if (!chp_is_registered(ssd->chpid[i])) | ||
154 | chp_new(ssd->chpid[i]); | ||
155 | } | ||
156 | } | ||
157 | |||
158 | void css_update_ssd_info(struct subchannel *sch) | ||
159 | { | ||
160 | int ret; | ||
161 | |||
162 | if (cio_is_console(sch->schid)) { | ||
163 | /* Console is initialized too early for functions requiring | ||
164 | * memory allocation. */ | ||
165 | ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); | ||
166 | } else { | ||
167 | ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); | ||
168 | if (ret) | ||
169 | ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); | ||
170 | ssd_register_chpids(&sch->ssd_info); | ||
171 | } | ||
172 | } | ||
173 | |||
174 | static int css_register_subchannel(struct subchannel *sch) | ||
130 | { | 175 | { |
131 | int ret; | 176 | int ret; |
132 | 177 | ||
@@ -135,9 +180,7 @@ css_register_subchannel(struct subchannel *sch) | |||
135 | sch->dev.bus = &css_bus_type; | 180 | sch->dev.bus = &css_bus_type; |
136 | sch->dev.release = &css_subchannel_release; | 181 | sch->dev.release = &css_subchannel_release; |
137 | sch->dev.groups = subch_attr_groups; | 182 | sch->dev.groups = subch_attr_groups; |
138 | 183 | css_update_ssd_info(sch); | |
139 | css_get_ssd_info(sch); | ||
140 | |||
141 | /* make it known to the system */ | 184 | /* make it known to the system */ |
142 | ret = css_sch_device_register(sch); | 185 | ret = css_sch_device_register(sch); |
143 | if (ret) { | 186 | if (ret) { |
@@ -306,7 +349,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) | |||
306 | return css_probe_device(schid); | 349 | return css_probe_device(schid); |
307 | } | 350 | } |
308 | 351 | ||
309 | static int css_evaluate_subchannel(struct subchannel_id schid, int slow) | 352 | static void css_evaluate_subchannel(struct subchannel_id schid, int slow) |
310 | { | 353 | { |
311 | struct subchannel *sch; | 354 | struct subchannel *sch; |
312 | int ret; | 355 | int ret; |
@@ -317,53 +360,66 @@ static int css_evaluate_subchannel(struct subchannel_id schid, int slow) | |||
317 | put_device(&sch->dev); | 360 | put_device(&sch->dev); |
318 | } else | 361 | } else |
319 | ret = css_evaluate_new_subchannel(schid, slow); | 362 | ret = css_evaluate_new_subchannel(schid, slow); |
320 | 363 | if (ret == -EAGAIN) | |
321 | return ret; | 364 | css_schedule_eval(schid); |
322 | } | 365 | } |
323 | 366 | ||
324 | static int | 367 | static struct idset *slow_subchannel_set; |
325 | css_rescan_devices(struct subchannel_id schid, void *data) | 368 | static spinlock_t slow_subchannel_lock; |
369 | |||
370 | static int __init slow_subchannel_init(void) | ||
326 | { | 371 | { |
327 | return css_evaluate_subchannel(schid, 1); | 372 | spin_lock_init(&slow_subchannel_lock); |
373 | slow_subchannel_set = idset_sch_new(); | ||
374 | if (!slow_subchannel_set) { | ||
375 | printk(KERN_WARNING "cio: could not allocate slow subchannel " | ||
376 | "set\n"); | ||
377 | return -ENOMEM; | ||
378 | } | ||
379 | return 0; | ||
328 | } | 380 | } |
329 | 381 | ||
330 | struct slow_subchannel { | 382 | subsys_initcall(slow_subchannel_init); |
331 | struct list_head slow_list; | ||
332 | struct subchannel_id schid; | ||
333 | }; | ||
334 | |||
335 | static LIST_HEAD(slow_subchannels_head); | ||
336 | static DEFINE_SPINLOCK(slow_subchannel_lock); | ||
337 | 383 | ||
338 | static void | 384 | static void css_slow_path_func(struct work_struct *unused) |
339 | css_trigger_slow_path(struct work_struct *unused) | ||
340 | { | 385 | { |
341 | CIO_TRACE_EVENT(4, "slowpath"); | 386 | struct subchannel_id schid; |
342 | |||
343 | if (need_rescan) { | ||
344 | need_rescan = 0; | ||
345 | for_each_subchannel(css_rescan_devices, NULL); | ||
346 | return; | ||
347 | } | ||
348 | 387 | ||
388 | CIO_TRACE_EVENT(4, "slowpath"); | ||
349 | spin_lock_irq(&slow_subchannel_lock); | 389 | spin_lock_irq(&slow_subchannel_lock); |
350 | while (!list_empty(&slow_subchannels_head)) { | 390 | init_subchannel_id(&schid); |
351 | struct slow_subchannel *slow_sch = | 391 | while (idset_sch_get_first(slow_subchannel_set, &schid)) { |
352 | list_entry(slow_subchannels_head.next, | 392 | idset_sch_del(slow_subchannel_set, schid); |
353 | struct slow_subchannel, slow_list); | ||
354 | |||
355 | list_del_init(slow_subchannels_head.next); | ||
356 | spin_unlock_irq(&slow_subchannel_lock); | 393 | spin_unlock_irq(&slow_subchannel_lock); |
357 | css_evaluate_subchannel(slow_sch->schid, 1); | 394 | css_evaluate_subchannel(schid, 1); |
358 | spin_lock_irq(&slow_subchannel_lock); | 395 | spin_lock_irq(&slow_subchannel_lock); |
359 | kfree(slow_sch); | ||
360 | } | 396 | } |
361 | spin_unlock_irq(&slow_subchannel_lock); | 397 | spin_unlock_irq(&slow_subchannel_lock); |
362 | } | 398 | } |
363 | 399 | ||
364 | DECLARE_WORK(slow_path_work, css_trigger_slow_path); | 400 | static DECLARE_WORK(slow_path_work, css_slow_path_func); |
365 | struct workqueue_struct *slow_path_wq; | 401 | struct workqueue_struct *slow_path_wq; |
366 | 402 | ||
403 | void css_schedule_eval(struct subchannel_id schid) | ||
404 | { | ||
405 | unsigned long flags; | ||
406 | |||
407 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
408 | idset_sch_add(slow_subchannel_set, schid); | ||
409 | queue_work(slow_path_wq, &slow_path_work); | ||
410 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
411 | } | ||
412 | |||
413 | void css_schedule_eval_all(void) | ||
414 | { | ||
415 | unsigned long flags; | ||
416 | |||
417 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
418 | idset_fill(slow_subchannel_set); | ||
419 | queue_work(slow_path_wq, &slow_path_work); | ||
420 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
421 | } | ||
422 | |||
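The reworked slow path records pending subchannels in an idset instead of a kmalloc'd list; idset_sch_new(), idset_sch_add(), idset_sch_del(), idset_sch_get_first() and idset_fill() come from the idset.c/idset.h files added by this patch, and their signatures here are inferred from the uses above. A hedged sketch of the producer/consumer pattern, with error handling trimmed:

/* Sketch of the idset pattern used by css_schedule_eval() and
 * css_slow_path_func(); not a drop-in replacement for either. */
static struct idset *pending_set;
static DEFINE_SPINLOCK(pending_lock);

static void remember(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&pending_lock, flags);
	idset_sch_add(pending_set, schid);	/* mark subchannel pending */
	spin_unlock_irqrestore(&pending_lock, flags);
}

static void drain(void)
{
	struct subchannel_id schid;

	spin_lock_irq(&pending_lock);
	init_subchannel_id(&schid);
	while (idset_sch_get_first(pending_set, &schid)) {
		idset_sch_del(pending_set, schid);
		/* drop the lock while evaluating; may sleep/allocate */
		spin_unlock_irq(&pending_lock);
		css_evaluate_subchannel(schid, 1);
		spin_lock_irq(&pending_lock);
	}
	spin_unlock_irq(&pending_lock);
}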
367 | /* Reprobe subchannel if unregistered. */ | 423 | /* Reprobe subchannel if unregistered. */ |
368 | static int reprobe_subchannel(struct subchannel_id schid, void *data) | 424 | static int reprobe_subchannel(struct subchannel_id schid, void *data) |
369 | { | 425 | { |
@@ -426,33 +482,14 @@ void css_schedule_reprobe(void) | |||
426 | EXPORT_SYMBOL_GPL(css_schedule_reprobe); | 482 | EXPORT_SYMBOL_GPL(css_schedule_reprobe); |
427 | 483 | ||
428 | /* | 484 | /* |
429 | * Rescan for new devices. FIXME: This is slow. | ||
430 | * This function is called when we have lost CRWs due to overflows and we have | ||
431 | * to do subchannel housekeeping. | ||
432 | */ | ||
433 | void | ||
434 | css_reiterate_subchannels(void) | ||
435 | { | ||
436 | css_clear_subchannel_slow_list(); | ||
437 | need_rescan = 1; | ||
438 | } | ||
439 | |||
440 | /* | ||
441 | * Called from the machine check handler for subchannel report words. | 485 | * Called from the machine check handler for subchannel report words. |
442 | */ | 486 | */ |
443 | int | 487 | void css_process_crw(int rsid1, int rsid2) |
444 | css_process_crw(int rsid1, int rsid2) | ||
445 | { | 488 | { |
446 | int ret; | ||
447 | struct subchannel_id mchk_schid; | 489 | struct subchannel_id mchk_schid; |
448 | 490 | ||
449 | CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", | 491 | CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", |
450 | rsid1, rsid2); | 492 | rsid1, rsid2); |
451 | |||
452 | if (need_rescan) | ||
453 | /* We need to iterate all subchannels anyway. */ | ||
454 | return -EAGAIN; | ||
455 | |||
456 | init_subchannel_id(&mchk_schid); | 493 | init_subchannel_id(&mchk_schid); |
457 | mchk_schid.sch_no = rsid1; | 494 | mchk_schid.sch_no = rsid1; |
458 | if (rsid2 != 0) | 495 | if (rsid2 != 0) |
@@ -463,14 +500,7 @@ css_process_crw(int rsid1, int rsid2) | |||
463 | * use stsch() to find out if the subchannel in question has come | 500 | * use stsch() to find out if the subchannel in question has come |
464 | * or gone. | 501 | * or gone. |
465 | */ | 502 | */ |
466 | ret = css_evaluate_subchannel(mchk_schid, 0); | 503 | css_evaluate_subchannel(mchk_schid, 0); |
467 | if (ret == -EAGAIN) { | ||
468 | if (css_enqueue_subchannel_slow(mchk_schid)) { | ||
469 | css_clear_subchannel_slow_list(); | ||
470 | need_rescan = 1; | ||
471 | } | ||
472 | } | ||
473 | return ret; | ||
474 | } | 504 | } |
475 | 505 | ||
476 | static int __init | 506 | static int __init |
@@ -745,47 +775,6 @@ struct bus_type css_bus_type = { | |||
745 | 775 | ||
746 | subsys_initcall(init_channel_subsystem); | 776 | subsys_initcall(init_channel_subsystem); |
747 | 777 | ||
748 | int | ||
749 | css_enqueue_subchannel_slow(struct subchannel_id schid) | ||
750 | { | ||
751 | struct slow_subchannel *new_slow_sch; | ||
752 | unsigned long flags; | ||
753 | |||
754 | new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC); | ||
755 | if (!new_slow_sch) | ||
756 | return -ENOMEM; | ||
757 | new_slow_sch->schid = schid; | ||
758 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
759 | list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head); | ||
760 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
761 | return 0; | ||
762 | } | ||
763 | |||
764 | void | ||
765 | css_clear_subchannel_slow_list(void) | ||
766 | { | ||
767 | unsigned long flags; | ||
768 | |||
769 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
770 | while (!list_empty(&slow_subchannels_head)) { | ||
771 | struct slow_subchannel *slow_sch = | ||
772 | list_entry(slow_subchannels_head.next, | ||
773 | struct slow_subchannel, slow_list); | ||
774 | |||
775 | list_del_init(slow_subchannels_head.next); | ||
776 | kfree(slow_sch); | ||
777 | } | ||
778 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
779 | } | ||
780 | |||
781 | |||
782 | |||
783 | int | ||
784 | css_slow_subchannels_exist(void) | ||
785 | { | ||
786 | return (!list_empty(&slow_subchannels_head)); | ||
787 | } | ||
788 | |||
789 | MODULE_LICENSE("GPL"); | 778 | MODULE_LICENSE("GPL"); |
790 | EXPORT_SYMBOL(css_bus_type); | 779 | EXPORT_SYMBOL(css_bus_type); |
791 | EXPORT_SYMBOL_GPL(css_characteristics_avail); | 780 | EXPORT_SYMBOL_GPL(css_characteristics_avail); |
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index ca2bab932a8a..71fcfdc42800 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h | |||
@@ -4,8 +4,11 @@ | |||
4 | #include <linux/mutex.h> | 4 | #include <linux/mutex.h> |
5 | #include <linux/wait.h> | 5 | #include <linux/wait.h> |
6 | #include <linux/workqueue.h> | 6 | #include <linux/workqueue.h> |
7 | #include <linux/device.h> | ||
8 | #include <linux/types.h> | ||
7 | 9 | ||
8 | #include <asm/cio.h> | 10 | #include <asm/cio.h> |
11 | #include <asm/chpid.h> | ||
9 | 12 | ||
10 | #include "schid.h" | 13 | #include "schid.h" |
11 | 14 | ||
@@ -143,13 +146,12 @@ extern void css_sch_device_unregister(struct subchannel *); | |||
143 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); | 146 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); |
144 | extern int css_init_done; | 147 | extern int css_init_done; |
145 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); | 148 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); |
146 | extern int css_process_crw(int, int); | 149 | extern void css_process_crw(int, int); |
147 | extern void css_reiterate_subchannels(void); | 150 | extern void css_reiterate_subchannels(void); |
151 | void css_update_ssd_info(struct subchannel *sch); | ||
148 | 152 | ||
149 | #define __MAX_SUBCHANNEL 65535 | 153 | #define __MAX_SUBCHANNEL 65535 |
150 | #define __MAX_SSID 3 | 154 | #define __MAX_SSID 3 |
151 | #define __MAX_CHPID 255 | ||
152 | #define __MAX_CSSID 0 | ||
153 | 155 | ||
154 | struct channel_subsystem { | 156 | struct channel_subsystem { |
155 | u8 cssid; | 157 | u8 cssid; |
@@ -185,16 +187,12 @@ int device_trigger_verify(struct subchannel *sch); | |||
185 | void device_kill_pending_timer(struct subchannel *); | 187 | void device_kill_pending_timer(struct subchannel *); |
186 | 188 | ||
187 | /* Helper functions to build lists for the slow path. */ | 189 | /* Helper functions to build lists for the slow path. */ |
188 | extern int css_enqueue_subchannel_slow(struct subchannel_id schid); | 190 | void css_schedule_eval(struct subchannel_id schid); |
189 | void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); | 191 | void css_schedule_eval_all(void); |
190 | void css_clear_subchannel_slow_list(void); | ||
191 | int css_slow_subchannels_exist(void); | ||
192 | extern int need_rescan; | ||
193 | 192 | ||
194 | int sch_is_pseudo_sch(struct subchannel *); | 193 | int sch_is_pseudo_sch(struct subchannel *); |
195 | 194 | ||
196 | extern struct workqueue_struct *slow_path_wq; | 195 | extern struct workqueue_struct *slow_path_wq; |
197 | extern struct work_struct slow_path_work; | ||
198 | 196 | ||
199 | int subchannel_add_files (struct device *); | 197 | int subchannel_add_files (struct device *); |
200 | extern struct attribute_group *subch_attr_groups[]; | 198 | extern struct attribute_group *subch_attr_groups[]; |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index e322111fb369..03355902c582 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -56,13 +56,12 @@ ccw_bus_match (struct device * dev, struct device_driver * drv) | |||
56 | /* Store modalias string delimited by prefix/suffix string into buffer with | 56 | /* Store modalias string delimited by prefix/suffix string into buffer with |
57 | * specified size. Return length of resulting string (excluding trailing '\0') | 57 | * specified size. Return length of resulting string (excluding trailing '\0') |
58 | * even if string doesn't fit buffer (snprintf semantics). */ | 58 | * even if string doesn't fit buffer (snprintf semantics). */ |
59 | static int snprint_alias(char *buf, size_t size, const char *prefix, | 59 | static int snprint_alias(char *buf, size_t size, |
60 | struct ccw_device_id *id, const char *suffix) | 60 | struct ccw_device_id *id, const char *suffix) |
61 | { | 61 | { |
62 | int len; | 62 | int len; |
63 | 63 | ||
64 | len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type, | 64 | len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model); |
65 | id->cu_model); | ||
66 | if (len > size) | 65 | if (len > size) |
67 | return len; | 66 | return len; |
68 | buf += len; | 67 | buf += len; |
@@ -85,53 +84,40 @@ static int ccw_uevent(struct device *dev, char **envp, int num_envp, | |||
85 | struct ccw_device *cdev = to_ccwdev(dev); | 84 | struct ccw_device *cdev = to_ccwdev(dev); |
86 | struct ccw_device_id *id = &(cdev->id); | 85 | struct ccw_device_id *id = &(cdev->id); |
87 | int i = 0; | 86 | int i = 0; |
88 | int len; | 87 | int len = 0; |
88 | int ret; | ||
89 | char modalias_buf[30]; | ||
89 | 90 | ||
90 | /* CU_TYPE= */ | 91 | /* CU_TYPE= */ |
91 | len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1; | 92 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
92 | if (len > buffer_size || i >= num_envp) | 93 | "CU_TYPE=%04X", id->cu_type); |
93 | return -ENOMEM; | 94 | if (ret) |
94 | envp[i++] = buffer; | 95 | return ret; |
95 | buffer += len; | ||
96 | buffer_size -= len; | ||
97 | 96 | ||
98 | /* CU_MODEL= */ | 97 | /* CU_MODEL= */ |
99 | len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1; | 98 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
100 | if (len > buffer_size || i >= num_envp) | 99 | "CU_MODEL=%02X", id->cu_model); |
101 | return -ENOMEM; | 100 | if (ret) |
102 | envp[i++] = buffer; | 101 | return ret; |
103 | buffer += len; | ||
104 | buffer_size -= len; | ||
105 | 102 | ||
106 | /* The next two can be zero, that's ok for us */ | 103 | /* The next two can be zero, that's ok for us */ |
107 | /* DEV_TYPE= */ | 104 | /* DEV_TYPE= */ |
108 | len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1; | 105 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
109 | if (len > buffer_size || i >= num_envp) | 106 | "DEV_TYPE=%04X", id->dev_type); |
110 | return -ENOMEM; | 107 | if (ret) |
111 | envp[i++] = buffer; | 108 | return ret; |
112 | buffer += len; | ||
113 | buffer_size -= len; | ||
114 | 109 | ||
115 | /* DEV_MODEL= */ | 110 | /* DEV_MODEL= */ |
116 | len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X", | 111 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
117 | (unsigned char) id->dev_model) + 1; | 112 | "DEV_MODEL=%02X", id->dev_model); |
118 | if (len > buffer_size || i >= num_envp) | 113 | if (ret) |
119 | return -ENOMEM; | 114 | return ret; |
120 | envp[i++] = buffer; | ||
121 | buffer += len; | ||
122 | buffer_size -= len; | ||
123 | 115 | ||
124 | /* MODALIAS= */ | 116 | /* MODALIAS= */ |
125 | len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1; | 117 | snprint_alias(modalias_buf, sizeof(modalias_buf), id, ""); |
126 | if (len > buffer_size || i >= num_envp) | 118 | ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, |
127 | return -ENOMEM; | 119 | "MODALIAS=%s", modalias_buf); |
128 | envp[i++] = buffer; | 120 | return ret; |
129 | buffer += len; | ||
130 | buffer_size -= len; | ||
131 | |||
132 | envp[i] = NULL; | ||
133 | |||
134 | return 0; | ||
135 | } | 121 | } |
136 | 122 | ||
137 | struct bus_type ccw_bus_type; | 123 | struct bus_type ccw_bus_type; |
@@ -230,12 +216,18 @@ static ssize_t | |||
230 | chpids_show (struct device * dev, struct device_attribute *attr, char * buf) | 216 | chpids_show (struct device * dev, struct device_attribute *attr, char * buf) |
231 | { | 217 | { |
232 | struct subchannel *sch = to_subchannel(dev); | 218 | struct subchannel *sch = to_subchannel(dev); |
233 | struct ssd_info *ssd = &sch->ssd_info; | 219 | struct chsc_ssd_info *ssd = &sch->ssd_info; |
234 | ssize_t ret = 0; | 220 | ssize_t ret = 0; |
235 | int chp; | 221 | int chp; |
222 | int mask; | ||
236 | 223 | ||
237 | for (chp = 0; chp < 8; chp++) | 224 | for (chp = 0; chp < 8; chp++) { |
238 | ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); | 225 | mask = 0x80 >> chp; |
226 | if (ssd->path_mask & mask) | ||
227 | ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); | ||
228 | else | ||
229 | ret += sprintf(buf + ret, "00 "); | ||
230 | } | ||
239 | ret += sprintf (buf+ret, "\n"); | 231 | ret += sprintf (buf+ret, "\n"); |
240 | return min((ssize_t)PAGE_SIZE, ret); | 232 | return min((ssize_t)PAGE_SIZE, ret); |
241 | } | 233 | } |
@@ -280,7 +272,7 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
280 | struct ccw_device_id *id = &(cdev->id); | 272 | struct ccw_device_id *id = &(cdev->id); |
281 | int len; | 273 | int len; |
282 | 274 | ||
283 | len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1; | 275 | len = snprint_alias(buf, PAGE_SIZE, id, "\n") + 1; |
284 | 276 | ||
285 | return len > PAGE_SIZE ? PAGE_SIZE : len; | 277 | return len > PAGE_SIZE ? PAGE_SIZE : len; |
286 | } | 278 | } |
@@ -298,16 +290,10 @@ int ccw_device_is_orphan(struct ccw_device *cdev) | |||
298 | return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); | 290 | return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); |
299 | } | 291 | } |
300 | 292 | ||
301 | static void ccw_device_unregister(struct work_struct *work) | 293 | static void ccw_device_unregister(struct ccw_device *cdev) |
302 | { | 294 | { |
303 | struct ccw_device_private *priv; | ||
304 | struct ccw_device *cdev; | ||
305 | |||
306 | priv = container_of(work, struct ccw_device_private, kick_work); | ||
307 | cdev = priv->cdev; | ||
308 | if (test_and_clear_bit(1, &cdev->private->registered)) | 295 | if (test_and_clear_bit(1, &cdev->private->registered)) |
309 | device_unregister(&cdev->dev); | 296 | device_del(&cdev->dev); |
310 | put_device(&cdev->dev); | ||
311 | } | 297 | } |
312 | 298 | ||
313 | static void | 299 | static void |
@@ -324,11 +310,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev) | |||
324 | spin_lock_irqsave(cdev->ccwlock, flags); | 310 | spin_lock_irqsave(cdev->ccwlock, flags); |
325 | cdev->private->state = DEV_STATE_NOT_OPER; | 311 | cdev->private->state = DEV_STATE_NOT_OPER; |
326 | spin_unlock_irqrestore(cdev->ccwlock, flags); | 312 | spin_unlock_irqrestore(cdev->ccwlock, flags); |
327 | if (get_device(&cdev->dev)) { | 313 | ccw_device_unregister(cdev); |
328 | PREPARE_WORK(&cdev->private->kick_work, | 314 | put_device(&cdev->dev); |
329 | ccw_device_unregister); | ||
330 | queue_work(ccw_device_work, &cdev->private->kick_work); | ||
331 | } | ||
332 | return ; | 315 | return ; |
333 | } | 316 | } |
334 | sch = to_subchannel(cdev->dev.parent); | 317 | sch = to_subchannel(cdev->dev.parent); |
@@ -413,11 +396,60 @@ ccw_device_set_online(struct ccw_device *cdev) | |||
413 | return (ret == 0) ? -ENODEV : ret; | 396 | return (ret == 0) ? -ENODEV : ret; |
414 | } | 397 | } |
415 | 398 | ||
416 | static ssize_t | 399 | static void online_store_handle_offline(struct ccw_device *cdev) |
417 | online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 400 | { |
401 | if (cdev->private->state == DEV_STATE_DISCONNECTED) | ||
402 | ccw_device_remove_disconnected(cdev); | ||
403 | else if (cdev->drv && cdev->drv->set_offline) | ||
404 | ccw_device_set_offline(cdev); | ||
405 | } | ||
406 | |||
407 | static int online_store_recog_and_online(struct ccw_device *cdev) | ||
408 | { | ||
409 | int ret; | ||
410 | |||
411 | /* Do device recognition, if needed. */ | ||
412 | if (cdev->id.cu_type == 0) { | ||
413 | ret = ccw_device_recognition(cdev); | ||
414 | if (ret) { | ||
415 | printk(KERN_WARNING"Couldn't start recognition " | ||
416 | "for device %s (ret=%d)\n", | ||
417 | cdev->dev.bus_id, ret); | ||
418 | return ret; | ||
419 | } | ||
420 | wait_event(cdev->private->wait_q, | ||
421 | cdev->private->flags.recog_done); | ||
422 | } | ||
423 | if (cdev->drv && cdev->drv->set_online) | ||
424 | ccw_device_set_online(cdev); | ||
425 | return 0; | ||
426 | } | ||
427 | static void online_store_handle_online(struct ccw_device *cdev, int force) | ||
428 | { | ||
429 | int ret; | ||
430 | |||
431 | ret = online_store_recog_and_online(cdev); | ||
432 | if (ret) | ||
433 | return; | ||
434 | if (force && cdev->private->state == DEV_STATE_BOXED) { | ||
435 | ret = ccw_device_stlck(cdev); | ||
436 | if (ret) { | ||
437 | printk(KERN_WARNING"ccw_device_stlck for device %s " | ||
438 | "returned %d!\n", cdev->dev.bus_id, ret); | ||
439 | return; | ||
440 | } | ||
441 | if (cdev->id.cu_type == 0) | ||
442 | cdev->private->state = DEV_STATE_NOT_OPER; | ||
443 | online_store_recog_and_online(cdev); | ||
444 | } | ||
445 | |||
446 | } | ||
447 | |||
448 | static ssize_t online_store (struct device *dev, struct device_attribute *attr, | ||
449 | const char *buf, size_t count) | ||
418 | { | 450 | { |
419 | struct ccw_device *cdev = to_ccwdev(dev); | 451 | struct ccw_device *cdev = to_ccwdev(dev); |
420 | int i, force, ret; | 452 | int i, force; |
421 | char *tmp; | 453 | char *tmp; |
422 | 454 | ||
423 | if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) | 455 | if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) |
@@ -434,51 +466,17 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf | |||
434 | force = 0; | 466 | force = 0; |
435 | i = simple_strtoul(buf, &tmp, 16); | 467 | i = simple_strtoul(buf, &tmp, 16); |
436 | } | 468 | } |
437 | if (i == 1) { | 469 | |
438 | /* Do device recognition, if needed. */ | 470 | switch (i) { |
439 | if (cdev->id.cu_type == 0) { | 471 | case 0: |
440 | ret = ccw_device_recognition(cdev); | 472 | online_store_handle_offline(cdev); |
441 | if (ret) { | 473 | break; |
442 | printk(KERN_WARNING"Couldn't start recognition " | 474 | case 1: |
443 | "for device %s (ret=%d)\n", | 475 | online_store_handle_online(cdev, force); |
444 | cdev->dev.bus_id, ret); | 476 | break; |
445 | goto out; | 477 | default: |
446 | } | 478 | count = -EINVAL; |
447 | wait_event(cdev->private->wait_q, | ||
448 | cdev->private->flags.recog_done); | ||
449 | } | ||
450 | if (cdev->drv && cdev->drv->set_online) | ||
451 | ccw_device_set_online(cdev); | ||
452 | } else if (i == 0) { | ||
453 | if (cdev->private->state == DEV_STATE_DISCONNECTED) | ||
454 | ccw_device_remove_disconnected(cdev); | ||
455 | else if (cdev->drv && cdev->drv->set_offline) | ||
456 | ccw_device_set_offline(cdev); | ||
457 | } | ||
458 | if (force && cdev->private->state == DEV_STATE_BOXED) { | ||
459 | ret = ccw_device_stlck(cdev); | ||
460 | if (ret) { | ||
461 | printk(KERN_WARNING"ccw_device_stlck for device %s " | ||
462 | "returned %d!\n", cdev->dev.bus_id, ret); | ||
463 | goto out; | ||
464 | } | ||
465 | /* Do device recognition, if needed. */ | ||
466 | if (cdev->id.cu_type == 0) { | ||
467 | cdev->private->state = DEV_STATE_NOT_OPER; | ||
468 | ret = ccw_device_recognition(cdev); | ||
469 | if (ret) { | ||
470 | printk(KERN_WARNING"Couldn't start recognition " | ||
471 | "for device %s (ret=%d)\n", | ||
472 | cdev->dev.bus_id, ret); | ||
473 | goto out; | ||
474 | } | ||
475 | wait_event(cdev->private->wait_q, | ||
476 | cdev->private->flags.recog_done); | ||
477 | } | ||
478 | if (cdev->drv && cdev->drv->set_online) | ||
479 | ccw_device_set_online(cdev); | ||
480 | } | 479 | } |
481 | out: | ||
482 | if (cdev->drv) | 480 | if (cdev->drv) |
483 | module_put(cdev->drv->owner); | 481 | module_put(cdev->drv->owner); |
484 | atomic_set(&cdev->private->onoff, 0); | 482 | atomic_set(&cdev->private->onoff, 0); |
@@ -548,17 +546,10 @@ static struct attribute_group ccwdev_attr_group = { | |||
548 | .attrs = ccwdev_attrs, | 546 | .attrs = ccwdev_attrs, |
549 | }; | 547 | }; |
550 | 548 | ||
551 | static int | 549 | struct attribute_group *ccwdev_attr_groups[] = { |
552 | device_add_files (struct device *dev) | 550 | &ccwdev_attr_group, |
553 | { | 551 | NULL, |
554 | return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); | 552 | }; |
555 | } | ||
556 | |||
557 | static void | ||
558 | device_remove_files(struct device *dev) | ||
559 | { | ||
560 | sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); | ||
561 | } | ||
562 | 553 | ||
563 | /* this is a simple abstraction for device_register that sets the | 554 | /* this is a simple abstraction for device_register that sets the |
564 | * correct bus type and adds the bus specific files */ | 555 | * correct bus type and adds the bus specific files */ |
@@ -573,10 +564,6 @@ static int ccw_device_register(struct ccw_device *cdev) | |||
573 | return ret; | 564 | return ret; |
574 | 565 | ||
575 | set_bit(1, &cdev->private->registered); | 566 | set_bit(1, &cdev->private->registered); |
576 | if ((ret = device_add_files(dev))) { | ||
577 | if (test_and_clear_bit(1, &cdev->private->registered)) | ||
578 | device_del(dev); | ||
579 | } | ||
580 | return ret; | 567 | return ret; |
581 | } | 568 | } |
582 | 569 | ||
@@ -648,10 +635,6 @@ ccw_device_add_changed(struct work_struct *work) | |||
648 | return; | 635 | return; |
649 | } | 636 | } |
650 | set_bit(1, &cdev->private->registered); | 637 | set_bit(1, &cdev->private->registered); |
651 | if (device_add_files(&cdev->dev)) { | ||
652 | if (test_and_clear_bit(1, &cdev->private->registered)) | ||
653 | device_unregister(&cdev->dev); | ||
654 | } | ||
655 | } | 638 | } |
656 | 639 | ||
657 | void ccw_device_do_unreg_rereg(struct work_struct *work) | 640 | void ccw_device_do_unreg_rereg(struct work_struct *work) |
@@ -664,9 +647,7 @@ void ccw_device_do_unreg_rereg(struct work_struct *work) | |||
664 | cdev = priv->cdev; | 647 | cdev = priv->cdev; |
665 | sch = to_subchannel(cdev->dev.parent); | 648 | sch = to_subchannel(cdev->dev.parent); |
666 | 649 | ||
667 | device_remove_files(&cdev->dev); | 650 | ccw_device_unregister(cdev); |
668 | if (test_and_clear_bit(1, &cdev->private->registered)) | ||
669 | device_del(&cdev->dev); | ||
670 | PREPARE_WORK(&cdev->private->kick_work, | 651 | PREPARE_WORK(&cdev->private->kick_work, |
671 | ccw_device_add_changed); | 652 | ccw_device_add_changed); |
672 | queue_work(ccw_device_work, &cdev->private->kick_work); | 653 | queue_work(ccw_device_work, &cdev->private->kick_work); |
@@ -705,6 +686,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, | |||
705 | cdev->dev.parent = &sch->dev; | 686 | cdev->dev.parent = &sch->dev; |
706 | cdev->dev.release = ccw_device_release; | 687 | cdev->dev.release = ccw_device_release; |
707 | INIT_LIST_HEAD(&cdev->private->kick_work.entry); | 688 | INIT_LIST_HEAD(&cdev->private->kick_work.entry); |
689 | cdev->dev.groups = ccwdev_attr_groups; | ||
708 | /* Do first half of device_register. */ | 690 | /* Do first half of device_register. */ |
709 | device_initialize(&cdev->dev); | 691 | device_initialize(&cdev->dev); |
710 | if (!get_device(&sch->dev)) { | 692 | if (!get_device(&sch->dev)) { |
@@ -736,6 +718,7 @@ static int io_subchannel_recog(struct ccw_device *, struct subchannel *); | |||
736 | static void sch_attach_device(struct subchannel *sch, | 718 | static void sch_attach_device(struct subchannel *sch, |
737 | struct ccw_device *cdev) | 719 | struct ccw_device *cdev) |
738 | { | 720 | { |
721 | css_update_ssd_info(sch); | ||
739 | spin_lock_irq(sch->lock); | 722 | spin_lock_irq(sch->lock); |
740 | sch->dev.driver_data = cdev; | 723 | sch->dev.driver_data = cdev; |
741 | cdev->private->schid = sch->schid; | 724 | cdev->private->schid = sch->schid; |
@@ -871,7 +854,7 @@ io_subchannel_register(struct work_struct *work) | |||
871 | priv = container_of(work, struct ccw_device_private, kick_work); | 854 | priv = container_of(work, struct ccw_device_private, kick_work); |
872 | cdev = priv->cdev; | 855 | cdev = priv->cdev; |
873 | sch = to_subchannel(cdev->dev.parent); | 856 | sch = to_subchannel(cdev->dev.parent); |
874 | 857 | css_update_ssd_info(sch); | |
875 | /* | 858 | /* |
876 | * io_subchannel_register() will also be called after device | 859 | * io_subchannel_register() will also be called after device |
877 | * recognition has been done for a boxed device (which will already | 860 | * recognition has been done for a boxed device (which will already |
@@ -1133,15 +1116,8 @@ io_subchannel_remove (struct subchannel *sch) | |||
1133 | sch->dev.driver_data = NULL; | 1116 | sch->dev.driver_data = NULL; |
1134 | cdev->private->state = DEV_STATE_NOT_OPER; | 1117 | cdev->private->state = DEV_STATE_NOT_OPER; |
1135 | spin_unlock_irqrestore(cdev->ccwlock, flags); | 1118 | spin_unlock_irqrestore(cdev->ccwlock, flags); |
1136 | /* | 1119 | ccw_device_unregister(cdev); |
1137 | * Put unregistration on workqueue to avoid livelocks on the css bus | 1120 | put_device(&cdev->dev); |
1138 | * semaphore. | ||
1139 | */ | ||
1140 | if (get_device(&cdev->dev)) { | ||
1141 | PREPARE_WORK(&cdev->private->kick_work, | ||
1142 | ccw_device_unregister); | ||
1143 | queue_work(ccw_device_work, &cdev->private->kick_work); | ||
1144 | } | ||
1145 | return 0; | 1121 | return 0; |
1146 | } | 1122 | } |
1147 | 1123 | ||
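The ccw_uevent() hunk above drops the open-coded snprintf() bookkeeping in favour of add_uevent_var(). A minimal sketch of that pattern, using the same helper signature that appears in the hunk (the callback name and the FOO=/BAR= variables are illustrative, not part of the patch):

static int example_uevent(struct device *dev, char **envp, int num_envp,
			  char *buffer, int buffer_size)
{
	int i = 0;	/* next free slot in envp[] */
	int len = 0;	/* bytes of buffer already consumed */
	int ret;

	/* Each call appends one "NAME=value" string; a non-zero return
	 * means envp[] or buffer ran out of space. */
	ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
			     "FOO=%04X", 0x1234);
	if (ret)
		return ret;
	return add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
			      "BAR=%02X", 0x56);
}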
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 089a3ddd6265..898ec3b2bebb 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #include <asm/ccwdev.h> | 16 | #include <asm/ccwdev.h> |
17 | #include <asm/cio.h> | 17 | #include <asm/cio.h> |
18 | #include <asm/chpid.h> | ||
18 | 19 | ||
19 | #include "cio.h" | 20 | #include "cio.h" |
20 | #include "cio_debug.h" | 21 | #include "cio_debug.h" |
@@ -22,6 +23,7 @@ | |||
22 | #include "device.h" | 23 | #include "device.h" |
23 | #include "chsc.h" | 24 | #include "chsc.h" |
24 | #include "ioasm.h" | 25 | #include "ioasm.h" |
26 | #include "chp.h" | ||
25 | 27 | ||
26 | int | 28 | int |
27 | device_is_online(struct subchannel *sch) | 29 | device_is_online(struct subchannel *sch) |
@@ -210,14 +212,18 @@ static void | |||
210 | __recover_lost_chpids(struct subchannel *sch, int old_lpm) | 212 | __recover_lost_chpids(struct subchannel *sch, int old_lpm) |
211 | { | 213 | { |
212 | int mask, i; | 214 | int mask, i; |
215 | struct chp_id chpid; | ||
213 | 216 | ||
217 | chp_id_init(&chpid); | ||
214 | for (i = 0; i<8; i++) { | 218 | for (i = 0; i<8; i++) { |
215 | mask = 0x80 >> i; | 219 | mask = 0x80 >> i; |
216 | if (!(sch->lpm & mask)) | 220 | if (!(sch->lpm & mask)) |
217 | continue; | 221 | continue; |
218 | if (old_lpm & mask) | 222 | if (old_lpm & mask) |
219 | continue; | 223 | continue; |
220 | chpid_is_actually_online(sch->schib.pmcw.chpid[i]); | 224 | chpid.id = sch->schib.pmcw.chpid[i]; |
225 | if (!chp_is_registered(chpid)) | ||
226 | css_schedule_eval_all(); | ||
221 | } | 227 | } |
222 | } | 228 | } |
223 | 229 | ||
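The __recover_lost_chpids() change shows the new struct chp_id handling: the ID is initialized once and its .id field is filled from the schib.pmcw.chpid[] slot that corresponds to the path-mask bit. A commented sketch of that per-path loop, assuming only the helpers visible in the hunk above:

static void example_walk_paths(struct subchannel *sch)
{
	struct chp_id chpid;
	int mask, i;

	chp_id_init(&chpid);		/* initialize before filling in .id */
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;	/* mask bit for path position i */
		if (!(sch->lpm & mask))
			continue;	/* path not in the logical path mask */
		chpid.id = sch->schib.pmcw.chpid[i];
		if (!chp_is_registered(chpid))
			css_schedule_eval_all();	/* trigger re-evaluation */
	}
}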
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 7c7775aae38a..16f59fcb66b1 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
@@ -16,12 +16,14 @@ | |||
16 | 16 | ||
17 | #include <asm/ccwdev.h> | 17 | #include <asm/ccwdev.h> |
18 | #include <asm/idals.h> | 18 | #include <asm/idals.h> |
19 | #include <asm/chpid.h> | ||
19 | 20 | ||
20 | #include "cio.h" | 21 | #include "cio.h" |
21 | #include "cio_debug.h" | 22 | #include "cio_debug.h" |
22 | #include "css.h" | 23 | #include "css.h" |
23 | #include "chsc.h" | 24 | #include "chsc.h" |
24 | #include "device.h" | 25 | #include "device.h" |
26 | #include "chp.h" | ||
25 | 27 | ||
26 | int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags) | 28 | int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags) |
27 | { | 29 | { |
@@ -606,9 +608,12 @@ void * | |||
606 | ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) | 608 | ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) |
607 | { | 609 | { |
608 | struct subchannel *sch; | 610 | struct subchannel *sch; |
611 | struct chp_id chpid; | ||
609 | 612 | ||
610 | sch = to_subchannel(cdev->dev.parent); | 613 | sch = to_subchannel(cdev->dev.parent); |
611 | return chsc_get_chp_desc(sch, chp_no); | 614 | chp_id_init(&chpid); |
615 | chpid.id = sch->schib.pmcw.chpid[chp_no]; | ||
616 | return chp_get_chp_desc(chpid); | ||
612 | } | 617 | } |
613 | 618 | ||
614 | // FIXME: these have to go: | 619 | // FIXME: these have to go: |
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c new file mode 100644 index 000000000000..16ea828e99f7 --- /dev/null +++ b/drivers/s390/cio/idset.c | |||
@@ -0,0 +1,112 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/idset.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/slab.h> | ||
9 | #include <asm/bitops.h> | ||
10 | #include "idset.h" | ||
11 | #include "css.h" | ||
12 | |||
13 | struct idset { | ||
14 | int num_ssid; | ||
15 | int num_id; | ||
16 | unsigned long bitmap[0]; | ||
17 | }; | ||
18 | |||
19 | static inline unsigned long bitmap_size(int num_ssid, int num_id) | ||
20 | { | ||
21 | return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long); | ||
22 | } | ||
23 | |||
24 | static struct idset *idset_new(int num_ssid, int num_id) | ||
25 | { | ||
26 | struct idset *set; | ||
27 | |||
28 | set = kzalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id), | ||
29 | GFP_KERNEL); | ||
30 | if (set) { | ||
31 | set->num_ssid = num_ssid; | ||
32 | set->num_id = num_id; | ||
33 | } | ||
34 | return set; | ||
35 | } | ||
36 | |||
37 | void idset_free(struct idset *set) | ||
38 | { | ||
39 | kfree(set); | ||
40 | } | ||
41 | |||
42 | void idset_clear(struct idset *set) | ||
43 | { | ||
44 | memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id)); | ||
45 | } | ||
46 | |||
47 | void idset_fill(struct idset *set) | ||
48 | { | ||
49 | memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id)); | ||
50 | } | ||
51 | |||
52 | static inline void idset_add(struct idset *set, int ssid, int id) | ||
53 | { | ||
54 | set_bit(ssid * set->num_id + id, set->bitmap); | ||
55 | } | ||
56 | |||
57 | static inline void idset_del(struct idset *set, int ssid, int id) | ||
58 | { | ||
59 | clear_bit(ssid * set->num_id + id, set->bitmap); | ||
60 | } | ||
61 | |||
62 | static inline int idset_contains(struct idset *set, int ssid, int id) | ||
63 | { | ||
64 | return test_bit(ssid * set->num_id + id, set->bitmap); | ||
65 | } | ||
66 | |||
67 | static inline int idset_get_first(struct idset *set, int *ssid, int *id) | ||
68 | { | ||
69 | int bitnum; | ||
70 | |||
71 | bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id); | ||
72 | if (bitnum >= set->num_ssid * set->num_id) | ||
73 | return 0; | ||
74 | *ssid = bitnum / set->num_id; | ||
75 | *id = bitnum % set->num_id; | ||
76 | return 1; | ||
77 | } | ||
78 | |||
79 | struct idset *idset_sch_new(void) | ||
80 | { | ||
81 | return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1); | ||
82 | } | ||
83 | |||
84 | void idset_sch_add(struct idset *set, struct subchannel_id schid) | ||
85 | { | ||
86 | idset_add(set, schid.ssid, schid.sch_no); | ||
87 | } | ||
88 | |||
89 | void idset_sch_del(struct idset *set, struct subchannel_id schid) | ||
90 | { | ||
91 | idset_del(set, schid.ssid, schid.sch_no); | ||
92 | } | ||
93 | |||
94 | int idset_sch_contains(struct idset *set, struct subchannel_id schid) | ||
95 | { | ||
96 | return idset_contains(set, schid.ssid, schid.sch_no); | ||
97 | } | ||
98 | |||
99 | int idset_sch_get_first(struct idset *set, struct subchannel_id *schid) | ||
100 | { | ||
101 | int ssid = 0; | ||
102 | int id = 0; | ||
103 | int rc; | ||
104 | |||
105 | rc = idset_get_first(set, &ssid, &id); | ||
106 | if (rc) { | ||
107 | init_subchannel_id(schid); | ||
108 | schid->ssid = ssid; | ||
109 | schid->sch_no = id; | ||
110 | } | ||
111 | return rc; | ||
112 | } | ||
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h new file mode 100644 index 000000000000..144466ab8c15 --- /dev/null +++ b/drivers/s390/cio/idset.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/idset.h | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef S390_IDSET_H | ||
9 | #define S390_IDSET_H S390_IDSET_H | ||
10 | |||
11 | #include "schid.h" | ||
12 | |||
13 | struct idset; | ||
14 | |||
15 | void idset_free(struct idset *set); | ||
16 | void idset_clear(struct idset *set); | ||
17 | void idset_fill(struct idset *set); | ||
18 | |||
19 | struct idset *idset_sch_new(void); | ||
20 | void idset_sch_add(struct idset *set, struct subchannel_id id); | ||
21 | void idset_sch_del(struct idset *set, struct subchannel_id id); | ||
22 | int idset_sch_contains(struct idset *set, struct subchannel_id id); | ||
23 | int idset_sch_get_first(struct idset *set, struct subchannel_id *id); | ||
24 | |||
25 | #endif /* S390_IDSET_H */ | ||
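For orientation, a hypothetical caller of the new idset interface (not taken from the patch): it allocates a subchannel ID set, marks every entry, removes one and queries the first remaining one.

static void example_idset_use(void)
{
	struct idset *set;
	struct subchannel_id schid;

	set = idset_sch_new();		/* one bit per (ssid, subchannel) */
	if (!set)
		return;			/* allocation failed */
	idset_fill(set);		/* mark every possible subchannel */
	init_subchannel_id(&schid);	/* subchannel 0.0.0000 */
	idset_sch_del(set, schid);	/* unmark that one subchannel */
	if (idset_sch_get_first(set, &schid)) {
		/* schid is now the lowest (ssid, sch_no) still marked */
	}
	idset_free(set);
}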
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index ad6d82940069..7153dd959082 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef S390_CIO_IOASM_H | 1 | #ifndef S390_CIO_IOASM_H |
2 | #define S390_CIO_IOASM_H | 2 | #define S390_CIO_IOASM_H |
3 | 3 | ||
4 | #include <asm/chpid.h> | ||
4 | #include "schid.h" | 5 | #include "schid.h" |
5 | 6 | ||
6 | /* | 7 | /* |
@@ -189,9 +190,9 @@ static inline int chsc(void *chsc_area) | |||
189 | return cc; | 190 | return cc; |
190 | } | 191 | } |
191 | 192 | ||
192 | static inline int rchp(int chpid) | 193 | static inline int rchp(struct chp_id chpid) |
193 | { | 194 | { |
194 | register unsigned int reg1 asm ("1") = chpid; | 195 | register struct chp_id reg1 asm ("1") = chpid; |
195 | int ccode; | 196 | int ccode; |
196 | 197 | ||
197 | asm volatile( | 198 | asm volatile( |
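With the ioasm.h change, rchp() now takes a struct chp_id rather than a plain integer channel-path ID. A hedged sketch of how a caller would adapt; example_reset_chp() is illustrative only and not part of the patch:

static int example_reset_chp(u8 id)
{
	struct chp_id chpid;

	chp_id_init(&chpid);	/* from <asm/chpid.h>, now included above */
	chpid.id = id;
	return rchp(chpid);	/* condition code returned by rchp() */
}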