Diffstat (limited to 'drivers/s390/cio')
 drivers/s390/cio/blacklist.c  |  35
 drivers/s390/cio/ccwgroup.c   |  17
 drivers/s390/cio/chsc.c       |   6
 drivers/s390/cio/cmf.c        | 623
 drivers/s390/cio/css.c        |  63
 drivers/s390/cio/device.c     |   4
 drivers/s390/cio/device.h     |  10
 drivers/s390/cio/device_fsm.c |  20
 drivers/s390/cio/device_ops.c |  10
 9 files changed, 563 insertions(+), 225 deletions(-)
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 0960bef7b199..15b895496a45 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -224,39 +224,6 @@ is_blacklisted (int ssid, int devno)
 }
 
 #ifdef CONFIG_PROC_FS
-static int
-__s390_redo_validation(struct subchannel_id schid, void *data)
-{
-	int ret;
-	struct subchannel *sch;
-
-	sch = get_subchannel_by_schid(schid);
-	if (sch) {
-		/* Already known. */
-		put_device(&sch->dev);
-		return 0;
-	}
-	ret = css_probe_device(schid);
-	if (ret == -ENXIO)
-		return ret; /* We're through. */
-	if (ret == -ENOMEM)
-		/* Stop validation for now. Bad, but no need for a panic. */
-		return ret;
-	return 0;
-}
-
-/*
- * Function: s390_redo_validation
- * Look for no longer blacklisted devices
- * FIXME: there must be a better way to do this */
-static inline void
-s390_redo_validation (void)
-{
-	CIO_TRACE_EVENT (0, "redoval");
-
-	for_each_subchannel(__s390_redo_validation, NULL);
-}
-
 /*
  * Function: blacklist_parse_proc_parameters
  *	parse the stuff which is piped to /proc/cio_ignore
@@ -281,7 +248,7 @@ blacklist_parse_proc_parameters (char *buf)
 		return;
 	}
 
-	s390_redo_validation ();
+	css_schedule_reprobe();
 }
 
 /* Iterator struct for all devices. */
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index bdfee7fbaa2e..c7319a07ba35 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -404,21 +404,24 @@ ccwgroup_driver_register (struct ccwgroup_driver *cdriver)
 }
 
 static int
-__ccwgroup_driver_unregister_device(struct device *dev, void *data)
+__ccwgroup_match_all(struct device *dev, void *data)
 {
-	__ccwgroup_remove_symlinks(to_ccwgroupdev(dev));
-	device_unregister(dev);
-	put_device(dev);
-	return 0;
+	return 1;
 }
 
 void
 ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
 {
+	struct device *dev;
+
 	/* We don't want ccwgroup devices to live longer than their driver. */
 	get_driver(&cdriver->driver);
-	driver_for_each_device(&cdriver->driver, NULL, NULL,
-			       __ccwgroup_driver_unregister_device);
+	while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
+					 __ccwgroup_match_all))) {
+		__ccwgroup_remove_symlinks(to_ccwgroupdev(dev));
+		device_unregister(dev);
+		put_device(dev);
+	}
 	put_driver(&cdriver->driver);
 	driver_unregister(&cdriver->driver);
 }
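
Note: the ccwgroup.c change above swaps an in-place driver_for_each_device() walk for a driver_find_device() loop, so each device is freshly looked up (with its own reference) before it is unregistered, instead of being removed out from under a running iterator. A minimal standalone sketch of the same "find one, remove it, search again" pattern on a plain linked list (illustrative names only, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    struct dev {
    	int id;
    	struct dev *next;
    };

    /* Analogous to driver_find_device() with a match-all predicate:
     * return the first remaining device, or NULL when none are left. */
    static struct dev *find_first(struct dev **head)
    {
    	return *head;
    }

    /* Analogous to device_unregister(): unlink and free one device. */
    static void unregister_dev(struct dev **head, struct dev *d)
    {
    	struct dev **p;

    	for (p = head; *p; p = &(*p)->next) {
    		if (*p == d) {
    			*p = d->next;
    			free(d);
    			return;
    		}
    	}
    }

    int main(void)
    {
    	struct dev *head = NULL, *d;
    	int i;

    	for (i = 0; i < 3; i++) {
    		d = malloc(sizeof(*d));
    		d->id = i;
    		d->next = head;
    		head = d;
    	}
    	/* The pattern from ccwgroup_driver_unregister(): keep asking
    	 * for the first remaining device until the list is empty. */
    	while ((d = find_first(&head))) {
    		printf("unregistering %d\n", d->id);
    		unregister_dev(&head, d);
    	}
    	return 0;
    }

Restarting the search after every removal is always safe, whereas deleting the current node during a plain traversal invalidates the iterator.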
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 72187e54dcac..b00f3ed051a0 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -244,8 +244,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 
 	if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
 	    (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
-	    (sch->schib.pmcw.lpum == mask) &&
-	    (sch->vpm == 0)) {
+	    (sch->schib.pmcw.lpum == mask)) {
 		int cc;
 
 		cc = cio_clear(sch);
@@ -918,12 +917,13 @@ chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
 	chp = to_channelpath(container_of(kobj, struct device, kobj));
 	css = to_css(chp->dev.parent);
 
-	size = sizeof(struct cmg_chars);
+	size = sizeof(struct cmg_entry);
 
 	/* Only allow single reads. */
 	if (off || count < size)
 		return 0;
 	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
+	count = size;
 	return count;
 }
 
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 07ef3f640f4a..1c3e8e9012b0 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -3,9 +3,10 @@
  *
  * Linux on zSeries Channel Measurement Facility support
  *
- * Copyright 2000,2003 IBM Corporation
+ * Copyright 2000,2006 IBM Corporation
  *
- * Author: Arnd Bergmann <arndb@de.ibm.com>
+ * Authors: Arnd Bergmann <arndb@de.ibm.com>
+ *	    Cornelia Huck <cornelia.huck@de.ibm.com>
  *
  * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
  *
@@ -96,9 +97,9 @@ module_param(format, bool, 0444);
 /**
  * struct cmb_operations - functions to use depending on cmb_format
  *
- * all these functions operate on a struct cmf_device. There is only
- * one instance of struct cmb_operations because all cmf_device
- * objects are guaranteed to be of the same type.
+ * Most of these functions operate on a struct ccw_device. There is only
+ * one instance of struct cmb_operations because the format of the measurement
+ * data is guaranteed to be the same for every ccw_device.
  *
  * @alloc: allocate memory for a channel measurement block,
  *	   either with the help of a special pool or with kmalloc
@@ -107,6 +108,7 @@ module_param(format, bool, 0444);
  * @readall: read a measurement block in a common format
  * @reset: clear the data in the associated measurement block and
  *	   reset its time stamp
+ * @align: align an allocated block so that the hardware can use it
  */
 struct cmb_operations {
 	int (*alloc) (struct ccw_device*);
@@ -115,11 +117,19 @@ struct cmb_operations {
 	u64 (*read) (struct ccw_device*, int);
 	int (*readall)(struct ccw_device*, struct cmbdata *);
 	void (*reset) (struct ccw_device*);
+	void * (*align) (void *);
 
 	struct attribute_group *attr_group;
 };
 static struct cmb_operations *cmbops;
 
+struct cmb_data {
+	void *hw_block;   /* Pointer to block updated by hardware */
+	void *last_block; /* Last changed block copied from hardware block */
+	int size;	  /* Size of hw_block and last_block */
+	unsigned long long last_update;  /* when last_block was updated */
+};
+
 /* our user interface is designed in terms of nanoseconds,
  * while the hardware measures total times in its own
  * unit.*/
@@ -226,63 +236,229 @@ struct set_schib_struct {
 	unsigned long address;
 	wait_queue_head_t wait;
 	int ret;
+	struct kref kref;
 };
 
+static void cmf_set_schib_release(struct kref *kref)
+{
+	struct set_schib_struct *set_data;
+
+	set_data = container_of(kref, struct set_schib_struct, kref);
+	kfree(set_data);
+}
+
+#define CMF_PENDING 1
+
 static int set_schib_wait(struct ccw_device *cdev, u32 mme,
 			  int mbfc, unsigned long address)
 {
-	struct set_schib_struct s = {
-		.mme = mme,
-		.mbfc = mbfc,
-		.address = address,
-		.wait = __WAIT_QUEUE_HEAD_INITIALIZER(s.wait),
-	};
+	struct set_schib_struct *set_data;
+	int ret;
 
 	spin_lock_irq(cdev->ccwlock);
-	s.ret = set_schib(cdev, mme, mbfc, address);
-	if (s.ret != -EBUSY) {
-		goto out_nowait;
+	if (!cdev->private->cmb) {
+		ret = -ENODEV;
+		goto out;
 	}
+	set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC);
+	if (!set_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	init_waitqueue_head(&set_data->wait);
+	kref_init(&set_data->kref);
+	set_data->mme = mme;
+	set_data->mbfc = mbfc;
+	set_data->address = address;
+
+	ret = set_schib(cdev, mme, mbfc, address);
+	if (ret != -EBUSY)
+		goto out_put;
 
 	if (cdev->private->state != DEV_STATE_ONLINE) {
-		s.ret = -EBUSY;
 		/* if the device is not online, don't even try again */
-		goto out_nowait;
+		ret = -EBUSY;
+		goto out_put;
 	}
+
 	cdev->private->state = DEV_STATE_CMFCHANGE;
-	cdev->private->cmb_wait = &s;
-	s.ret = 1;
+	set_data->ret = CMF_PENDING;
+	cdev->private->cmb_wait = set_data;
 
 	spin_unlock_irq(cdev->ccwlock);
-	if (wait_event_interruptible(s.wait, s.ret != 1)) {
+	if (wait_event_interruptible(set_data->wait,
+				     set_data->ret != CMF_PENDING)) {
 		spin_lock_irq(cdev->ccwlock);
-		if (s.ret == 1) {
-			s.ret = -ERESTARTSYS;
-			cdev->private->cmb_wait = 0;
+		if (set_data->ret == CMF_PENDING) {
+			set_data->ret = -ERESTARTSYS;
 			if (cdev->private->state == DEV_STATE_CMFCHANGE)
 				cdev->private->state = DEV_STATE_ONLINE;
 		}
 		spin_unlock_irq(cdev->ccwlock);
 	}
-	return s.ret;
-
-out_nowait:
+	spin_lock_irq(cdev->ccwlock);
+	cdev->private->cmb_wait = NULL;
+	ret = set_data->ret;
+out_put:
+	kref_put(&set_data->kref, cmf_set_schib_release);
+out:
 	spin_unlock_irq(cdev->ccwlock);
-	return s.ret;
+	return ret;
 }
 
 void retry_set_schib(struct ccw_device *cdev)
 {
-	struct set_schib_struct *s;
+	struct set_schib_struct *set_data;
+
+	set_data = cdev->private->cmb_wait;
+	if (!set_data) {
+		WARN_ON(1);
+		return;
+	}
+	kref_get(&set_data->kref);
+	set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
+				  set_data->address);
+	wake_up(&set_data->wait);
+	kref_put(&set_data->kref, cmf_set_schib_release);
+}
+
+static int cmf_copy_block(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	void *reference_buf;
+	void *hw_block;
+	struct cmb_data *cmb_data;
+
+	sch = to_subchannel(cdev->dev.parent);
+
+	if (stsch(sch->schid, &sch->schib))
+		return -ENODEV;
+
+	if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) {
+		/* Don't copy if a start function is in progress. */
+		if ((!sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED) &&
+		    (sch->schib.scsw.actl &
+		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
+		    (!sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS))
+			return -EBUSY;
+	}
+	cmb_data = cdev->private->cmb;
+	hw_block = cmbops->align(cmb_data->hw_block);
+	if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
+		/* No need to copy. */
+		return 0;
+	reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC);
+	if (!reference_buf)
+		return -ENOMEM;
+	/* Ensure consistency of block copied from hardware. */
+	do {
+		memcpy(cmb_data->last_block, hw_block, cmb_data->size);
+		memcpy(reference_buf, hw_block, cmb_data->size);
+	} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
+	cmb_data->last_update = get_clock();
+	kfree(reference_buf);
+	return 0;
+}
+
+struct copy_block_struct {
+	wait_queue_head_t wait;
+	int ret;
+	struct kref kref;
+};
+
+static void cmf_copy_block_release(struct kref *kref)
+{
+	struct copy_block_struct *copy_block;
+
+	copy_block = container_of(kref, struct copy_block_struct, kref);
+	kfree(copy_block);
+}
+
+static int cmf_cmb_copy_wait(struct ccw_device *cdev)
+{
+	struct copy_block_struct *copy_block;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		ret = -ENODEV;
+		goto out;
+	}
+	copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC);
+	if (!copy_block) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	init_waitqueue_head(&copy_block->wait);
+	kref_init(&copy_block->kref);
+
+	ret = cmf_copy_block(cdev);
+	if (ret != -EBUSY)
+		goto out_put;
+
+	if (cdev->private->state != DEV_STATE_ONLINE) {
+		ret = -EBUSY;
+		goto out_put;
+	}
+
+	cdev->private->state = DEV_STATE_CMFUPDATE;
+	copy_block->ret = CMF_PENDING;
+	cdev->private->cmb_wait = copy_block;
+
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	if (wait_event_interruptible(copy_block->wait,
+				     copy_block->ret != CMF_PENDING)) {
+		spin_lock_irqsave(cdev->ccwlock, flags);
+		if (copy_block->ret == CMF_PENDING) {
+			copy_block->ret = -ERESTARTSYS;
+			if (cdev->private->state == DEV_STATE_CMFUPDATE)
+				cdev->private->state = DEV_STATE_ONLINE;
+		}
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
+	}
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	cdev->private->cmb_wait = NULL;
+	ret = copy_block->ret;
+out_put:
+	kref_put(&copy_block->kref, cmf_copy_block_release);
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
+}
+
+void cmf_retry_copy_block(struct ccw_device *cdev)
+{
+	struct copy_block_struct *copy_block;
 
-	s = cdev->private->cmb_wait;
-	cdev->private->cmb_wait = 0;
-	if (!s) {
+	copy_block = cdev->private->cmb_wait;
+	if (!copy_block) {
 		WARN_ON(1);
 		return;
 	}
-	s->ret = set_schib(cdev, s->mme, s->mbfc, s->address);
-	wake_up(&s->wait);
+	kref_get(&copy_block->kref);
+	copy_block->ret = cmf_copy_block(cdev);
+	wake_up(&copy_block->wait);
+	kref_put(&copy_block->kref, cmf_copy_block_release);
+}
+
+static void cmf_generic_reset(struct ccw_device *cdev)
+{
+	struct cmb_data *cmb_data;
+
+	spin_lock_irq(cdev->ccwlock);
+	cmb_data = cdev->private->cmb;
+	if (cmb_data) {
+		memset(cmb_data->last_block, 0, cmb_data->size);
+		/*
+		 * Need to reset hw block as well to make the hardware start
+		 * from 0 again.
+		 */
+		memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
+		cmb_data->last_update = 0;
+	}
+	cdev->private->cmb_start_time = get_clock();
+	spin_unlock_irq(cdev->ccwlock);
 }
 
 /**
@@ -343,8 +519,8 @@ struct cmb {
 /* insert a single device into the cmb_area list
  * called with cmb_area.lock held from alloc_cmb
  */
-static inline int
-alloc_cmb_single (struct ccw_device *cdev)
+static inline int alloc_cmb_single (struct ccw_device *cdev,
+				    struct cmb_data *cmb_data)
 {
 	struct cmb *cmb;
 	struct ccw_device_private *node;
@@ -358,10 +534,12 @@ alloc_cmb_single (struct ccw_device *cdev)
 
 	/* find first unused cmb in cmb_area.mem.
 	 * this is a little tricky: cmb_area.list
-	 * remains sorted by ->cmb pointers */
+	 * remains sorted by ->cmb->hw_data pointers */
 	cmb = cmb_area.mem;
 	list_for_each_entry(node, &cmb_area.list, cmb_list) {
-		if ((struct cmb*)node->cmb > cmb)
+		struct cmb_data *data;
+		data = node->cmb;
+		if ((struct cmb*)data->hw_block > cmb)
 			break;
 		cmb++;
 	}
@@ -372,7 +550,8 @@ alloc_cmb_single (struct ccw_device *cdev)
 
 	/* insert new cmb */
 	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
-	cdev->private->cmb = cmb;
+	cmb_data->hw_block = cmb;
+	cdev->private->cmb = cmb_data;
 	ret = 0;
 out:
 	spin_unlock_irq(cdev->ccwlock);
@@ -385,7 +564,19 @@ alloc_cmb (struct ccw_device *cdev)
 	int ret;
 	struct cmb *mem;
 	ssize_t size;
+	struct cmb_data *cmb_data;
+
+	/* Allocate private cmb_data. */
+	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
+	if (!cmb_data)
+		return -ENOMEM;
 
+	cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
+	if (!cmb_data->last_block) {
+		kfree(cmb_data);
+		return -ENOMEM;
+	}
+	cmb_data->size = sizeof(struct cmb);
 	spin_lock(&cmb_area.lock);
 
 	if (!cmb_area.mem) {
@@ -414,29 +605,36 @@ alloc_cmb (struct ccw_device *cdev)
 	}
 
 	/* do the actual allocation */
-	ret = alloc_cmb_single(cdev);
+	ret = alloc_cmb_single(cdev, cmb_data);
 out:
 	spin_unlock(&cmb_area.lock);
-
+	if (ret) {
+		kfree(cmb_data->last_block);
+		kfree(cmb_data);
+	}
 	return ret;
 }
 
-static void
-free_cmb(struct ccw_device *cdev)
+static void free_cmb(struct ccw_device *cdev)
 {
 	struct ccw_device_private *priv;
-
-	priv = cdev->private;
+	struct cmb_data *cmb_data;
 
 	spin_lock(&cmb_area.lock);
 	spin_lock_irq(cdev->ccwlock);
 
+	priv = cdev->private;
+
 	if (list_empty(&priv->cmb_list)) {
 		/* already freed */
 		goto out;
 	}
 
+	cmb_data = priv->cmb;
 	priv->cmb = NULL;
+	if (cmb_data)
+		kfree(cmb_data->last_block);
+	kfree(cmb_data);
 	list_del_init(&priv->cmb_list);
 
 	if (list_empty(&cmb_area.list)) {
@@ -451,83 +649,97 @@ out:
 	spin_unlock(&cmb_area.lock);
 }
 
-static int
-set_cmb(struct ccw_device *cdev, u32 mme)
+static int set_cmb(struct ccw_device *cdev, u32 mme)
 {
 	u16 offset;
+	struct cmb_data *cmb_data;
+	unsigned long flags;
 
-	if (!cdev->private->cmb)
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
 		return -EINVAL;
-
-	offset = mme ? (struct cmb *)cdev->private->cmb - cmb_area.mem : 0;
+	}
+	cmb_data = cdev->private->cmb;
+	offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
 
 	return set_schib_wait(cdev, mme, 0, offset);
 }
 
-static u64
-read_cmb (struct ccw_device *cdev, int index)
+static u64 read_cmb (struct ccw_device *cdev, int index)
 {
-	/* yes, we have to put it on the stack
-	 * because the cmb must only be accessed
-	 * atomically, e.g. with mvc */
-	struct cmb cmb;
-	unsigned long flags;
+	struct cmb *cmb;
 	u32 val;
+	int ret;
+	unsigned long flags;
+
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
+		return 0;
 
 	spin_lock_irqsave(cdev->ccwlock, flags);
 	if (!cdev->private->cmb) {
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
-		return 0;
+		ret = 0;
+		goto out;
 	}
-
-	cmb = *(struct cmb*)cdev->private->cmb;
-	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	cmb = ((struct cmb_data *)cdev->private->cmb)->last_block;
 
 	switch (index) {
 	case cmb_ssch_rsch_count:
-		return cmb.ssch_rsch_count;
+		ret = cmb->ssch_rsch_count;
+		goto out;
 	case cmb_sample_count:
-		return cmb.sample_count;
+		ret = cmb->sample_count;
+		goto out;
 	case cmb_device_connect_time:
-		val = cmb.device_connect_time;
+		val = cmb->device_connect_time;
 		break;
 	case cmb_function_pending_time:
-		val = cmb.function_pending_time;
+		val = cmb->function_pending_time;
 		break;
 	case cmb_device_disconnect_time:
-		val = cmb.device_disconnect_time;
+		val = cmb->device_disconnect_time;
 		break;
 	case cmb_control_unit_queuing_time:
-		val = cmb.control_unit_queuing_time;
+		val = cmb->control_unit_queuing_time;
 		break;
 	case cmb_device_active_only_time:
-		val = cmb.device_active_only_time;
+		val = cmb->device_active_only_time;
 		break;
 	default:
-		return 0;
+		ret = 0;
+		goto out;
 	}
-	return time_to_avg_nsec(val, cmb.sample_count);
+	ret = time_to_avg_nsec(val, cmb->sample_count);
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
 }
 
-static int
-readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
+static int readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
 {
-	/* yes, we have to put it on the stack
-	 * because the cmb must only be accessed
-	 * atomically, e.g. with mvc */
-	struct cmb cmb;
-	unsigned long flags;
+	struct cmb *cmb;
+	struct cmb_data *cmb_data;
 	u64 time;
+	unsigned long flags;
+	int ret;
 
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
+		return ret;
 	spin_lock_irqsave(cdev->ccwlock, flags);
-	if (!cdev->private->cmb) {
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
-		return -ENODEV;
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data) {
+		ret = -ENODEV;
+		goto out;
 	}
-
-	cmb = *(struct cmb*)cdev->private->cmb;
-	time = get_clock() - cdev->private->cmb_start_time;
-	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	if (cmb_data->last_update == 0) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	cmb = cmb_data->last_block;
+	time = cmb_data->last_update - cdev->private->cmb_start_time;
 
 	memset(data, 0, sizeof(struct cmbdata));
 
@@ -538,31 +750,32 @@ readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
 	data->elapsed_time = (time * 1000) >> 12;
 
 	/* copy data to new structure */
-	data->ssch_rsch_count = cmb.ssch_rsch_count;
-	data->sample_count = cmb.sample_count;
+	data->ssch_rsch_count = cmb->ssch_rsch_count;
+	data->sample_count = cmb->sample_count;
 
 	/* time fields are converted to nanoseconds while copying */
-	data->device_connect_time = time_to_nsec(cmb.device_connect_time);
-	data->function_pending_time = time_to_nsec(cmb.function_pending_time);
-	data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
+	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+	data->device_disconnect_time =
+		time_to_nsec(cmb->device_disconnect_time);
 	data->control_unit_queuing_time
-		= time_to_nsec(cmb.control_unit_queuing_time);
+		= time_to_nsec(cmb->control_unit_queuing_time);
 	data->device_active_only_time
-		= time_to_nsec(cmb.device_active_only_time);
+		= time_to_nsec(cmb->device_active_only_time);
+	ret = 0;
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
+}
 
-	return 0;
+static void reset_cmb(struct ccw_device *cdev)
+{
+	cmf_generic_reset(cdev);
 }
 
-static void
-reset_cmb(struct ccw_device *cdev)
+static void * align_cmb(void *area)
 {
-	struct cmb *cmb;
-	spin_lock_irq(cdev->ccwlock);
-	cmb = cdev->private->cmb;
-	if (cmb)
-		memset (cmb, 0, sizeof (*cmb));
-	cdev->private->cmb_start_time = get_clock();
-	spin_unlock_irq(cdev->ccwlock);
+	return area;
 }
 
 static struct attribute_group cmf_attr_group;
@@ -574,6 +787,7 @@ static struct cmb_operations cmbops_basic = {
 	.read = read_cmb,
 	.readall = readall_cmb,
 	.reset = reset_cmb,
+	.align = align_cmb,
 	.attr_group = &cmf_attr_group,
 };
 
@@ -610,22 +824,34 @@ static inline struct cmbe* cmbe_align(struct cmbe *c)
 	return (struct cmbe*)addr;
 }
 
-static int
-alloc_cmbe (struct ccw_device *cdev)
+static int alloc_cmbe (struct ccw_device *cdev)
 {
 	struct cmbe *cmbe;
-	cmbe = kmalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
+	struct cmb_data *cmb_data;
+	int ret;
+
+	cmbe = kzalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
 	if (!cmbe)
 		return -ENOMEM;
-
+	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
+	if (!cmb_data) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
+	if (!cmb_data->last_block) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	cmb_data->size = sizeof(struct cmbe);
 	spin_lock_irq(cdev->ccwlock);
 	if (cdev->private->cmb) {
-		kfree(cmbe);
 		spin_unlock_irq(cdev->ccwlock);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto out_free;
 	}
-
-	cdev->private->cmb = cmbe;
+	cmb_data->hw_block = cmbe;
+	cdev->private->cmb = cmb_data;
 	spin_unlock_irq(cdev->ccwlock);
 
 	/* activate global measurement if this is the first channel */
@@ -636,14 +862,24 @@ alloc_cmbe (struct ccw_device *cdev)
 	spin_unlock(&cmb_area.lock);
 
 	return 0;
+out_free:
+	if (cmb_data)
+		kfree(cmb_data->last_block);
+	kfree(cmb_data);
+	kfree(cmbe);
+	return ret;
 }
 
-static void
-free_cmbe (struct ccw_device *cdev)
+static void free_cmbe (struct ccw_device *cdev)
 {
+	struct cmb_data *cmb_data;
+
 	spin_lock_irq(cdev->ccwlock);
-	kfree(cdev->private->cmb);
+	cmb_data = cdev->private->cmb;
 	cdev->private->cmb = NULL;
+	if (cmb_data)
+		kfree(cmb_data->last_block);
+	kfree(cmb_data);
 	spin_unlock_irq(cdev->ccwlock);
 
 	/* deactivate global measurement if this is the last channel */
@@ -654,89 +890,105 @@ free_cmbe (struct ccw_device *cdev)
 	spin_unlock(&cmb_area.lock);
 }
 
-static int
-set_cmbe(struct ccw_device *cdev, u32 mme)
+static int set_cmbe(struct ccw_device *cdev, u32 mme)
 {
 	unsigned long mba;
+	struct cmb_data *cmb_data;
+	unsigned long flags;
 
-	if (!cdev->private->cmb)
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
 		return -EINVAL;
-	mba = mme ? (unsigned long) cmbe_align(cdev->private->cmb) : 0;
+	}
+	cmb_data = cdev->private->cmb;
+	mba = mme ? (unsigned long) cmbe_align(cmb_data->hw_block) : 0;
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
 
 	return set_schib_wait(cdev, mme, 1, mba);
 }
 
 
-u64
-read_cmbe (struct ccw_device *cdev, int index)
+static u64 read_cmbe (struct ccw_device *cdev, int index)
 {
-	/* yes, we have to put it on the stack
-	 * because the cmb must only be accessed
-	 * atomically, e.g. with mvc */
-	struct cmbe cmb;
-	unsigned long flags;
+	struct cmbe *cmb;
+	struct cmb_data *cmb_data;
 	u32 val;
+	int ret;
+	unsigned long flags;
 
-	spin_lock_irqsave(cdev->ccwlock, flags);
-	if (!cdev->private->cmb) {
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
 		return 0;
-	}
 
-	cmb = *cmbe_align(cdev->private->cmb);
-	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data) {
+		ret = 0;
+		goto out;
+	}
+	cmb = cmb_data->last_block;
 
 	switch (index) {
 	case cmb_ssch_rsch_count:
-		return cmb.ssch_rsch_count;
+		ret = cmb->ssch_rsch_count;
+		goto out;
 	case cmb_sample_count:
-		return cmb.sample_count;
+		ret = cmb->sample_count;
+		goto out;
 	case cmb_device_connect_time:
-		val = cmb.device_connect_time;
+		val = cmb->device_connect_time;
 		break;
 	case cmb_function_pending_time:
-		val = cmb.function_pending_time;
+		val = cmb->function_pending_time;
 		break;
 	case cmb_device_disconnect_time:
-		val = cmb.device_disconnect_time;
+		val = cmb->device_disconnect_time;
 		break;
 	case cmb_control_unit_queuing_time:
-		val = cmb.control_unit_queuing_time;
+		val = cmb->control_unit_queuing_time;
 		break;
 	case cmb_device_active_only_time:
-		val = cmb.device_active_only_time;
+		val = cmb->device_active_only_time;
 		break;
 	case cmb_device_busy_time:
-		val = cmb.device_busy_time;
+		val = cmb->device_busy_time;
 		break;
 	case cmb_initial_command_response_time:
-		val = cmb.initial_command_response_time;
+		val = cmb->initial_command_response_time;
 		break;
 	default:
-		return 0;
+		ret = 0;
+		goto out;
 	}
-	return time_to_avg_nsec(val, cmb.sample_count);
+	ret = time_to_avg_nsec(val, cmb->sample_count);
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
 }
 
-static int
-readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
+static int readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
 {
-	/* yes, we have to put it on the stack
-	 * because the cmb must only be accessed
-	 * atomically, e.g. with mvc */
-	struct cmbe cmb;
-	unsigned long flags;
+	struct cmbe *cmb;
+	struct cmb_data *cmb_data;
 	u64 time;
+	unsigned long flags;
+	int ret;
 
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
+		return ret;
 	spin_lock_irqsave(cdev->ccwlock, flags);
-	if (!cdev->private->cmb) {
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
-		return -ENODEV;
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data) {
+		ret = -ENODEV;
+		goto out;
 	}
-
-	cmb = *cmbe_align(cdev->private->cmb);
-	time = get_clock() - cdev->private->cmb_start_time;
-	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	if (cmb_data->last_update == 0) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	time = cmb_data->last_update - cdev->private->cmb_start_time;
 
 	memset (data, 0, sizeof(struct cmbdata));
 
@@ -746,35 +998,38 @@ readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
 	/* conver to nanoseconds */
 	data->elapsed_time = (time * 1000) >> 12;
 
+	cmb = cmb_data->last_block;
 	/* copy data to new structure */
-	data->ssch_rsch_count = cmb.ssch_rsch_count;
-	data->sample_count = cmb.sample_count;
+	data->ssch_rsch_count = cmb->ssch_rsch_count;
+	data->sample_count = cmb->sample_count;
 
 	/* time fields are converted to nanoseconds while copying */
-	data->device_connect_time = time_to_nsec(cmb.device_connect_time);
-	data->function_pending_time = time_to_nsec(cmb.function_pending_time);
-	data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
+	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+	data->device_disconnect_time =
+		time_to_nsec(cmb->device_disconnect_time);
 	data->control_unit_queuing_time
-		= time_to_nsec(cmb.control_unit_queuing_time);
+		= time_to_nsec(cmb->control_unit_queuing_time);
 	data->device_active_only_time
-		= time_to_nsec(cmb.device_active_only_time);
-	data->device_busy_time = time_to_nsec(cmb.device_busy_time);
+		= time_to_nsec(cmb->device_active_only_time);
+	data->device_busy_time = time_to_nsec(cmb->device_busy_time);
 	data->initial_command_response_time
-		= time_to_nsec(cmb.initial_command_response_time);
+		= time_to_nsec(cmb->initial_command_response_time);
 
-	return 0;
+	ret = 0;
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
 }
 
-static void
-reset_cmbe(struct ccw_device *cdev)
+static void reset_cmbe(struct ccw_device *cdev)
 {
-	struct cmbe *cmb;
-	spin_lock_irq(cdev->ccwlock);
-	cmb = cmbe_align(cdev->private->cmb);
-	if (cmb)
-		memset (cmb, 0, sizeof (*cmb));
-	cdev->private->cmb_start_time = get_clock();
-	spin_unlock_irq(cdev->ccwlock);
+	cmf_generic_reset(cdev);
+}
+
+static void * align_cmbe(void *area)
+{
+	return cmbe_align(area);
 }
 
 static struct attribute_group cmf_attr_group_ext;
@@ -786,6 +1041,7 @@ static struct cmb_operations cmbops_extended = {
 	.read = read_cmbe,
 	.readall = readall_cmbe,
 	.reset = reset_cmbe,
+	.align = align_cmbe,
 	.attr_group = &cmf_attr_group_ext,
 };
 
@@ -803,14 +1059,19 @@ cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr,
 	struct ccw_device *cdev;
 	long interval;
 	unsigned long count;
+	struct cmb_data *cmb_data;
 
 	cdev = to_ccwdev(dev);
-	interval = get_clock() - cdev->private->cmb_start_time;
 	count = cmf_read(cdev, cmb_sample_count);
-	if (count)
+	spin_lock_irq(cdev->ccwlock);
+	cmb_data = cdev->private->cmb;
+	if (count) {
+		interval = cmb_data->last_update -
+			cdev->private->cmb_start_time;
 		interval /= count;
-	else
+	} else
 		interval = -1;
+	spin_unlock_irq(cdev->ccwlock);
 	return sprintf(buf, "%ld\n", interval);
 }
 
@@ -823,7 +1084,10 @@ cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char
 	int ret;
 
 	ret = cmf_readall(to_ccwdev(dev), &data);
-	if (ret)
+	if (ret == -EAGAIN || ret == -ENODEV)
+		/* No data (yet/currently) available to use for calculation. */
+		return sprintf(buf, "n/a\n");
+	else if (ret)
 		return ret;
 
 	utilization = data.device_connect_time +
@@ -982,6 +1246,13 @@ cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
 	return cmbops->readall(cdev, data);
 }
 
+/* Reenable cmf when a disconnected device becomes available again. */
+int cmf_reenable(struct ccw_device *cdev)
+{
+	cmbops->reset(cdev);
+	return cmbops->set(cdev, 2);
+}
+
 static int __init
 init_cmf(void)
 {
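
Note: cmf_copy_block() above snapshots a measurement block that the channel subsystem hardware may rewrite at any time: it copies the block twice and retries until both copies compare equal, so a torn, half-updated block never ends up in last_block. A standalone sketch of that consistency loop, assuming a fixed block size and no kernel dependencies (illustrative only, not kernel code):

    #include <stdio.h>
    #include <string.h>

    #define BLK_SIZE 64

    /*
     * Snapshot a block that another agent may rewrite at any moment:
     * copy it twice and accept the snapshot only when the two copies
     * agree, i.e. no update raced with the copy.
     */
    static void copy_consistent(void *dst, const volatile unsigned char *src,
    			    size_t size)
    {
    	unsigned char reference[BLK_SIZE];

    	do {
    		memcpy(dst, (const void *)src, size);
    		memcpy(reference, (const void *)src, size);
    	} while (memcmp(dst, reference, size) != 0);
    }

    int main(void)
    {
    	static volatile unsigned char hw_block[BLK_SIZE];
    	unsigned char snapshot[BLK_SIZE];

    	/* No concurrent writer here, so the loop exits first try. */
    	copy_consistent(snapshot, hw_block, sizeof(snapshot));
    	printf("first byte: %u\n", snapshot[0]);
    	return 0;
    }

The kernel version additionally records the snapshot time (last_update) under the same retry discipline, which is what readall_cmb()/readall_cmbe() report as the measurement interval.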
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 74ea8aac4b7d..1d3be80797f8 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -19,9 +19,11 @@
 #include "cio_debug.h"
 #include "ioasm.h"
 #include "chsc.h"
+#include "device.h"
 
 int need_rescan = 0;
 int css_init_done = 0;
+static int need_reprobe = 0;
 static int max_ssid = 0;
 
 struct channel_subsystem *css[__MAX_CSSID + 1];
@@ -339,6 +341,67 @@ typedef void (*workfunc)(void *);
 DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
 struct workqueue_struct *slow_path_wq;
 
+/* Reprobe subchannel if unregistered. */
+static int reprobe_subchannel(struct subchannel_id schid, void *data)
+{
+	struct subchannel *sch;
+	int ret;
+
+	CIO_DEBUG(KERN_INFO, 6, "cio: reprobe 0.%x.%04x\n",
+		  schid.ssid, schid.sch_no);
+	if (need_reprobe)
+		return -EAGAIN;
+
+	sch = get_subchannel_by_schid(schid);
+	if (sch) {
+		/* Already known. */
+		put_device(&sch->dev);
+		return 0;
+	}
+
+	ret = css_probe_device(schid);
+	switch (ret) {
+	case 0:
+		break;
+	case -ENXIO:
+	case -ENOMEM:
+		/* These should abort looping */
+		break;
+	default:
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/* Work function used to reprobe all unregistered subchannels. */
+static void reprobe_all(void *data)
+{
+	int ret;
+
+	CIO_MSG_EVENT(2, "reprobe start\n");
+
+	need_reprobe = 0;
+	/* Make sure initial subchannel scan is done. */
+	wait_event(ccw_device_init_wq,
+		   atomic_read(&ccw_device_init_count) == 0);
+	ret = for_each_subchannel(reprobe_subchannel, NULL);
+
+	CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
+		      need_reprobe);
+}
+
+DECLARE_WORK(css_reprobe_work, reprobe_all, NULL);
+
+/* Schedule reprobing of all unregistered subchannels. */
+void css_schedule_reprobe(void)
+{
+	need_reprobe = 1;
+	queue_work(ccw_device_work, &css_reprobe_work);
+}
+
+EXPORT_SYMBOL_GPL(css_schedule_reprobe);
+
 /*
  * Rescan for new devices. FIXME: This is slow.
  * This function is called when we have lost CRWs due to overflows and we have
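
Note: css_schedule_reprobe() above coalesces any number of requests into a single need_reprobe flag plus one queued work item, and reprobe_subchannel() aborts the walk with -EAGAIN once the flag is raised again mid-scan, leaving the next queued run to start over. A standalone sketch of that retrigger logic (illustrative only; the workqueue is replaced by a simple retry loop):

    #include <stdio.h>

    static int need_reprobe;
    static int request_once = 1;

    /* Stands in for reprobe_subchannel(): give up early (like -EAGAIN)
     * if a new reprobe request arrived after this scan started. */
    static int probe_one(int id)
    {
    	if (need_reprobe)
    		return -1;
    	printf("probing %d\n", id);
    	/* Simulate a request arriving in the middle of the scan. */
    	if (id == 2 && request_once) {
    		request_once = 0;
    		need_reprobe = 1;
    	}
    	return 0;
    }

    /* Stands in for reprobe_all(): clear the request flag, then walk
     * all items until done or until the flag is raised again. */
    static int reprobe_all(void)
    {
    	int id, ret = 0;

    	need_reprobe = 0;
    	for (id = 0; id < 5 && ret == 0; id++)
    		ret = probe_one(id);
    	return ret;
    }

    int main(void)
    {
    	/* The first scan is aborted by the mid-scan request; the
    	 * "queued" second scan then runs to completion. */
    	while (reprobe_all() != 0)
    		;
    	return 0;
    }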
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 8e3053c2a451..eafde43e8410 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -133,8 +133,8 @@ struct css_driver io_subchannel_driver = {
 
 struct workqueue_struct *ccw_device_work;
 struct workqueue_struct *ccw_device_notify_work;
-static wait_queue_head_t ccw_device_init_wq;
-static atomic_t ccw_device_init_count;
+wait_queue_head_t ccw_device_init_wq;
+atomic_t ccw_device_init_count;
 
 static int __init
 init_ccw_bus_type (void)
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 11587ebb7289..00be9a5b4acd 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -1,6 +1,10 @@
 #ifndef S390_DEVICE_H
 #define S390_DEVICE_H
 
+#include <asm/ccwdev.h>
+#include <asm/atomic.h>
+#include <linux/wait.h>
+
 /*
  * states of the device statemachine
  */
@@ -23,6 +27,7 @@ enum dev_state {
 	DEV_STATE_DISCONNECTED,
 	DEV_STATE_DISCONNECTED_SENSE_ID,
 	DEV_STATE_CMFCHANGE,
+	DEV_STATE_CMFUPDATE,
 	/* last element! */
 	NR_DEV_STATES
 };
@@ -67,6 +72,8 @@ dev_fsm_final_state(struct ccw_device *cdev)
 
 extern struct workqueue_struct *ccw_device_work;
 extern struct workqueue_struct *ccw_device_notify_work;
+extern wait_queue_head_t ccw_device_init_wq;
+extern atomic_t ccw_device_init_count;
 
 void io_subchannel_recog_done(struct ccw_device *cdev);
 
@@ -112,5 +119,8 @@ int ccw_device_stlck(struct ccw_device *);
 void ccw_device_set_timeout(struct ccw_device *, int);
 extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
 
+/* Channel measurement facility related */
 void retry_set_schib(struct ccw_device *cdev);
+void cmf_retry_copy_block(struct ccw_device *);
+int cmf_reenable(struct ccw_device *);
 #endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 49ec562d7f60..7d0dd72635eb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -336,8 +336,11 @@ ccw_device_oper_notify(void *data)
 	if (!ret)
 		/* Driver doesn't want device back. */
 		ccw_device_do_unreg_rereg((void *)cdev);
-	else
+	else {
+		/* Reenable channel measurements, if needed. */
+		cmf_reenable(cdev);
 		wake_up(&cdev->private->wait_q);
+	}
 }
 
 /*
@@ -861,6 +864,8 @@ ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
 	irb = (struct irb *) __LC_IRB;
 	/* Accumulate status. We don't do basic sense. */
 	ccw_device_accumulate_irb(cdev, irb);
+	/* Remember to clear irb to avoid residuals. */
+	memset(&cdev->private->irb, 0, sizeof(struct irb));
 	/* Try to start delayed device verification. */
 	ccw_device_online_verify(cdev, 0);
 	/* Note: Don't call handler for cio initiated clear! */
@@ -1093,6 +1098,13 @@ ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
 	dev_fsm_event(cdev, dev_event);
 }
 
+static void ccw_device_update_cmfblock(struct ccw_device *cdev,
+				       enum dev_event dev_event)
+{
+	cmf_retry_copy_block(cdev);
+	cdev->private->state = DEV_STATE_ONLINE;
+	dev_fsm_event(cdev, dev_event);
+}
 
 static void
 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
@@ -1247,6 +1259,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
 		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
 	},
+	[DEV_STATE_CMFUPDATE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
+	},
 };
 
 /*
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 795abb5a65ba..b266ad8e14ff 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -78,7 +78,8 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
 		return -ENODEV;
 	if (cdev->private->state == DEV_STATE_NOT_OPER)
 		return -ENODEV;
-	if (cdev->private->state == DEV_STATE_VERIFY) {
+	if (cdev->private->state == DEV_STATE_VERIFY ||
+	    cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
 		/* Remember to fake irb when finished. */
 		if (!cdev->private->flags.fake_irb) {
 			cdev->private->flags.fake_irb = 1;
@@ -270,7 +271,8 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
 	 * We didn't get channel end / device end. Check if path
 	 * verification has been started; we can retry after it has
 	 * finished. We also retry unit checks except for command reject
-	 * or intervention required.
+	 * or intervention required. Also check for long busy
+	 * conditions.
 	 */
 	if (cdev->private->flags.doverify ||
 	    cdev->private->state == DEV_STATE_VERIFY)
@@ -279,6 +281,10 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
 		 !(irb->ecw[0] &
 		   (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
 		cdev->private->intparm = -EAGAIN;
+	else if ((irb->scsw.dstat & DEV_STAT_ATTENTION) &&
+		 (irb->scsw.dstat & DEV_STAT_DEV_END) &&
+		 (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP))
+		cdev->private->intparm = -EAGAIN;
 	else
 		cdev->private->intparm = -EIO;
 