-rw-r--r--  drivers/s390/cio/cmf.c        | 623
-rw-r--r--  drivers/s390/cio/device.h     |   4
-rw-r--r--  drivers/s390/cio/device_fsm.c |  18
-rw-r--r--  include/asm-s390/cmb.h        |   4
4 files changed, 468 insertions, 181 deletions
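
Note: the core of this change is that readers of channel measurement data no longer touch the block the hardware updates in place. A new struct cmb_data pairs the hardware block (hw_block) with a snapshot (last_block), and cmf_readall() now returns -EAGAIN until a first snapshot has been copied. A minimal caller sketch in kernel C follows; it is hypothetical driver code, not part of this patch, though cmf_readall() and struct cmbdata are the real interfaces:

/* Hypothetical consumer of the new cmf_readall() contract. */
static int log_connect_time(struct ccw_device *cdev)
{
	struct cmbdata data;
	int ret;

	ret = cmf_readall(cdev, &data);
	if (ret == -EAGAIN)
		return 0;	/* enabled, but no hardware block copied yet */
	if (ret)
		return ret;	/* e.g. -ENODEV: measurements not set up */
	/* time fields arrive already converted to nanoseconds */
	printk(KERN_INFO "device connect time: %llu ns\n",
	       (unsigned long long) data.device_connect_time);
	return 0;
}
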
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 07ef3f640f4a..1c3e8e9012b0 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -3,9 +3,10 @@
  *
  * Linux on zSeries Channel Measurement Facility support
  *
- * Copyright 2000,2003 IBM Corporation
+ * Copyright 2000,2006 IBM Corporation
  *
- * Author: Arnd Bergmann <arndb@de.ibm.com>
+ * Authors: Arnd Bergmann <arndb@de.ibm.com>
+ *	    Cornelia Huck <cornelia.huck@de.ibm.com>
  *
  * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
  *
@@ -96,9 +97,9 @@ module_param(format, bool, 0444);
 /**
  * struct cmb_operations - functions to use depending on cmb_format
  *
- * all these functions operate on a struct cmf_device. There is only
- * one instance of struct cmb_operations because all cmf_device
- * objects are guaranteed to be of the same type.
+ * Most of these functions operate on a struct ccw_device. There is only
+ * one instance of struct cmb_operations because the format of the measurement
+ * data is guaranteed to be the same for every ccw_device.
  *
  * @alloc: allocate memory for a channel measurement block,
  *	   either with the help of a special pool or with kmalloc
@@ -107,6 +108,7 @@ module_param(format, bool, 0444);
  * @readall: read a measurement block in a common format
  * @reset: clear the data in the associated measurement block and
  *	   reset its time stamp
+ * @align: align an allocated block so that the hardware can use it
  */
 struct cmb_operations {
 	int (*alloc) (struct ccw_device*);
@@ -115,11 +117,19 @@ struct cmb_operations {
 	u64 (*read) (struct ccw_device*, int);
 	int (*readall)(struct ccw_device*, struct cmbdata *);
 	void (*reset) (struct ccw_device*);
+	void * (*align) (void *);
 
 	struct attribute_group *attr_group;
 };
 static struct cmb_operations *cmbops;
 
+struct cmb_data {
+	void *hw_block;   /* Pointer to block updated by hardware */
+	void *last_block; /* Last changed block copied from hardware block */
+	int size;	  /* Size of hw_block and last_block */
+	unsigned long long last_update; /* when last_block was updated */
+};
+
 /* our user interface is designed in terms of nanoseconds,
  * while the hardware measures total times in its own
  * unit.*/
@@ -226,63 +236,229 @@ struct set_schib_struct {
 	unsigned long address;
 	wait_queue_head_t wait;
 	int ret;
+	struct kref kref;
 };
 
+static void cmf_set_schib_release(struct kref *kref)
+{
+	struct set_schib_struct *set_data;
+
+	set_data = container_of(kref, struct set_schib_struct, kref);
+	kfree(set_data);
+}
+
+#define CMF_PENDING 1
+
 static int set_schib_wait(struct ccw_device *cdev, u32 mme,
 				int mbfc, unsigned long address)
 {
-	struct set_schib_struct s = {
-		.mme = mme,
-		.mbfc = mbfc,
-		.address = address,
-		.wait = __WAIT_QUEUE_HEAD_INITIALIZER(s.wait),
-	};
+	struct set_schib_struct *set_data;
+	int ret;
 
 	spin_lock_irq(cdev->ccwlock);
-	s.ret = set_schib(cdev, mme, mbfc, address);
-	if (s.ret != -EBUSY) {
-		goto out_nowait;
+	if (!cdev->private->cmb) {
+		ret = -ENODEV;
+		goto out;
 	}
+	set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC);
+	if (!set_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	init_waitqueue_head(&set_data->wait);
+	kref_init(&set_data->kref);
+	set_data->mme = mme;
+	set_data->mbfc = mbfc;
+	set_data->address = address;
+
+	ret = set_schib(cdev, mme, mbfc, address);
+	if (ret != -EBUSY)
+		goto out_put;
 
 	if (cdev->private->state != DEV_STATE_ONLINE) {
-		s.ret = -EBUSY;
 		/* if the device is not online, don't even try again */
-		goto out_nowait;
+		ret = -EBUSY;
+		goto out_put;
 	}
+
 	cdev->private->state = DEV_STATE_CMFCHANGE;
-	cdev->private->cmb_wait = &s;
-	s.ret = 1;
+	set_data->ret = CMF_PENDING;
+	cdev->private->cmb_wait = set_data;
 
 	spin_unlock_irq(cdev->ccwlock);
-	if (wait_event_interruptible(s.wait, s.ret != 1)) {
+	if (wait_event_interruptible(set_data->wait,
+				     set_data->ret != CMF_PENDING)) {
 		spin_lock_irq(cdev->ccwlock);
-		if (s.ret == 1) {
-			s.ret = -ERESTARTSYS;
-			cdev->private->cmb_wait = 0;
+		if (set_data->ret == CMF_PENDING) {
+			set_data->ret = -ERESTARTSYS;
 			if (cdev->private->state == DEV_STATE_CMFCHANGE)
 				cdev->private->state = DEV_STATE_ONLINE;
 		}
 		spin_unlock_irq(cdev->ccwlock);
 	}
-	return s.ret;
-
-out_nowait:
+	spin_lock_irq(cdev->ccwlock);
+	cdev->private->cmb_wait = NULL;
+	ret = set_data->ret;
+out_put:
+	kref_put(&set_data->kref, cmf_set_schib_release);
+out:
 	spin_unlock_irq(cdev->ccwlock);
-	return s.ret;
+	return ret;
 }
 
 void retry_set_schib(struct ccw_device *cdev)
 {
-	struct set_schib_struct *s;
+	struct set_schib_struct *set_data;
+
+	set_data = cdev->private->cmb_wait;
+	if (!set_data) {
+		WARN_ON(1);
+		return;
+	}
+	kref_get(&set_data->kref);
+	set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
+				  set_data->address);
+	wake_up(&set_data->wait);
+	kref_put(&set_data->kref, cmf_set_schib_release);
+}
+
+static int cmf_copy_block(struct ccw_device *cdev)
+{
+	struct subchannel *sch;
+	void *reference_buf;
+	void *hw_block;
+	struct cmb_data *cmb_data;
+
+	sch = to_subchannel(cdev->dev.parent);
+
+	if (stsch(sch->schid, &sch->schib))
+		return -ENODEV;
+
+	if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) {
+		/* Don't copy if a start function is in progress. */
+		if (!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED) &&
+		    (sch->schib.scsw.actl &
+		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
+		    !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS))
+			return -EBUSY;
+	}
+	cmb_data = cdev->private->cmb;
+	hw_block = cmbops->align(cmb_data->hw_block);
+	if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
+		/* No need to copy. */
+		return 0;
+	reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC);
+	if (!reference_buf)
+		return -ENOMEM;
+	/* Ensure consistency of block copied from hardware. */
+	do {
+		memcpy(cmb_data->last_block, hw_block, cmb_data->size);
+		memcpy(reference_buf, hw_block, cmb_data->size);
+	} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
+	cmb_data->last_update = get_clock();
+	kfree(reference_buf);
+	return 0;
+}
+
+struct copy_block_struct {
+	wait_queue_head_t wait;
+	int ret;
+	struct kref kref;
+};
+
+static void cmf_copy_block_release(struct kref *kref)
+{
+	struct copy_block_struct *copy_block;
+
+	copy_block = container_of(kref, struct copy_block_struct, kref);
+	kfree(copy_block);
+}
+
+static int cmf_cmb_copy_wait(struct ccw_device *cdev)
+{
+	struct copy_block_struct *copy_block;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		ret = -ENODEV;
+		goto out;
+	}
+	copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC);
+	if (!copy_block) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	init_waitqueue_head(&copy_block->wait);
+	kref_init(&copy_block->kref);
+
+	ret = cmf_copy_block(cdev);
+	if (ret != -EBUSY)
+		goto out_put;
+
+	if (cdev->private->state != DEV_STATE_ONLINE) {
+		ret = -EBUSY;
+		goto out_put;
+	}
+
+	cdev->private->state = DEV_STATE_CMFUPDATE;
+	copy_block->ret = CMF_PENDING;
+	cdev->private->cmb_wait = copy_block;
+
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	if (wait_event_interruptible(copy_block->wait,
+				     copy_block->ret != CMF_PENDING)) {
+		spin_lock_irqsave(cdev->ccwlock, flags);
+		if (copy_block->ret == CMF_PENDING) {
+			copy_block->ret = -ERESTARTSYS;
+			if (cdev->private->state == DEV_STATE_CMFUPDATE)
+				cdev->private->state = DEV_STATE_ONLINE;
+		}
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
+	}
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	cdev->private->cmb_wait = NULL;
+	ret = copy_block->ret;
+out_put:
+	kref_put(&copy_block->kref, cmf_copy_block_release);
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
+}
+
+void cmf_retry_copy_block(struct ccw_device *cdev)
+{
+	struct copy_block_struct *copy_block;
 
-	s = cdev->private->cmb_wait;
-	cdev->private->cmb_wait = 0;
-	if (!s) {
+	copy_block = cdev->private->cmb_wait;
+	if (!copy_block) {
 		WARN_ON(1);
 		return;
 	}
-	s->ret = set_schib(cdev, s->mme, s->mbfc, s->address);
-	wake_up(&s->wait);
+	kref_get(&copy_block->kref);
+	copy_block->ret = cmf_copy_block(cdev);
+	wake_up(&copy_block->wait);
+	kref_put(&copy_block->kref, cmf_copy_block_release);
+}
+
+static void cmf_generic_reset(struct ccw_device *cdev)
+{
+	struct cmb_data *cmb_data;
+
+	spin_lock_irq(cdev->ccwlock);
+	cmb_data = cdev->private->cmb;
+	if (cmb_data) {
+		memset(cmb_data->last_block, 0, cmb_data->size);
+		/*
+		 * Need to reset hw block as well to make the hardware start
+		 * from 0 again.
+		 */
+		memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
+		cmb_data->last_update = 0;
+	}
+	cdev->private->cmb_start_time = get_clock();
+	spin_unlock_irq(cdev->ccwlock);
 }
 
 /**
@@ -343,8 +519,8 @@ struct cmb {
 /* insert a single device into the cmb_area list
  * called with cmb_area.lock held from alloc_cmb
  */
-static inline int
-alloc_cmb_single (struct ccw_device *cdev)
+static inline int alloc_cmb_single (struct ccw_device *cdev,
+				    struct cmb_data *cmb_data)
 {
 	struct cmb *cmb;
 	struct ccw_device_private *node;
@@ -358,10 +534,12 @@ alloc_cmb_single (struct ccw_device *cdev)
 
 	/* find first unused cmb in cmb_area.mem.
 	 * this is a little tricky: cmb_area.list
-	 * remains sorted by ->cmb pointers */
+	 * remains sorted by ->cmb->hw_block pointers */
 	cmb = cmb_area.mem;
 	list_for_each_entry(node, &cmb_area.list, cmb_list) {
-		if ((struct cmb*)node->cmb > cmb)
+		struct cmb_data *data;
+		data = node->cmb;
+		if ((struct cmb*)data->hw_block > cmb)
 			break;
 		cmb++;
 	}
@@ -372,7 +550,8 @@ alloc_cmb_single (struct ccw_device *cdev)
 
 	/* insert new cmb */
 	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
-	cdev->private->cmb = cmb;
+	cmb_data->hw_block = cmb;
+	cdev->private->cmb = cmb_data;
 	ret = 0;
 out:
 	spin_unlock_irq(cdev->ccwlock);
@@ -385,7 +564,19 @@ alloc_cmb (struct ccw_device *cdev)
 	int ret;
 	struct cmb *mem;
 	ssize_t size;
+	struct cmb_data *cmb_data;
+
+	/* Allocate private cmb_data. */
+	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
+	if (!cmb_data)
+		return -ENOMEM;
 
+	cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
+	if (!cmb_data->last_block) {
+		kfree(cmb_data);
+		return -ENOMEM;
+	}
+	cmb_data->size = sizeof(struct cmb);
 	spin_lock(&cmb_area.lock);
 
 	if (!cmb_area.mem) {
@@ -414,29 +605,36 @@ alloc_cmb (struct ccw_device *cdev)
 	}
 
 	/* do the actual allocation */
-	ret = alloc_cmb_single(cdev);
+	ret = alloc_cmb_single(cdev, cmb_data);
 out:
 	spin_unlock(&cmb_area.lock);
-
+	if (ret) {
+		kfree(cmb_data->last_block);
+		kfree(cmb_data);
+	}
 	return ret;
 }
 
-static void
-free_cmb(struct ccw_device *cdev)
+static void free_cmb(struct ccw_device *cdev)
 {
 	struct ccw_device_private *priv;
-
-	priv = cdev->private;
+	struct cmb_data *cmb_data;
 
 	spin_lock(&cmb_area.lock);
 	spin_lock_irq(cdev->ccwlock);
 
+	priv = cdev->private;
+
 	if (list_empty(&priv->cmb_list)) {
 		/* already freed */
 		goto out;
 	}
 
+	cmb_data = priv->cmb;
 	priv->cmb = NULL;
+	if (cmb_data)
+		kfree(cmb_data->last_block);
+	kfree(cmb_data);
 	list_del_init(&priv->cmb_list);
 
 	if (list_empty(&cmb_area.list)) {
@@ -451,83 +649,97 @@ out:
 	spin_unlock(&cmb_area.lock);
 }
 
-static int
-set_cmb(struct ccw_device *cdev, u32 mme)
+static int set_cmb(struct ccw_device *cdev, u32 mme)
 {
 	u16 offset;
+	struct cmb_data *cmb_data;
+	unsigned long flags;
 
-	if (!cdev->private->cmb)
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
 		return -EINVAL;
-
-	offset = mme ? (struct cmb *)cdev->private->cmb - cmb_area.mem : 0;
+	}
+	cmb_data = cdev->private->cmb;
+	offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
 
 	return set_schib_wait(cdev, mme, 0, offset);
 }
 
-static u64
-read_cmb (struct ccw_device *cdev, int index)
+static u64 read_cmb (struct ccw_device *cdev, int index)
 {
-	/* yes, we have to put it on the stack
-	 * because the cmb must only be accessed
-	 * atomically, e.g. with mvc */
-	struct cmb cmb;
-	unsigned long flags;
+	struct cmb *cmb;
 	u32 val;
+	int ret;
+	unsigned long flags;
+
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
+		return 0;
 
 	spin_lock_irqsave(cdev->ccwlock, flags);
 	if (!cdev->private->cmb) {
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
-		return 0;
+		ret = 0;
+		goto out;
 	}
-
-	cmb = *(struct cmb*)cdev->private->cmb;
-	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	cmb = ((struct cmb_data *)cdev->private->cmb)->last_block;
 
 	switch (index) {
 	case cmb_ssch_rsch_count:
-		return cmb.ssch_rsch_count;
+		ret = cmb->ssch_rsch_count;
+		goto out;
 	case cmb_sample_count:
-		return cmb.sample_count;
+		ret = cmb->sample_count;
+		goto out;
 	case cmb_device_connect_time:
-		val = cmb.device_connect_time;
+		val = cmb->device_connect_time;
 		break;
 	case cmb_function_pending_time:
-		val = cmb.function_pending_time;
+		val = cmb->function_pending_time;
 		break;
 	case cmb_device_disconnect_time:
-		val = cmb.device_disconnect_time;
+		val = cmb->device_disconnect_time;
 		break;
 	case cmb_control_unit_queuing_time:
-		val = cmb.control_unit_queuing_time;
+		val = cmb->control_unit_queuing_time;
 		break;
 	case cmb_device_active_only_time:
-		val = cmb.device_active_only_time;
+		val = cmb->device_active_only_time;
 		break;
 	default:
-		return 0;
+		ret = 0;
+		goto out;
 	}
-	return time_to_avg_nsec(val, cmb.sample_count);
+	ret = time_to_avg_nsec(val, cmb->sample_count);
out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
 }
 
-static int
-readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
+static int readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
 {
-	/* yes, we have to put it on the stack
-	 * because the cmb must only be accessed
-	 * atomically, e.g. with mvc */
-	struct cmb cmb;
-	unsigned long flags;
+	struct cmb *cmb;
+	struct cmb_data *cmb_data;
 	u64 time;
+	unsigned long flags;
+	int ret;
 
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
+		return ret;
 	spin_lock_irqsave(cdev->ccwlock, flags);
-	if (!cdev->private->cmb) {
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
-		return -ENODEV;
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data) {
+		ret = -ENODEV;
+		goto out;
 	}
-
-	cmb = *(struct cmb*)cdev->private->cmb;
-	time = get_clock() - cdev->private->cmb_start_time;
-	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	if (cmb_data->last_update == 0) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	cmb = cmb_data->last_block;
+	time = cmb_data->last_update - cdev->private->cmb_start_time;
 
 	memset(data, 0, sizeof(struct cmbdata));
 
@@ -538,31 +750,32 @@ readall_cmb (struct ccw_device *cdev, struct cmbdata *data)
 	data->elapsed_time = (time * 1000) >> 12;
 
 	/* copy data to new structure */
-	data->ssch_rsch_count = cmb.ssch_rsch_count;
-	data->sample_count = cmb.sample_count;
+	data->ssch_rsch_count = cmb->ssch_rsch_count;
+	data->sample_count = cmb->sample_count;
 
 	/* time fields are converted to nanoseconds while copying */
-	data->device_connect_time = time_to_nsec(cmb.device_connect_time);
-	data->function_pending_time = time_to_nsec(cmb.function_pending_time);
-	data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
+	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+	data->device_disconnect_time =
+		time_to_nsec(cmb->device_disconnect_time);
 	data->control_unit_queuing_time
-		= time_to_nsec(cmb.control_unit_queuing_time);
+		= time_to_nsec(cmb->control_unit_queuing_time);
 	data->device_active_only_time
-		= time_to_nsec(cmb.device_active_only_time);
+		= time_to_nsec(cmb->device_active_only_time);
+	ret = 0;
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
+}
 
-	return 0;
+static void reset_cmb(struct ccw_device *cdev)
+{
+	cmf_generic_reset(cdev);
 }
 
-static void
-reset_cmb(struct ccw_device *cdev)
+static void * align_cmb(void *area)
 {
-	struct cmb *cmb;
-	spin_lock_irq(cdev->ccwlock);
-	cmb = cdev->private->cmb;
-	if (cmb)
-		memset (cmb, 0, sizeof (*cmb));
-	cdev->private->cmb_start_time = get_clock();
-	spin_unlock_irq(cdev->ccwlock);
+	return area;
 }
 
 static struct attribute_group cmf_attr_group;
@@ -574,6 +787,7 @@ static struct cmb_operations cmbops_basic = {
 	.read = read_cmb,
 	.readall = readall_cmb,
 	.reset = reset_cmb,
+	.align = align_cmb,
 	.attr_group = &cmf_attr_group,
 };
 
@@ -610,22 +824,34 @@ static inline struct cmbe* cmbe_align(struct cmbe *c)
 	return (struct cmbe*)addr;
 }
 
-static int
-alloc_cmbe (struct ccw_device *cdev)
+static int alloc_cmbe (struct ccw_device *cdev)
 {
 	struct cmbe *cmbe;
-	cmbe = kmalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
+	struct cmb_data *cmb_data;
+	int ret;
+
+	cmbe = kzalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
 	if (!cmbe)
 		return -ENOMEM;
-
+	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
+	if (!cmb_data) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
+	if (!cmb_data->last_block) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	cmb_data->size = sizeof(struct cmbe);
 	spin_lock_irq(cdev->ccwlock);
 	if (cdev->private->cmb) {
-		kfree(cmbe);
 		spin_unlock_irq(cdev->ccwlock);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto out_free;
 	}
-
-	cdev->private->cmb = cmbe;
+	cmb_data->hw_block = cmbe;
+	cdev->private->cmb = cmb_data;
 	spin_unlock_irq(cdev->ccwlock);
 
 	/* activate global measurement if this is the first channel */
@@ -636,14 +862,24 @@ alloc_cmbe (struct ccw_device *cdev)
 	spin_unlock(&cmb_area.lock);
 
 	return 0;
+out_free:
+	if (cmb_data)
+		kfree(cmb_data->last_block);
+	kfree(cmb_data);
+	kfree(cmbe);
+	return ret;
 }
 
-static void
-free_cmbe (struct ccw_device *cdev)
+static void free_cmbe (struct ccw_device *cdev)
 {
+	struct cmb_data *cmb_data;
+
 	spin_lock_irq(cdev->ccwlock);
-	kfree(cdev->private->cmb);
+	cmb_data = cdev->private->cmb;
 	cdev->private->cmb = NULL;
+	if (cmb_data)
+		kfree(cmb_data->last_block);
+	kfree(cmb_data);
 	spin_unlock_irq(cdev->ccwlock);
 
 	/* deactivate global measurement if this is the last channel */
@@ -654,89 +890,105 @@ free_cmbe (struct ccw_device *cdev)
 	spin_unlock(&cmb_area.lock);
 }
 
-static int
-set_cmbe(struct ccw_device *cdev, u32 mme)
+static int set_cmbe(struct ccw_device *cdev, u32 mme)
 {
 	unsigned long mba;
+	struct cmb_data *cmb_data;
+	unsigned long flags;
 
-	if (!cdev->private->cmb)
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	if (!cdev->private->cmb) {
+		spin_unlock_irqrestore(cdev->ccwlock, flags);
 		return -EINVAL;
-	mba = mme ? (unsigned long) cmbe_align(cdev->private->cmb) : 0;
+	}
+	cmb_data = cdev->private->cmb;
+	mba = mme ? (unsigned long) cmbe_align(cmb_data->hw_block) : 0;
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
 
 	return set_schib_wait(cdev, mme, 1, mba);
 }
 
 
-u64
-read_cmbe (struct ccw_device *cdev, int index)
+static u64 read_cmbe (struct ccw_device *cdev, int index)
 {
-	/* yes, we have to put it on the stack
-	 * because the cmb must only be accessed
-	 * atomically, e.g. with mvc */
-	struct cmbe cmb;
-	unsigned long flags;
+	struct cmbe *cmb;
+	struct cmb_data *cmb_data;
 	u32 val;
+	int ret;
+	unsigned long flags;
 
-	spin_lock_irqsave(cdev->ccwlock, flags);
-	if (!cdev->private->cmb) {
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
 		return 0;
-	}
 
-	cmb = *cmbe_align(cdev->private->cmb);
-	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	spin_lock_irqsave(cdev->ccwlock, flags);
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data) {
+		ret = 0;
+		goto out;
+	}
+	cmb = cmb_data->last_block;
 
 	switch (index) {
 	case cmb_ssch_rsch_count:
-		return cmb.ssch_rsch_count;
+		ret = cmb->ssch_rsch_count;
+		goto out;
 	case cmb_sample_count:
-		return cmb.sample_count;
+		ret = cmb->sample_count;
+		goto out;
 	case cmb_device_connect_time:
-		val = cmb.device_connect_time;
+		val = cmb->device_connect_time;
 		break;
 	case cmb_function_pending_time:
-		val = cmb.function_pending_time;
+		val = cmb->function_pending_time;
 		break;
 	case cmb_device_disconnect_time:
-		val = cmb.device_disconnect_time;
+		val = cmb->device_disconnect_time;
 		break;
 	case cmb_control_unit_queuing_time:
-		val = cmb.control_unit_queuing_time;
+		val = cmb->control_unit_queuing_time;
 		break;
 	case cmb_device_active_only_time:
-		val = cmb.device_active_only_time;
+		val = cmb->device_active_only_time;
 		break;
 	case cmb_device_busy_time:
-		val = cmb.device_busy_time;
+		val = cmb->device_busy_time;
 		break;
 	case cmb_initial_command_response_time:
-		val = cmb.initial_command_response_time;
+		val = cmb->initial_command_response_time;
 		break;
 	default:
-		return 0;
+		ret = 0;
+		goto out;
 	}
-	return time_to_avg_nsec(val, cmb.sample_count);
+	ret = time_to_avg_nsec(val, cmb->sample_count);
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
 }
 
-static int
-readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
+static int readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
 {
-	/* yes, we have to put it on the stack
-	 * because the cmb must only be accessed
-	 * atomically, e.g. with mvc */
-	struct cmbe cmb;
-	unsigned long flags;
+	struct cmbe *cmb;
+	struct cmb_data *cmb_data;
 	u64 time;
+	unsigned long flags;
+	int ret;
 
+	ret = cmf_cmb_copy_wait(cdev);
+	if (ret < 0)
+		return ret;
 	spin_lock_irqsave(cdev->ccwlock, flags);
-	if (!cdev->private->cmb) {
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
-		return -ENODEV;
+	cmb_data = cdev->private->cmb;
+	if (!cmb_data) {
+		ret = -ENODEV;
+		goto out;
 	}
-
-	cmb = *cmbe_align(cdev->private->cmb);
-	time = get_clock() - cdev->private->cmb_start_time;
-	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	if (cmb_data->last_update == 0) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	time = cmb_data->last_update - cdev->private->cmb_start_time;
 
 	memset (data, 0, sizeof(struct cmbdata));
 
@@ -746,35 +998,38 @@ readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
 	/* convert to nanoseconds */
 	data->elapsed_time = (time * 1000) >> 12;
 
+	cmb = cmb_data->last_block;
 	/* copy data to new structure */
-	data->ssch_rsch_count = cmb.ssch_rsch_count;
-	data->sample_count = cmb.sample_count;
+	data->ssch_rsch_count = cmb->ssch_rsch_count;
+	data->sample_count = cmb->sample_count;
 
 	/* time fields are converted to nanoseconds while copying */
-	data->device_connect_time = time_to_nsec(cmb.device_connect_time);
-	data->function_pending_time = time_to_nsec(cmb.function_pending_time);
-	data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
+	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+	data->device_disconnect_time =
+		time_to_nsec(cmb->device_disconnect_time);
 	data->control_unit_queuing_time
-		= time_to_nsec(cmb.control_unit_queuing_time);
+		= time_to_nsec(cmb->control_unit_queuing_time);
 	data->device_active_only_time
-		= time_to_nsec(cmb.device_active_only_time);
-	data->device_busy_time = time_to_nsec(cmb.device_busy_time);
+		= time_to_nsec(cmb->device_active_only_time);
+	data->device_busy_time = time_to_nsec(cmb->device_busy_time);
 	data->initial_command_response_time
-		= time_to_nsec(cmb.initial_command_response_time);
+		= time_to_nsec(cmb->initial_command_response_time);
 
-	return 0;
+	ret = 0;
+out:
+	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	return ret;
 }
 
-static void
-reset_cmbe(struct ccw_device *cdev)
+static void reset_cmbe(struct ccw_device *cdev)
 {
-	struct cmbe *cmb;
-	spin_lock_irq(cdev->ccwlock);
-	cmb = cmbe_align(cdev->private->cmb);
-	if (cmb)
-		memset (cmb, 0, sizeof (*cmb));
-	cdev->private->cmb_start_time = get_clock();
-	spin_unlock_irq(cdev->ccwlock);
+	cmf_generic_reset(cdev);
+}
+
+static void * align_cmbe(void *area)
+{
+	return cmbe_align(area);
 }
 
 static struct attribute_group cmf_attr_group_ext;
@@ -786,6 +1041,7 @@ static struct cmb_operations cmbops_extended = {
 	.read = read_cmbe,
 	.readall = readall_cmbe,
 	.reset = reset_cmbe,
+	.align = align_cmbe,
 	.attr_group = &cmf_attr_group_ext,
 };
 
@@ -803,14 +1059,19 @@ cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr,
 	struct ccw_device *cdev;
 	long interval;
 	unsigned long count;
+	struct cmb_data *cmb_data;
 
 	cdev = to_ccwdev(dev);
-	interval = get_clock() - cdev->private->cmb_start_time;
 	count = cmf_read(cdev, cmb_sample_count);
-	if (count)
+	spin_lock_irq(cdev->ccwlock);
+	cmb_data = cdev->private->cmb;
+	if (count) {
+		interval = cmb_data->last_update -
+			cdev->private->cmb_start_time;
 		interval /= count;
-	else
+	} else
 		interval = -1;
+	spin_unlock_irq(cdev->ccwlock);
 	return sprintf(buf, "%ld\n", interval);
 }
 
@@ -823,7 +1084,10 @@ cmb_show_avg_utilization(struct device *dev, struct device_attribute *attr, char
 	int ret;
 
 	ret = cmf_readall(to_ccwdev(dev), &data);
-	if (ret)
+	if (ret == -EAGAIN || ret == -ENODEV)
+		/* No data (yet/currently) available to use for calculation. */
+		return sprintf(buf, "n/a\n");
+	else if (ret)
 		return ret;
 
 	utilization = data.device_connect_time +
@@ -982,6 +1246,13 @@ cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
 	return cmbops->readall(cdev, data);
 }
 
+/* Reenable cmf when a disconnected device becomes available again. */
+int cmf_reenable(struct ccw_device *cdev)
+{
+	cmbops->reset(cdev);
+	return cmbops->set(cdev, 2);
+}
+
 static int __init
 init_cmf(void)
 {
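
Note on the pattern above: both set_schib_wait() and cmf_cmb_copy_wait() now hang a kref-counted wait object off cdev->private->cmb_wait instead of using a stack variable, because the completion side (retry_set_schib()/cmf_retry_copy_block(), driven by the device FSM) may still hold a pointer to it after the sleeping task was interrupted and gave up. A generic sketch of the idiom, with illustrative names only (not code from this patch):

struct cmf_waiter {
	wait_queue_head_t wait;
	int ret;		/* CMF_PENDING until completed */
	struct kref kref;
};

static void cmf_waiter_release(struct kref *kref)
{
	kfree(container_of(kref, struct cmf_waiter, kref));
}

/* Completion side, e.g. called from the device FSM. */
static void cmf_waiter_complete(struct cmf_waiter *w, int result)
{
	kref_get(&w->kref);	/* keep w alive across the wake_up */
	w->ret = result;
	wake_up(&w->wait);
	kref_put(&w->kref, cmf_waiter_release);
}

The waiter takes its own reference via kref_init() at allocation and drops it with kref_put() when done; whichever side drops the last reference frees the object, so neither side can touch freed memory.
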
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 8e0d1db3dd4e..00be9a5b4acd 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -27,6 +27,7 @@ enum dev_state {
 	DEV_STATE_DISCONNECTED,
 	DEV_STATE_DISCONNECTED_SENSE_ID,
 	DEV_STATE_CMFCHANGE,
+	DEV_STATE_CMFUPDATE,
 	/* last element! */
 	NR_DEV_STATES
 };
@@ -118,5 +119,8 @@ int ccw_device_stlck(struct ccw_device *);
 void ccw_device_set_timeout(struct ccw_device *, int);
 extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
 
+/* Channel measurement facility related */
 void retry_set_schib(struct ccw_device *cdev);
+void cmf_retry_copy_block(struct ccw_device *);
+int cmf_reenable(struct ccw_device *);
 #endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 5ec8ef1df9fa..7d0dd72635eb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -336,8 +336,11 @@ ccw_device_oper_notify(void *data)
 	if (!ret)
 		/* Driver doesn't want device back. */
 		ccw_device_do_unreg_rereg((void *)cdev);
-	else
+	else {
+		/* Reenable channel measurements, if needed. */
+		cmf_reenable(cdev);
 		wake_up(&cdev->private->wait_q);
+	}
 }
 
 /*
@@ -1095,6 +1098,13 @@ ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
 	dev_fsm_event(cdev, dev_event);
 }
 
+static void ccw_device_update_cmfblock(struct ccw_device *cdev,
+				       enum dev_event dev_event)
+{
+	cmf_retry_copy_block(cdev);
+	cdev->private->state = DEV_STATE_ONLINE;
+	dev_fsm_event(cdev, dev_event);
+}
 
 static void
 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
@@ -1249,6 +1259,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
 		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
 	},
+	[DEV_STATE_CMFUPDATE] = {
+		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
+		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
+	},
 };
 
 /*
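
The jumptable above dispatches on (state, event): dev_jumptable is a two-dimensional array of fsm_func_t pointers, and the new DEV_STATE_CMFUPDATE row funnels every event through ccw_device_update_cmfblock(), which retries the block copy and returns the device to DEV_STATE_ONLINE before re-dispatching the event. A simplified sketch of how such a table is consumed (dev_fsm_event() in the real code does essentially this; the helper name here is illustrative):

static inline void fsm_dispatch(struct ccw_device *cdev, enum dev_event ev)
{
	/* index by current state, then by event */
	dev_jumptable[cdev->private->state][ev](cdev, ev);
}
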
diff --git a/include/asm-s390/cmb.h b/include/asm-s390/cmb.h
index 2d09950a9c11..241756f80df3 100644
--- a/include/asm-s390/cmb.h
+++ b/include/asm-s390/cmb.h
@@ -44,10 +44,6 @@ struct cmbdata {
 #define BIODASDCMFENABLE	_IO(DASD_IOCTL_LETTER,32)
 /* disable channel measurement */
 #define BIODASDCMFDISABLE	_IO(DASD_IOCTL_LETTER,33)
-/* reset channel measurement block */
-#define BIODASDRESETCMB		_IO(DASD_IOCTL_LETTER,34)
-/* read channel measurement data */
-#define BIODASDREADCMB		_IOWR(DASD_IOCTL_LETTER,32,__u64)
 /* read channel measurement data */
 #define BIODASDREADALLCMB	_IOWR(DASD_IOCTL_LETTER,33,struct cmbdata)
 
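
With BIODASDRESETCMB and BIODASDREADCMB removed, BIODASDREADALLCMB remains the user-space interface to the measurement data. A hedged user-space sketch, assuming a DASD block device node and that the ioctl fills the struct cmbdata this header defines (the device path is an assumption):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/cmb.h>	/* struct cmbdata, BIODASDREADALLCMB */

int main(void)
{
	struct cmbdata data;
	int fd = open("/dev/dasda", O_RDONLY);	/* hypothetical device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, BIODASDREADALLCMB, &data) < 0) {
		perror("BIODASDREADALLCMB");	/* e.g. EAGAIN before first update */
		close(fd);
		return 1;
	}
	printf("ssch/rsch count: %llu\n",
	       (unsigned long long) data.ssch_rsch_count);
	close(fd);
	return 0;
}
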