author		Peter Oberparleiter <peter.oberparleiter@de.ibm.com>	2009-12-07 06:51:32 -0500
committer	Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>	2009-12-07 06:51:32 -0500
commit		d7d12ef2befac4fed0dccaddff11338b654804df (patch)
tree		1563b299e609024844affbc3ebba99c0718db238 /drivers/s390/cio/device_ops.c
parent		52ef0608e3ee4a511725e443c4b572fece22b353 (diff)
[S390] cio: make steal lock procedure more robust
An Unconditional Reserve + Release operation (steal lock) for a boxed device
may fail when encountering special error cases (e.g. unit checks or path
errors). Fix this by using the more robust ccw_request infrastructure for
performing the steal lock CCW program.

Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390/cio/device_ops.c')
-rw-r--r--	drivers/s390/cio/device_ops.c	112
1 file changed, 52 insertions, 60 deletions
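
For reference, the steal lock itself is a two-command channel program: an Unconditional Reserve (STLCK) command-chained to a Release, so the lock held by another system is broken without being retained. The sketch below restates that channel program using the CCW setup visible in the code removed by this patch; buffer size, command codes and flags are taken from the old code, while the helper name stlck_build_cp() is purely illustrative and not part of the kernel.

/*
 * Illustrative sketch only: the steal-lock channel program that this patch
 * now submits through the ccw_request infrastructure.
 */
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/page.h>

static void stlck_build_cp(struct ccw1 cp[2], void *buf1, void *buf2)
{
	cp[0].cmd_code = CCW_CMD_STLCK;		/* unconditional reserve */
	cp[0].cda = (__u32) __pa(buf1);		/* 32-byte data area */
	cp[0].count = 32;
	cp[0].flags = CCW_FLAG_CC;		/* command-chain to the release */

	cp[1].cmd_code = CCW_CMD_RELEASE;	/* give the lock straight back */
	cp[1].cda = (__u32) __pa(buf2);
	cp[1].count = 32;
	cp[1].flags = 0;			/* end of channel program */
}
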
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index d4be16acebe4..6da84543dfe9 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -11,6 +11,7 @@
 #include <linux/list.h>
 #include <linux/device.h>
 #include <linux/delay.h>
+#include <linux/completion.h>
 
 #include <asm/ccwdev.h>
 #include <asm/idals.h>
@@ -504,74 +505,65 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
 	return sch->lpm;
 }
 
-/*
- * Try to break the lock on a boxed device.
- */
-int
-ccw_device_stlck(struct ccw_device *cdev)
-{
-	void *buf, *buf2;
-	unsigned long flags;
-	struct subchannel *sch;
-	int ret;
+struct stlck_data {
+	struct completion done;
+	int rc;
+};
 
-	if (!cdev)
-		return -ENODEV;
+void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
+{
+	struct stlck_data *sdata = data;
 
-	if (cdev->drv && !cdev->private->options.force)
-		return -EINVAL;
+	sdata->rc = rc;
+	complete(&sdata->done);
+}
 
-	sch = to_subchannel(cdev->dev.parent);
-
-	CIO_TRACE_EVENT(2, "stl lock");
-	CIO_TRACE_EVENT(2, dev_name(&cdev->dev));
+/*
+ * Perform unconditional reserve + release.
+ */
+int ccw_device_stlck(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct stlck_data data;
+	u8 *buffer;
+	int rc;
 
-	buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
-	if (!buf2) {
-		kfree(buf);
-		return -ENOMEM;
+	/* Check if steal lock operation is valid for this device. */
+	if (cdev->drv) {
+		if (!cdev->private->options.force)
+			return -EINVAL;
 	}
-	spin_lock_irqsave(sch->lock, flags);
-	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
-	if (ret)
-		goto out_unlock;
-	/*
-	 * Setup ccw. We chain an unconditional reserve and a release so we
-	 * only break the lock.
-	 */
-	cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
-	cdev->private->iccws[0].cda = (__u32) __pa(buf);
-	cdev->private->iccws[0].count = 32;
-	cdev->private->iccws[0].flags = CCW_FLAG_CC;
-	cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
-	cdev->private->iccws[1].cda = (__u32) __pa(buf2);
-	cdev->private->iccws[1].count = 32;
-	cdev->private->iccws[1].flags = 0;
-	ret = cio_start(sch, cdev->private->iccws, 0);
-	if (ret) {
-		cio_disable_subchannel(sch); //FIXME: return code?
+	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+	init_completion(&data.done);
+	data.rc = -EIO;
+	spin_lock_irq(sch->lock);
+	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
+	if (rc)
 		goto out_unlock;
+	/* Perform operation. */
+	cdev->private->state = DEV_STATE_STEAL_LOCK,
+	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
+	spin_unlock_irq(sch->lock);
+	/* Wait for operation to finish. */
+	if (wait_for_completion_interruptible(&data.done)) {
+		/* Got a signal. */
+		spin_lock_irq(sch->lock);
+		ccw_request_cancel(cdev);
+		spin_unlock_irq(sch->lock);
+		wait_for_completion(&data.done);
 	}
-	cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
-	spin_unlock_irqrestore(sch->lock, flags);
-	wait_event(cdev->private->wait_q,
-		   cdev->private->irb.scsw.cmd.actl == 0);
-	spin_lock_irqsave(sch->lock, flags);
-	cio_disable_subchannel(sch); //FIXME: return code?
-	if ((cdev->private->irb.scsw.cmd.dstat !=
-	     (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
-	    (cdev->private->irb.scsw.cmd.cstat != 0))
-		ret = -EIO;
-	/* Clear irb. */
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
+	rc = data.rc;
+	/* Check results. */
+	spin_lock_irq(sch->lock);
+	cio_disable_subchannel(sch);
+	cdev->private->state = DEV_STATE_BOXED;
 out_unlock:
-	kfree(buf);
-	kfree(buf2);
-	spin_unlock_irqrestore(sch->lock, flags);
-	return ret;
+	spin_unlock_irq(sch->lock);
+	kfree(buffer);
+
+	return rc;
 }
 
 void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
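
Note on usage: the rewritten ccw_device_stlck() still refuses to run for a device that is bound to a driver unless that driver has opted in to forced operations (cdev->private->options.force). A driver would typically request this with ccw_device_set_options() and the CCWDEV_ALLOW_FORCE flag, usually from its probe routine; ccw_device_set_options() and CCWDEV_ALLOW_FORCE are existing cio interfaces, while example_probe() below is only a sketch. For a boxed device, the steal-lock path is also reached from user space by writing "force" to the device's online attribute in sysfs, which is where the robustness of this procedure matters in practice.

#include <asm/ccwdev.h>

static int example_probe(struct ccw_device *cdev)
{
	int rc;

	/* Permit forced operations such as the unconditional reserve/release. */
	rc = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
	if (rc)
		return rc;

	/* ... remaining device setup ... */
	return 0;
}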