Diffstat (limited to 'drivers/s390/cio/device_fsm.c')
-rw-r--r--  drivers/s390/cio/device_fsm.c | 210
1 file changed, 71 insertions, 139 deletions
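Most of this diff replaces direct accesses to the subchannel status word and ORB (irb->scsw.actl, private->orb.cpa) with accessor helpers (scsw_actl(), scsw_is_tm()) and the cmd/tm union members, because these structures now carry either a command-mode or a transport-mode layout. The stand-alone sketch below only models that union-plus-accessor pattern for illustration; it is not kernel code, and every type and helper name in it is a hypothetical stand-in for the real definitions.

/*
 * Illustrative sketch only: a simplified model of the command-mode vs.
 * transport-mode split behind the scsw_*() accessors used in the patch.
 * All names here are invented stand-ins, not the kernel's structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct cmd_scsw { unsigned int fctl, actl, stctl, cc; };
struct tm_scsw  { unsigned int fctl, actl, stctl, cc; };

union model_scsw {
	struct cmd_scsw cmd;	/* command-mode layout */
	struct tm_scsw tm;	/* transport-mode layout */
};

struct model_irb {
	bool tm_mode;		/* stands in for the scsw format bit */
	union model_scsw scsw;
};

/* Accessors in the spirit of scsw_is_tm()/scsw_actl(): callers stop
 * reading irb->scsw.actl directly and let the helper pick the layout. */
static bool model_scsw_is_tm(const struct model_irb *irb)
{
	return irb->tm_mode;
}

static unsigned int model_scsw_actl(const struct model_irb *irb)
{
	return model_scsw_is_tm(irb) ? irb->scsw.tm.actl : irb->scsw.cmd.actl;
}

int main(void)
{
	struct model_irb irb = { .tm_mode = false };

	irb.scsw.cmd.actl = 0x08;	/* pretend "start pending" */
	printf("actl = %#x (%s mode)\n", model_scsw_actl(&irb),
	       model_scsw_is_tm(&irb) ? "transport" : "command");
	return 0;
}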
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index e268d5a77c12..8b5fe57fb2f3 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -2,8 +2,7 @@
  * drivers/s390/cio/device_fsm.c
  * finite state machine for device handling
  *
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- *			 IBM Corporation
+ * Copyright IBM Corp. 2002,2008
  * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
  *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
  */
@@ -27,65 +26,6 @@
 
 static int timeout_log_enabled;
 
-int
-device_is_online(struct subchannel *sch)
-{
-	struct ccw_device *cdev;
-
-	cdev = sch_get_cdev(sch);
-	if (!cdev)
-		return 0;
-	return (cdev->private->state == DEV_STATE_ONLINE);
-}
-
-int
-device_is_disconnected(struct subchannel *sch)
-{
-	struct ccw_device *cdev;
-
-	cdev = sch_get_cdev(sch);
-	if (!cdev)
-		return 0;
-	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
-		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
-}
-
-void
-device_set_disconnected(struct subchannel *sch)
-{
-	struct ccw_device *cdev;
-
-	cdev = sch_get_cdev(sch);
-	if (!cdev)
-		return;
-	ccw_device_set_timeout(cdev, 0);
-	cdev->private->flags.fake_irb = 0;
-	cdev->private->state = DEV_STATE_DISCONNECTED;
-	if (cdev->online)
-		ccw_device_schedule_recovery();
-}
-
-void device_set_intretry(struct subchannel *sch)
-{
-	struct ccw_device *cdev;
-
-	cdev = sch_get_cdev(sch);
-	if (!cdev)
-		return;
-	cdev->private->flags.intretry = 1;
-}
-
-int device_trigger_verify(struct subchannel *sch)
-{
-	struct ccw_device *cdev;
-
-	cdev = sch_get_cdev(sch);
-	if (!cdev || !cdev->online)
-		return -EINVAL;
-	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
-	return 0;
-}
-
 static int __init ccw_timeout_log_setup(char *unused)
 {
 	timeout_log_enabled = 1;
@@ -99,31 +39,43 @@ static void ccw_timeout_log(struct ccw_device *cdev)
 	struct schib schib;
 	struct subchannel *sch;
 	struct io_subchannel_private *private;
+	union orb *orb;
 	int cc;
 
 	sch = to_subchannel(cdev->dev.parent);
 	private = to_io_private(sch);
+	orb = &private->orb;
 	cc = stsch(sch->schid, &schib);
 
 	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
 	       "device information:\n", get_clock());
 	printk(KERN_WARNING "cio: orb:\n");
 	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
-		       &private->orb, sizeof(private->orb), 0);
+		       orb, sizeof(*orb), 0);
 	printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
 	printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
 	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
 	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
 
-	if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw ||
-	    (void *)(addr_t)private->orb.cpa == cdev->private->iccws)
-		printk(KERN_WARNING "cio: last channel program (intern):\n");
-	else
-		printk(KERN_WARNING "cio: last channel program:\n");
-
-	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
-		       (void *)(addr_t)private->orb.cpa,
-		       sizeof(struct ccw1), 0);
+	if (orb->tm.b) {
+		printk(KERN_WARNING "cio: orb indicates transport mode\n");
+		printk(KERN_WARNING "cio: last tcw:\n");
+		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
+			       (void *)(addr_t)orb->tm.tcw,
+			       sizeof(struct tcw), 0);
+	} else {
+		printk(KERN_WARNING "cio: orb indicates command mode\n");
+		if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
+		    (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
+			printk(KERN_WARNING "cio: last channel program "
+			       "(intern):\n");
+		else
+			printk(KERN_WARNING "cio: last channel program:\n");
+
+		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
+			       (void *)(addr_t)orb->cmd.cpa,
+			       sizeof(struct ccw1), 0);
+	}
 	printk(KERN_WARNING "cio: ccw device state: %d\n",
 	       cdev->private->state);
 	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
@@ -171,18 +123,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires)
 	add_timer(&cdev->private->timer);
 }
 
-/* Kill any pending timers after machine check. */
-void
-device_kill_pending_timer(struct subchannel *sch)
-{
-	struct ccw_device *cdev;
-
-	cdev = sch_get_cdev(sch);
-	if (!cdev)
-		return;
-	ccw_device_set_timeout(cdev, 0);
-}
-
 /*
  * Cancel running i/o. This is called repeatedly since halt/clear are
  * asynchronous operations. We do one try with cio_cancel, two tries
@@ -205,15 +145,18 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
 		/* Not operational -> done. */
 		return 0;
 	/* Stage 1: cancel io. */
-	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
-	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
-		ret = cio_cancel(sch);
-		if (ret != -EINVAL)
-			return ret;
-		/* cancel io unsuccessful. From now on it is asynchronous. */
+	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
+	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
+		if (!scsw_is_tm(&sch->schib.scsw)) {
+			ret = cio_cancel(sch);
+			if (ret != -EINVAL)
+				return ret;
+		}
+		/* cancel io unsuccessful or not applicable (transport mode).
+		 * Continue with asynchronous instructions. */
 		cdev->private->iretry = 3;	/* 3 halt retries. */
 	}
-	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
+	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
 		/* Stage 2: halt io. */
 		if (cdev->private->iretry) {
 			cdev->private->iretry--;
@@ -388,34 +331,30 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
 	}
 }
 
+int ccw_device_notify(struct ccw_device *cdev, int event)
+{
+	if (!cdev->drv)
+		return 0;
+	if (!cdev->online)
+		return 0;
+	return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
+}
+
 static void
 ccw_device_oper_notify(struct work_struct *work)
 {
 	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
-	struct subchannel *sch;
 	int ret;
-	unsigned long flags;
 
 	priv = container_of(work, struct ccw_device_private, kick_work);
 	cdev = priv->cdev;
-	spin_lock_irqsave(cdev->ccwlock, flags);
-	sch = to_subchannel(cdev->dev.parent);
-	if (sch->driver && sch->driver->notify) {
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
-		ret = sch->driver->notify(sch, CIO_OPER);
-		spin_lock_irqsave(cdev->ccwlock, flags);
-	} else
-		ret = 0;
+	ret = ccw_device_notify(cdev, CIO_OPER);
 	if (ret) {
 		/* Reenable channel measurements, if needed. */
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
 		cmf_reenable(cdev);
-		spin_lock_irqsave(cdev->ccwlock, flags);
 		wake_up(&cdev->private->wait_q);
-	}
-	spin_unlock_irqrestore(cdev->ccwlock, flags);
-	if (!ret)
+	} else
 		/* Driver doesn't want device back. */
 		ccw_device_do_unreg_rereg(work);
 }
@@ -621,10 +560,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
 	/* Deliver fake irb to device driver, if needed. */
 	if (cdev->private->flags.fake_irb) {
 		memset(&cdev->private->irb, 0, sizeof(struct irb));
-		cdev->private->irb.scsw.cc = 1;
-		cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC;
-		cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND;
-		cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND;
+		cdev->private->irb.scsw.cmd.cc = 1;
+		cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
+		cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
+		cdev->private->irb.scsw.cmd.stctl =
+			SCSW_STCTL_STATUS_PEND;
 		cdev->private->flags.fake_irb = 0;
 		if (cdev->handler)
 			cdev->handler(cdev, cdev->private->intparm,
@@ -718,13 +658,10 @@ ccw_device_offline(struct ccw_device *cdev)
 	sch = to_subchannel(cdev->dev.parent);
 	if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
 		return -ENODEV;
-	if (cdev->private->state != DEV_STATE_ONLINE) {
-		if (sch->schib.scsw.actl != 0)
-			return -EBUSY;
-		return -EINVAL;
-	}
-	if (sch->schib.scsw.actl != 0)
+	if (scsw_actl(&sch->schib.scsw) != 0)
 		return -EBUSY;
+	if (cdev->private->state != DEV_STATE_ONLINE)
+		return -EINVAL;
 	/* Are we doing path grouping? */
 	if (!cdev->private->options.pgroup) {
 		/* No, set state offline immediately. */
@@ -799,9 +736,9 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
 	 */
 	stsch(sch->schid, &sch->schib);
 
-	if (sch->schib.scsw.actl != 0 ||
-	    (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) ||
-	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
+	if (scsw_actl(&sch->schib.scsw) != 0 ||
+	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
+	    (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
 		/*
 		 * No final status yet or final status not yet delivered
 		 * to the device driver. Can't do path verfication now,
@@ -823,13 +760,13 @@ static void
 ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
 {
 	struct irb *irb;
+	int is_cmd;
 
 	irb = (struct irb *) __LC_IRB;
+	is_cmd = !scsw_is_tm(&irb->scsw);
 	/* Check for unsolicited interrupt. */
-	if ((irb->scsw.stctl ==
-	     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
-	    && (!irb->scsw.cc)) {
-		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
+	if (!scsw_is_solicited(&irb->scsw)) {
+		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
 		    !irb->esw.esw0.erw.cons) {
 			/* Unit check but no sense data. Need basic sense. */
 			if (ccw_device_do_sense(cdev, irb) != 0)
@@ -848,7 +785,7 @@ call_handler_unsol:
 	}
 	/* Accumulate status and find out if a basic sense is needed. */
 	ccw_device_accumulate_irb(cdev, irb);
-	if (cdev->private->flags.dosense) {
+	if (is_cmd && cdev->private->flags.dosense) {
 		if (ccw_device_do_sense(cdev, irb) == 0) {
 			cdev->private->state = DEV_STATE_W4SENSE;
 		}
@@ -892,9 +829,9 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
 
 	irb = (struct irb *) __LC_IRB;
 	/* Check for unsolicited interrupt. */
-	if (irb->scsw.stctl ==
+	if (scsw_stctl(&irb->scsw) ==
 	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
-		if (irb->scsw.cc == 1)
+		if (scsw_cc(&irb->scsw) == 1)
 			/* Basic sense hasn't started. Try again. */
 			ccw_device_do_sense(cdev, irb);
 		else {
@@ -912,7 +849,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
 	 * only deliver the halt/clear interrupt to the device driver as if it
 	 * had killed the original request.
 	 */
-	if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
+	if (scsw_fctl(&irb->scsw) &
+	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
 		/* Retry Basic Sense if requested. */
 		if (cdev->private->flags.intretry) {
 			cdev->private->flags.intretry = 0;
@@ -986,12 +924,10 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 			      ERR_PTR(-EIO));
 }
 
-void device_kill_io(struct subchannel *sch)
+void ccw_device_kill_io(struct ccw_device *cdev)
 {
 	int ret;
-	struct ccw_device *cdev;
 
-	cdev = sch_get_cdev(sch);
 	ret = ccw_device_cancel_halt_clear(cdev);
 	if (ret == -EBUSY) {
 		ccw_device_set_timeout(cdev, 3*HZ);
@@ -1021,9 +957,9 @@ ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
 	case DEV_EVENT_INTERRUPT:
 		irb = (struct irb *) __LC_IRB;
 		/* Check for unsolicited interrupt. */
-		if ((irb->scsw.stctl ==
+		if ((scsw_stctl(&irb->scsw) ==
 		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
-		    (!irb->scsw.cc))
+		    (!scsw_cc(&irb->scsw)))
 			/* FIXME: we should restart stlck here, but this
 			 * is extremely unlikely ... */
 			goto out_wakeup;
@@ -1055,17 +991,14 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
 	ccw_device_sense_id_start(cdev);
 }
 
-void
-device_trigger_reprobe(struct subchannel *sch)
+void ccw_device_trigger_reprobe(struct ccw_device *cdev)
 {
-	struct ccw_device *cdev;
+	struct subchannel *sch;
 
-	cdev = sch_get_cdev(sch);
-	if (!cdev)
-		return;
 	if (cdev->private->state != DEV_STATE_DISCONNECTED)
 		return;
 
+	sch = to_subchannel(cdev->dev.parent);
 	/* Update some values. */
 	if (stsch(sch->schid, &sch->schib))
 		return;
@@ -1081,7 +1014,6 @@ device_trigger_reprobe(struct subchannel *sch)
 	sch->schib.pmcw.ena = 0;
 	if ((sch->lpm & (sch->lpm - 1)) != 0)
 		sch->schib.pmcw.mp = 1;
-	sch->schib.pmcw.intparm = (u32)(addr_t)sch;
 	/* We should also udate ssd info, but this has to wait. */
 	/* Check if this is another device which appeared on the same sch. */
 	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {