| author    | Stefan Bader <shbader@de.ibm.com>                                  | 2007-04-27 10:01:33 -0400 |
|-----------|--------------------------------------------------------------------|---------------------------|
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com>                        | 2007-04-27 10:01:40 -0400 |
| commit    | 387b734fc2b55f776b192c7afdfd892ba42347d4 (patch)                   |                           |
| tree      | 04dd6d5072fd66bc4d8d644d1a551ce1464a3b37 /drivers/s390/cio/chsc.c  |                           |
| parent    | cfbe9bb2fb5de1da58d351432a9465c22d6d3ee5 (diff)                    |                           |
[S390] cio: Re-start path verification after aborting internal I/O.
Path verification triggered by changes to the available CHPIDs will be
interrupted by another change but not re-started. This results in an
invalid path mask.
To solve this, make sure to completely re-start path verification when
changing the available paths.
Signed-off-by: Stefan Bader <shbader@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
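
Before diving into the hunks, a condensed C sketch of the control flow this patch establishes in s390_subchannel_remove_chpid() may help. It is drawn from the new code in the diff below; the wrapper name sketch_remove_chpid_path is hypothetical, and locking as well as the out_unreg slow-path handling are omitted.

```c
/*
 * Condensed sketch of the flow this patch establishes in
 * s390_subchannel_remove_chpid() (drawn from the hunks below).
 * The wrapper name is hypothetical; locking and the out_unreg
 * slow-path requeueing are omitted.
 */
static void sketch_remove_chpid_path(struct subchannel *sch, int mask)
{
	if (check_for_io_on_path(sch, mask)) {
		/* I/O is still active on the affected path. */
		if (device_is_online(sch))
			device_kill_io(sch);	/* verification follows the kill */
		else {
			terminate_internal_io(sch);
			/* The fix: always re-start path verification here. */
			if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
	} else {
		/* No I/O on the path: trigger path verification directly. */
		if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);
	}
}
```

The key difference from the old code is that terminating internal I/O no longer short-circuits out of the function; the driver's verify callback is invoked afterwards, so path verification is re-started instead of being left aborted.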
Diffstat (limited to 'drivers/s390/cio/chsc.c')
-rw-r--r--	drivers/s390/cio/chsc.c	108
1 file changed, 54 insertions, 54 deletions
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 3dec460bba27..02615eb43984 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -178,6 +178,38 @@ css_get_ssd_info(struct subchannel *sch)
 	return ret;
 }
 
+static int check_for_io_on_path(struct subchannel *sch, int mask)
+{
+	int cc;
+
+	cc = stsch(sch->schid, &sch->schib);
+	if (cc)
+		return 0;
+	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
+		return 1;
+	return 0;
+}
+
+static void terminate_internal_io(struct subchannel *sch)
+{
+	if (cio_clear(sch)) {
+		/* Recheck device in case clear failed. */
+		sch->lpm = 0;
+		if (device_trigger_verify(sch) != 0) {
+			if(css_enqueue_subchannel_slow(sch->schid)) {
+				css_clear_subchannel_slow_list();
+				need_rescan = 1;
+			}
+		}
+		return;
+	}
+	/* Request retry of internal operation. */
+	device_set_intretry(sch);
+	/* Call handler. */
+	if (sch->driver && sch->driver->termination)
+		sch->driver->termination(&sch->dev);
+}
+
 static int
 s390_subchannel_remove_chpid(struct device *dev, void *data)
 {
@@ -208,37 +240,33 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 	if (sch->schib.pmcw.pim == 0x80)
 		goto out_unreg;
 
-	if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
-	    (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
-	    (sch->schib.pmcw.lpum == mask)) {
-		int cc;
-
-		cc = cio_clear(sch);
-		if (cc == -ENODEV)
+	if (check_for_io_on_path(sch, mask)) {
+		if (device_is_online(sch))
+			device_kill_io(sch);
+		else {
+			terminate_internal_io(sch);
+			/* Re-start path verification. */
+			if (sch->driver && sch->driver->verify)
+				sch->driver->verify(&sch->dev);
+		}
+	} else {
+		/* trigger path verification. */
+		if (sch->driver && sch->driver->verify)
+			sch->driver->verify(&sch->dev);
+		else if (sch->lpm == mask)
 			goto out_unreg;
-		/* Request retry of internal operation. */
-		device_set_intretry(sch);
-		/* Call handler. */
-		if (sch->driver && sch->driver->termination)
-			sch->driver->termination(&sch->dev);
-		goto out_unlock;
 	}
 
-	/* trigger path verification. */
-	if (sch->driver && sch->driver->verify)
-		sch->driver->verify(&sch->dev);
-	else if (sch->lpm == mask)
-		goto out_unreg;
-out_unlock:
 	spin_unlock_irq(sch->lock);
 	return 0;
+
 out_unreg:
-	spin_unlock_irq(sch->lock);
 	sch->lpm = 0;
 	if (css_enqueue_subchannel_slow(sch->schid)) {
 		css_clear_subchannel_slow_list();
 		need_rescan = 1;
 	}
+	spin_unlock_irq(sch->lock);
 	return 0;
 }
 
@@ -683,38 +711,6 @@ int chsc_chp_online(struct chp_id chpid)
 	return rc;
 }
 
-static int check_for_io_on_path(struct subchannel *sch, int index)
-{
-	int cc;
-
-	cc = stsch(sch->schid, &sch->schib);
-	if (cc)
-		return 0;
-	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
-		return 1;
-	return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch)
-{
-	if (cio_clear(sch)) {
-		/* Recheck device in case clear failed. */
-		sch->lpm = 0;
-		if (device_trigger_verify(sch) != 0) {
-			if(css_enqueue_subchannel_slow(sch->schid)) {
-				css_clear_subchannel_slow_list();
-				need_rescan = 1;
-			}
-		}
-		return;
-	}
-	/* Request retry of internal operation. */
-	device_set_intretry(sch);
-	/* Call handler. */
-	if (sch->driver && sch->driver->termination)
-		sch->driver->termination(&sch->dev);
-}
-
 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
 					 struct chp_id chpid, int on)
 {
@@ -741,13 +737,17 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch,
 		}
 		sch->opm &= ~(0x80 >> chp);
 		sch->lpm &= ~(0x80 >> chp);
-		if (check_for_io_on_path(sch, chp)) {
+		if (check_for_io_on_path(sch, (0x80 >> chp))) {
 			if (device_is_online(sch))
 				/* Path verification is done after killing. */
 				device_kill_io(sch);
-			else
+			else {
 				/* Kill and retry internal I/O. */
 				terminate_internal_io(sch);
+				/* Re-start path verification. */
+				if (sch->driver && sch->driver->verify)
+					sch->driver->verify(&sch->dev);
+			}
 		} else if (!sch->lpm) {
 			if (device_trigger_verify(sch) != 0) {
 				if (css_enqueue_subchannel_slow(sch->schid)) {