-rw-r--r--  drivers/s390/cio/chp.c          20
-rw-r--r--  drivers/s390/cio/chp.h          13
-rw-r--r--  drivers/s390/cio/chsc.c        218
-rw-r--r--  drivers/s390/cio/cio.c           1
-rw-r--r--  drivers/s390/cio/css.c         123
-rw-r--r--  drivers/s390/cio/css.h          38
-rw-r--r--  drivers/s390/cio/device.c      365
-rw-r--r--  drivers/s390/cio/device.h        7
-rw-r--r--  drivers/s390/cio/device_fsm.c  106
9 files changed, 381 insertions, 510 deletions
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 297cdceb0ca4..297f1653b52b 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -496,6 +496,26 @@ void chp_process_crw(int id, int status)
 	chsc_chp_offline(chpid);
 }
 
+int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct res_acc_data *data)
+{
+	int i;
+	int mask;
+
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (!(ssd->path_mask & mask))
+			continue;
+		if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
+			continue;
+		if ((ssd->fla_valid_mask & mask) &&
+		    ((ssd->fla[i] & data->fla_mask) != data->fla))
+			continue;
+		return mask;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
+
 static inline int info_bit_num(struct chp_id id)
 {
 	return id.id + id.cssid * (__MAX_CHPID + 1);
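The exported chp_ssd_get_mask() lets the io subchannel driver (device.c, below) reuse the CHPID/link-address matching that chsc.c previously kept private as get_res_chpid_mask(). A minimal usage sketch, assuming `sch` is a `struct subchannel *` in scope; the CHPID value is hypothetical and chp_id_init() is the existing chpid helper:

	/* Sketch only: match any link address on CHPID 0x40 (hypothetical). */
	struct res_acc_data res_data;
	int mask;

	memset(&res_data, 0, sizeof(res_data));
	chp_id_init(&res_data.chpid);
	res_data.chpid.id = 0x40;
	/* fla_mask == 0: the full-link-address check never filters. */
	mask = chp_ssd_get_mask(&sch->ssd_info, &res_data);
	if (mask) {
		/* mask is the 0x80 >> i bit of the matching path in the PIM. */
	}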
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 59c2fc069d9e..f03b0d2cdc09 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -19,6 +19,17 @@
 #define CHP_STATUS_RESERVED		2
 #define CHP_STATUS_NOT_RECOGNIZED	3
 
+#define CHP_ONLINE 0
+#define CHP_OFFLINE 1
+#define CHP_VARY_ON 2
+#define CHP_VARY_OFF 3
+
+struct res_acc_data {
+	struct chp_id chpid;
+	u32 fla_mask;
+	u16 fla;
+};
+
 static inline int chp_test_bit(u8 *bitmap, int num)
 {
 	int byte = num >> 3;
@@ -50,5 +61,5 @@ int chp_new(struct chp_id chpid);
 void chp_cfg_schedule(struct chp_id chpid, int configure);
 void chp_cfg_cancel_deconfigure(struct chp_id chpid);
 int chp_info_get_status(struct chp_id chpid);
-
+int chp_ssd_get_mask(struct chsc_ssd_info *, struct res_acc_data *);
 #endif /* S390_CHP_H */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 5de86908b0d0..1c0f5db94c7b 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -2,8 +2,7 @@
  * drivers/s390/cio/chsc.c
  *   S/390 common I/O routines -- channel subsystem call
  *
- *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
- *			      IBM Corporation
+ *    Copyright IBM Corp. 1999,2008
  *    Author(s): Ingo Adlung (adlung@de.ibm.com)
  *		 Cornelia Huck (cornelia.huck@de.ibm.com)
  *		 Arnd Bergmann (arndb@de.ibm.com)
@@ -127,77 +126,12 @@ out_free:
 	return ret;
 }
 
-static int check_for_io_on_path(struct subchannel *sch, int mask)
-{
-	int cc;
-
-	cc = stsch(sch->schid, &sch->schib);
-	if (cc)
-		return 0;
-	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
-		return 1;
-	return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch)
-{
-	if (cio_clear(sch)) {
-		/* Recheck device in case clear failed. */
-		sch->lpm = 0;
-		if (device_trigger_verify(sch) != 0)
-			css_schedule_eval(sch->schid);
-		return;
-	}
-	/* Request retry of internal operation. */
-	device_set_intretry(sch);
-	/* Call handler. */
-	if (sch->driver && sch->driver->termination)
-		sch->driver->termination(sch);
-}
-
 static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
 {
-	int j;
-	int mask;
-	struct chp_id *chpid = data;
-	struct schib schib;
-
-	for (j = 0; j < 8; j++) {
-		mask = 0x80 >> j;
-		if ((sch->schib.pmcw.pim & mask) &&
-		    (sch->schib.pmcw.chpid[j] == chpid->id))
-			break;
-	}
-	if (j >= 8)
-		return 0;
-
 	spin_lock_irq(sch->lock);
-
-	stsch(sch->schid, &schib);
-	if (!css_sch_is_valid(&schib))
-		goto out_unreg;
-	memcpy(&sch->schib, &schib, sizeof(struct schib));
-	/* Check for single path devices. */
-	if (sch->schib.pmcw.pim == 0x80)
-		goto out_unreg;
-
-	if (check_for_io_on_path(sch, mask)) {
-		if (device_is_online(sch))
-			device_kill_io(sch);
-		else {
-			terminate_internal_io(sch);
-			/* Re-start path verification. */
-			if (sch->driver && sch->driver->verify)
-				sch->driver->verify(sch);
-		}
-	} else {
-		/* trigger path verification. */
-		if (sch->driver && sch->driver->verify)
-			sch->driver->verify(sch);
-		else if (sch->lpm == mask)
+	if (sch->driver && sch->driver->chp_event)
+		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
 			goto out_unreg;
-	}
-
 	spin_unlock_irq(sch->lock);
 	return 0;
 
@@ -242,53 +176,11 @@ static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
 	return 0;
 }
 
-struct res_acc_data {
-	struct chp_id chpid;
-	u32 fla_mask;
-	u16 fla;
-};
-
-static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
-			      struct res_acc_data *data)
-{
-	int i;
-	int mask;
-
-	for (i = 0; i < 8; i++) {
-		mask = 0x80 >> i;
-		if (!(ssd->path_mask & mask))
-			continue;
-		if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
-			continue;
-		if ((ssd->fla_valid_mask & mask) &&
-		    ((ssd->fla[i] & data->fla_mask) != data->fla))
-			continue;
-		return mask;
-	}
-	return 0;
-}
-
 static int __s390_process_res_acc(struct subchannel *sch, void *data)
 {
-	int chp_mask, old_lpm;
-	struct res_acc_data *res_data = data;
-
 	spin_lock_irq(sch->lock);
-	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
-	if (chp_mask == 0)
-		goto out;
-	if (stsch(sch->schid, &sch->schib))
-		goto out;
-	old_lpm = sch->lpm;
-	sch->lpm = ((sch->schib.pmcw.pim &
-		     sch->schib.pmcw.pam &
-		     sch->schib.pmcw.pom)
-		    | chp_mask) & sch->opm;
-	if (!old_lpm && sch->lpm)
-		device_trigger_reprobe(sch);
-	else if (sch->driver && sch->driver->verify)
-		sch->driver->verify(sch);
-out:
+	if (sch->driver && sch->driver->chp_event)
+		sch->driver->chp_event(sch, data, CHP_ONLINE);
 	spin_unlock_irq(sch->lock);
 
 	return 0;
@@ -509,114 +401,36 @@ void chsc_process_crw(void)
 	} while (sei_area->flags & 0x80);
 }
 
-static int __chp_add_new_sch(struct subchannel_id schid, void *data)
-{
-	struct schib schib;
-
-	if (stsch_err(schid, &schib))
-		/* We're through */
-		return -ENXIO;
-
-	/* Put it on the slow path. */
-	css_schedule_eval(schid);
-	return 0;
-}
-
-
-static int __chp_add(struct subchannel *sch, void *data)
-{
-	int i, mask;
-	struct chp_id *chpid = data;
-
-	spin_lock_irq(sch->lock);
-	for (i=0; i<8; i++) {
-		mask = 0x80 >> i;
-		if ((sch->schib.pmcw.pim & mask) &&
-		    (sch->schib.pmcw.chpid[i] == chpid->id))
-			break;
-	}
-	if (i==8) {
-		spin_unlock_irq(sch->lock);
-		return 0;
-	}
-	if (stsch(sch->schid, &sch->schib)) {
-		spin_unlock_irq(sch->lock);
-		css_schedule_eval(sch->schid);
-		return 0;
-	}
-	sch->lpm = ((sch->schib.pmcw.pim &
-		     sch->schib.pmcw.pam &
-		     sch->schib.pmcw.pom)
-		    | mask) & sch->opm;
-
-	if (sch->driver && sch->driver->verify)
-		sch->driver->verify(sch);
-
-	spin_unlock_irq(sch->lock);
-
-	return 0;
-}
-
 void chsc_chp_online(struct chp_id chpid)
 {
 	char dbf_txt[15];
+	struct res_acc_data res_data;
 
 	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
 	if (chp_get_status(chpid) != 0) {
+		memset(&res_data, 0, sizeof(struct res_acc_data));
+		res_data.chpid = chpid;
 		/* Wait until previous actions have settled. */
 		css_wait_for_slow_path();
-		for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
-					   &chpid);
+		for_each_subchannel_staged(__s390_process_res_acc, NULL,
+					   &res_data);
 	}
 }
 
 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
 					 struct chp_id chpid, int on)
 {
-	int chp, old_lpm;
-	int mask;
 	unsigned long flags;
+	struct res_acc_data res_data;
 
+	memset(&res_data, 0, sizeof(struct res_acc_data));
+	res_data.chpid = chpid;
 	spin_lock_irqsave(sch->lock, flags);
-	old_lpm = sch->lpm;
-	for (chp = 0; chp < 8; chp++) {
-		mask = 0x80 >> chp;
-		if (!(sch->ssd_info.path_mask & mask))
-			continue;
-		if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
-			continue;
-
-		if (on) {
-			sch->opm |= mask;
-			sch->lpm |= mask;
-			if (!old_lpm)
-				device_trigger_reprobe(sch);
-			else if (sch->driver && sch->driver->verify)
-				sch->driver->verify(sch);
-			break;
-		}
-		sch->opm &= ~mask;
-		sch->lpm &= ~mask;
-		if (check_for_io_on_path(sch, mask)) {
-			if (device_is_online(sch))
-				/* Path verification is done after killing. */
-				device_kill_io(sch);
-			else {
-				/* Kill and retry internal I/O. */
-				terminate_internal_io(sch);
-				/* Re-start path verification. */
-				if (sch->driver && sch->driver->verify)
-					sch->driver->verify(sch);
-			}
-		} else if (!sch->lpm) {
-			if (device_trigger_verify(sch) != 0)
-				css_schedule_eval(sch->schid);
-		} else if (sch->driver && sch->driver->verify)
-			sch->driver->verify(sch);
-		break;
-	}
+	if (sch->driver && sch->driver->chp_event)
+		sch->driver->chp_event(sch, &res_data,
+				       on ? CHP_VARY_ON : CHP_VARY_OFF);
 	spin_unlock_irqrestore(sch->lock, flags);
 }
 
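Note on the matching semantics: both chsc_chp_online() and __s390_subchannel_vary_chpid() zero the res_acc_data before setting only the CHPID, so fla_mask and fla stay 0 and the link-address test in chp_ssd_get_mask() reduces to (fla[i] & 0) != 0, which never filters — matching is by CHPID alone. Resource-accessibility events can narrow the match; a sketch of the three modes (the 0xffff/0xff00 masks follow the full/partial link-address convention used when processing SEI resource-accessibility data; the concrete values here are illustrative):

	/* CHPID only (vary on/off, channel path online): */
	struct res_acc_data rad_chp = { .chpid = chpid };
	/* CHPID plus full link address: */
	struct res_acc_data rad_fla = { .chpid = chpid,
					.fla = 0x1234, .fla_mask = 0xffff };
	/* CHPID plus control-unit address only: */
	struct res_acc_data rad_cua = { .chpid = chpid,
					.fla = 0x1200, .fla_mask = 0xff00 };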
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 903e23ae8ed5..fdb164f36109 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -564,6 +564,7 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
 	}
 	/* Copy subchannel type from path management control word. */
 	sch->st = sch->schib.pmcw.st;
+
 	switch (sch->st) {
 	case SUBCHANNEL_TYPE_IO:
 		err = cio_validate_io_subchannel(sch);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 53e7496dc90c..020566571e07 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -283,7 +283,7 @@ static int css_register_subchannel(struct subchannel *sch)
 	return ret;
 }
 
-static int css_probe_device(struct subchannel_id schid)
+int css_probe_device(struct subchannel_id schid)
 {
 	int ret;
 	struct subchannel *sch;
@@ -330,112 +330,6 @@ int css_sch_is_valid(struct schib *schib)
 }
 EXPORT_SYMBOL_GPL(css_sch_is_valid);
 
-static int css_get_subchannel_status(struct subchannel *sch)
-{
-	struct schib schib;
-
-	if (stsch(sch->schid, &schib))
-		return CIO_GONE;
-	if (!css_sch_is_valid(&schib))
-		return CIO_GONE;
-	if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
-		return CIO_REVALIDATE;
-	if (!sch->lpm)
-		return CIO_NO_PATH;
-	return CIO_OPER;
-}
-
-static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
-{
-	int event, ret, disc;
-	unsigned long flags;
-	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
-
-	spin_lock_irqsave(sch->lock, flags);
-	disc = device_is_disconnected(sch);
-	if (disc && slow) {
-		/* Disconnected devices are evaluated directly only.*/
-		spin_unlock_irqrestore(sch->lock, flags);
-		return 0;
-	}
-	/* No interrupt after machine check - kill pending timers. */
-	device_kill_pending_timer(sch);
-	if (!disc && !slow) {
-		/* Non-disconnected devices are evaluated on the slow path. */
-		spin_unlock_irqrestore(sch->lock, flags);
-		return -EAGAIN;
-	}
-	event = css_get_subchannel_status(sch);
-	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
-		      sch->schid.ssid, sch->schid.sch_no, event,
-		      disc ? "disconnected" : "normal",
-		      slow ? "slow" : "fast");
-	/* Analyze subchannel status. */
-	action = NONE;
-	switch (event) {
-	case CIO_NO_PATH:
-		if (disc) {
-			/* Check if paths have become available. */
-			action = REPROBE;
-			break;
-		}
-		/* fall through */
-	case CIO_GONE:
-		/* Prevent unwanted effects when opening lock. */
-		cio_disable_subchannel(sch);
-		device_set_disconnected(sch);
-		/* Ask driver what to do with device. */
-		action = UNREGISTER;
-		if (sch->driver && sch->driver->notify) {
-			spin_unlock_irqrestore(sch->lock, flags);
-			ret = sch->driver->notify(sch, event);
-			spin_lock_irqsave(sch->lock, flags);
-			if (ret)
-				action = NONE;
-		}
-		break;
-	case CIO_REVALIDATE:
-		/* Device will be removed, so no notify necessary. */
-		if (disc)
-			/* Reprobe because immediate unregister might block. */
-			action = REPROBE;
-		else
-			action = UNREGISTER_PROBE;
-		break;
-	case CIO_OPER:
-		if (disc)
-			/* Get device operational again. */
-			action = REPROBE;
-		break;
-	}
-	/* Perform action. */
-	ret = 0;
-	switch (action) {
-	case UNREGISTER:
-	case UNREGISTER_PROBE:
-		/* Unregister device (will use subchannel lock). */
-		spin_unlock_irqrestore(sch->lock, flags);
-		css_sch_device_unregister(sch);
-		spin_lock_irqsave(sch->lock, flags);
-
-		/* Reset intparm to zeroes. */
-		sch->schib.pmcw.intparm = 0;
-		cio_modify(sch);
-		break;
-	case REPROBE:
-		device_trigger_reprobe(sch);
-		break;
-	default:
-		break;
-	}
-	spin_unlock_irqrestore(sch->lock, flags);
-	/* Probe if necessary. */
-	if (action == UNREGISTER_PROBE)
-		ret = css_probe_device(sch->schid);
-
-	return ret;
-}
-
 static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
 {
 	struct schib schib;
@@ -454,6 +348,21 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
 	return css_probe_device(schid);
 }
 
+static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
+{
+	int ret = 0;
+
+	if (sch->driver) {
+		if (sch->driver->sch_event)
+			ret = sch->driver->sch_event(sch, slow);
+		else
+			dev_dbg(&sch->dev,
+				"Got subchannel machine check but "
+				"no sch_event handler provided.\n");
+	}
+	return ret;
+}
+
 static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
 {
 	struct subchannel *sch;
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index e0fc7b499784..4cdc132c86bb 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -58,18 +58,27 @@ struct pgid {
 	__u32 tod_high;		/* high word TOD clock */
 } __attribute__ ((packed));
 
-/*
- * A css driver handles all subchannels of one type.
- */
 struct subchannel;
+/**
+ * struct css_driver - device driver for subchannels
+ * @owner: owning module
+ * @subchannel_type: subchannel type supported by this driver
+ * @drv: embedded device driver structure
+ * @irq: called on interrupts
+ * @chp_event: called for events affecting a channel path
+ * @sch_event: called for events affecting the subchannel
+ * @probe: function called on probe
+ * @remove: function called on remove
+ * @shutdown: called at device shutdown
+ * @name: name of the device driver
+ */
 struct css_driver {
 	struct module *owner;
 	unsigned int subchannel_type;
 	struct device_driver drv;
 	void (*irq)(struct subchannel *);
-	int (*notify)(struct subchannel *, int);
-	void (*verify)(struct subchannel *);
-	void (*termination)(struct subchannel *);
+	int (*chp_event)(struct subchannel *, void *, int);
+	int (*sch_event)(struct subchannel *, int);
 	int (*probe)(struct subchannel *);
 	int (*remove)(struct subchannel *);
 	void (*shutdown)(struct subchannel *);
@@ -87,7 +96,8 @@ extern int css_driver_register(struct css_driver *);
 extern void css_driver_unregister(struct css_driver *);
 
 extern void css_sch_device_unregister(struct subchannel *);
-extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
+extern int css_probe_device(struct subchannel_id);
+extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
 extern int css_init_done;
 int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
 			       int (*fn_unknown)(struct subchannel_id,
@@ -119,20 +129,6 @@ struct channel_subsystem {
 extern struct bus_type css_bus_type;
 extern struct channel_subsystem *channel_subsystems[];
 
-/* Some helper functions for disconnected state. */
-int device_is_disconnected(struct subchannel *);
-void device_set_disconnected(struct subchannel *);
-void device_trigger_reprobe(struct subchannel *);
-
-/* Helper functions for vary on/off. */
-int device_is_online(struct subchannel *);
-void device_kill_io(struct subchannel *);
-void device_set_intretry(struct subchannel *sch);
-int device_trigger_verify(struct subchannel *sch);
-
-/* Machine check helper function. */
-void device_kill_pending_timer(struct subchannel *);
-
 /* Helper functions to build lists for the slow path. */
 void css_schedule_eval(struct subchannel_id schid);
 void css_schedule_eval_all(void);
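The kernel-doc above documents the reworked callback set: notify/verify/termination are gone, replaced by chp_event (channel-path events, receiving the CHP_* codes from chp.h) and sch_event (subchannel evaluation after a machine check). A minimal sketch of a driver filling in the new hooks — the foo_* names are hypothetical; io_subchannel_driver in device.c below is the real in-tree user:

	static int foo_chp_event(struct subchannel *sch, void *data, int event)
	{
		/* react to CHP_ONLINE/CHP_OFFLINE/CHP_VARY_ON/CHP_VARY_OFF */
		return 0;
	}

	static int foo_sch_event(struct subchannel *sch, int slow)
	{
		/* evaluate the subchannel after a machine check */
		return 0;
	}

	static struct css_driver foo_driver = {
		.owner = THIS_MODULE,
		.subchannel_type = SUBCHANNEL_TYPE_IO,
		.name = "foo",
		.chp_event = foo_chp_event,
		.sch_event = foo_sch_event,
	};

	/* registered via css_driver_register(&foo_driver); */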
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 23b129fd4d8d..9281b25087a6 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -2,8 +2,7 @@
  * drivers/s390/cio/device.c
  *  bus driver for ccw devices
  *
- *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- *			 IBM Corporation
+ *    Copyright IBM Corp. 2002,2008
  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
  *		 Cornelia Huck (cornelia.huck@de.ibm.com)
  *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
@@ -126,19 +125,17 @@ struct bus_type ccw_bus_type;
 static void io_subchannel_irq(struct subchannel *);
 static int io_subchannel_probe(struct subchannel *);
 static int io_subchannel_remove(struct subchannel *);
-static int io_subchannel_notify(struct subchannel *, int);
-static void io_subchannel_verify(struct subchannel *);
-static void io_subchannel_ioterm(struct subchannel *);
 static void io_subchannel_shutdown(struct subchannel *);
+static int io_subchannel_sch_event(struct subchannel *, int);
+static int io_subchannel_chp_event(struct subchannel *, void *, int);
 
 static struct css_driver io_subchannel_driver = {
 	.owner = THIS_MODULE,
 	.subchannel_type = SUBCHANNEL_TYPE_IO,
 	.name = "io_subchannel",
 	.irq = io_subchannel_irq,
-	.notify = io_subchannel_notify,
-	.verify = io_subchannel_verify,
-	.termination = io_subchannel_ioterm,
+	.sch_event = io_subchannel_sch_event,
+	.chp_event = io_subchannel_chp_event,
 	.probe = io_subchannel_probe,
 	.remove = io_subchannel_remove,
 	.shutdown = io_subchannel_shutdown,
@@ -786,7 +783,7 @@ static void sch_attach_device(struct subchannel *sch,
 	sch_set_cdev(sch, cdev);
 	cdev->private->schid = sch->schid;
 	cdev->ccwlock = sch->lock;
-	device_trigger_reprobe(sch);
+	ccw_device_trigger_reprobe(cdev);
 	spin_unlock_irq(sch->lock);
 }
 
@@ -1265,11 +1262,7 @@ static int io_subchannel_notify(struct subchannel *sch, int event)
 	cdev = sch_get_cdev(sch);
 	if (!cdev)
 		return 0;
-	if (!cdev->drv)
-		return 0;
-	if (!cdev->online)
-		return 0;
-	return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
+	return ccw_device_notify(cdev, event);
 }
 
1275static void io_subchannel_verify(struct subchannel *sch) 1268static void io_subchannel_verify(struct subchannel *sch)
@@ -1281,22 +1274,98 @@ static void io_subchannel_verify(struct subchannel *sch)
1281 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1274 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1282} 1275}
1283 1276
1284static void io_subchannel_ioterm(struct subchannel *sch) 1277static int check_for_io_on_path(struct subchannel *sch, int mask)
1285{ 1278{
1286 struct ccw_device *cdev; 1279 int cc;
1287 1280
1288 cdev = sch_get_cdev(sch); 1281 cc = stsch(sch->schid, &sch->schib);
1289 if (!cdev) 1282 if (cc)
1290 return; 1283 return 0;
1291 /* Internal I/O will be retried by the interrupt handler. */ 1284 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
1292 if (cdev->private->flags.intretry) 1285 return 1;
1286 return 0;
1287}
1288
1289static void terminate_internal_io(struct subchannel *sch,
1290 struct ccw_device *cdev)
1291{
1292 if (cio_clear(sch)) {
1293 /* Recheck device in case clear failed. */
1294 sch->lpm = 0;
1295 if (cdev->online)
1296 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1297 else
1298 css_schedule_eval(sch->schid);
1293 return; 1299 return;
1300 }
1294 cdev->private->state = DEV_STATE_CLEAR_VERIFY; 1301 cdev->private->state = DEV_STATE_CLEAR_VERIFY;
1302 /* Request retry of internal operation. */
1303 cdev->private->flags.intretry = 1;
1304 /* Call handler. */
1295 if (cdev->handler) 1305 if (cdev->handler)
1296 cdev->handler(cdev, cdev->private->intparm, 1306 cdev->handler(cdev, cdev->private->intparm,
1297 ERR_PTR(-EIO)); 1307 ERR_PTR(-EIO));
1298} 1308}
1299 1309
1310static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1311{
1312 struct ccw_device *cdev;
1313
1314 cdev = sch_get_cdev(sch);
1315 if (!cdev)
1316 return;
1317 if (check_for_io_on_path(sch, mask)) {
1318 if (cdev->private->state == DEV_STATE_ONLINE)
1319 ccw_device_kill_io(cdev);
1320 else {
1321 terminate_internal_io(sch, cdev);
1322 /* Re-start path verification. */
1323 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1324 }
1325 } else
1326 /* trigger path verification. */
1327 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1328
1329}
1330
1331static int io_subchannel_chp_event(struct subchannel *sch, void *data,
1332 int event)
1333{
1334 int mask;
1335 struct res_acc_data *res_data;
1336
1337 res_data = data;
1338 mask = chp_ssd_get_mask(&sch->ssd_info, res_data);
1339 if (!mask)
1340 return 0;
1341 switch (event) {
1342 case CHP_VARY_OFF:
1343 sch->opm &= ~mask;
1344 sch->lpm &= ~mask;
1345 io_subchannel_terminate_path(sch, mask);
1346 break;
1347 case CHP_VARY_ON:
1348 sch->opm |= mask;
1349 sch->lpm |= mask;
1350 io_subchannel_verify(sch);
1351 break;
1352 case CHP_OFFLINE:
1353 if (stsch(sch->schid, &sch->schib))
1354 return -ENXIO;
1355 if (!css_sch_is_valid(&sch->schib))
1356 return -ENODEV;
1357 io_subchannel_terminate_path(sch, mask);
1358 break;
1359 case CHP_ONLINE:
1360 if (stsch(sch->schid, &sch->schib))
1361 return -ENXIO;
1362 sch->lpm |= mask & sch->opm;
1363 io_subchannel_verify(sch);
1364 break;
1365 }
1366 return 0;
1367}
1368
1300static void 1369static void
1301io_subchannel_shutdown(struct subchannel *sch) 1370io_subchannel_shutdown(struct subchannel *sch)
1302{ 1371{
@@ -1326,6 +1395,195 @@ io_subchannel_shutdown(struct subchannel *sch)
 	cio_disable_subchannel(sch);
 }
 
+static int io_subchannel_get_status(struct subchannel *sch)
+{
+	struct schib schib;
+
+	if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
+		return CIO_GONE;
+	if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
+		return CIO_REVALIDATE;
+	if (!sch->lpm)
+		return CIO_NO_PATH;
+	return CIO_OPER;
+}
+
+static int device_is_disconnected(struct ccw_device *cdev)
+{
+	if (!cdev)
+		return 0;
+	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
+		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
+}
+
+static int recovery_check(struct device *dev, void *data)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	int *redo = data;
+
+	spin_lock_irq(cdev->ccwlock);
+	switch (cdev->private->state) {
+	case DEV_STATE_DISCONNECTED:
+		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
+			      cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno);
+		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+		*redo = 1;
+		break;
+	case DEV_STATE_DISCONNECTED_SENSE_ID:
+		*redo = 1;
+		break;
+	}
+	spin_unlock_irq(cdev->ccwlock);
+
+	return 0;
+}
+
+static void recovery_work_func(struct work_struct *unused)
+{
+	int redo = 0;
+
+	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
+	if (redo) {
+		spin_lock_irq(&recovery_lock);
+		if (!timer_pending(&recovery_timer)) {
+			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
+				recovery_phase++;
+			mod_timer(&recovery_timer, jiffies +
+				  recovery_delay[recovery_phase] * HZ);
+		}
+		spin_unlock_irq(&recovery_lock);
+	} else
+		CIO_MSG_EVENT(4, "recovery: end\n");
+}
+
+static DECLARE_WORK(recovery_work, recovery_work_func);
+
+static void recovery_func(unsigned long data)
+{
+	/*
+	 * We can't do our recovery in softirq context and it's not
+	 * performance critical, so we schedule it.
+	 */
+	schedule_work(&recovery_work);
+}
+
+static void ccw_device_schedule_recovery(void)
+{
+	unsigned long flags;
+
+	CIO_MSG_EVENT(4, "recovery: schedule\n");
+	spin_lock_irqsave(&recovery_lock, flags);
+	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
+		recovery_phase = 0;
+		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
+	}
+	spin_unlock_irqrestore(&recovery_lock, flags);
+}
+
+static void device_set_disconnected(struct ccw_device *cdev)
+{
+	if (!cdev)
+		return;
+	ccw_device_set_timeout(cdev, 0);
+	cdev->private->flags.fake_irb = 0;
+	cdev->private->state = DEV_STATE_DISCONNECTED;
+	if (cdev->online)
+		ccw_device_schedule_recovery();
+}
+
+static int io_subchannel_sch_event(struct subchannel *sch, int slow)
+{
+	int event, ret, disc;
+	unsigned long flags;
+	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
+	struct ccw_device *cdev;
+
+	spin_lock_irqsave(sch->lock, flags);
+	cdev = sch_get_cdev(sch);
+	disc = device_is_disconnected(cdev);
+	if (disc && slow) {
+		/* Disconnected devices are evaluated directly only.*/
+		spin_unlock_irqrestore(sch->lock, flags);
+		return 0;
+	}
+	/* No interrupt after machine check - kill pending timers. */
+	if (cdev)
+		ccw_device_set_timeout(cdev, 0);
+	if (!disc && !slow) {
+		/* Non-disconnected devices are evaluated on the slow path. */
+		spin_unlock_irqrestore(sch->lock, flags);
+		return -EAGAIN;
+	}
+	event = io_subchannel_get_status(sch);
+	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
+		      sch->schid.ssid, sch->schid.sch_no, event,
+		      disc ? "disconnected" : "normal",
+		      slow ? "slow" : "fast");
+	/* Analyze subchannel status. */
+	action = NONE;
+	switch (event) {
+	case CIO_NO_PATH:
+		if (disc) {
+			/* Check if paths have become available. */
+			action = REPROBE;
+			break;
+		}
+		/* fall through */
+	case CIO_GONE:
+		/* Prevent unwanted effects when opening lock. */
+		cio_disable_subchannel(sch);
+		device_set_disconnected(cdev);
+		/* Ask driver what to do with device. */
+		action = UNREGISTER;
+		spin_unlock_irqrestore(sch->lock, flags);
+		ret = io_subchannel_notify(sch, event);
+		spin_lock_irqsave(sch->lock, flags);
+		if (ret)
+			action = NONE;
+		break;
+	case CIO_REVALIDATE:
+		/* Device will be removed, so no notify necessary. */
+		if (disc)
+			/* Reprobe because immediate unregister might block. */
+			action = REPROBE;
+		else
+			action = UNREGISTER_PROBE;
+		break;
+	case CIO_OPER:
+		if (disc)
+			/* Get device operational again. */
+			action = REPROBE;
+		break;
+	}
+	/* Perform action. */
+	ret = 0;
+	switch (action) {
+	case UNREGISTER:
+	case UNREGISTER_PROBE:
+		/* Unregister device (will use subchannel lock). */
+		spin_unlock_irqrestore(sch->lock, flags);
+		css_sch_device_unregister(sch);
+		spin_lock_irqsave(sch->lock, flags);
+
+		/* Reset intparm to zeroes. */
+		sch->schib.pmcw.intparm = 0;
+		cio_modify(sch);
+		break;
+	case REPROBE:
+		ccw_device_trigger_reprobe(cdev);
+		break;
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(sch->lock, flags);
+	/* Probe if necessary. */
+	if (action == UNREGISTER_PROBE)
+		ret = css_probe_device(sch->schid);
+
+	return ret;
+}
+
 #ifdef CONFIG_CCW_CONSOLE
 static struct ccw_device console_cdev;
 static struct ccw_device_private console_private;
@@ -1558,71 +1816,6 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
 	return sch->schid;
 }
 
-static int recovery_check(struct device *dev, void *data)
-{
-	struct ccw_device *cdev = to_ccwdev(dev);
-	int *redo = data;
-
-	spin_lock_irq(cdev->ccwlock);
-	switch (cdev->private->state) {
-	case DEV_STATE_DISCONNECTED:
-		CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n",
-			      cdev->private->dev_id.ssid,
-			      cdev->private->dev_id.devno);
-		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
-		*redo = 1;
-		break;
-	case DEV_STATE_DISCONNECTED_SENSE_ID:
-		*redo = 1;
-		break;
-	}
-	spin_unlock_irq(cdev->ccwlock);
-
-	return 0;
-}
-
-static void recovery_work_func(struct work_struct *unused)
-{
-	int redo = 0;
-
-	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
-	if (redo) {
-		spin_lock_irq(&recovery_lock);
-		if (!timer_pending(&recovery_timer)) {
-			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
-				recovery_phase++;
-			mod_timer(&recovery_timer, jiffies +
-				  recovery_delay[recovery_phase] * HZ);
-		}
-		spin_unlock_irq(&recovery_lock);
-	} else
-		CIO_MSG_EVENT(4, "recovery: end\n");
-}
-
-static DECLARE_WORK(recovery_work, recovery_work_func);
-
-static void recovery_func(unsigned long data)
-{
-	/*
-	 * We can't do our recovery in softirq context and it's not
-	 * performance critical, so we schedule it.
-	 */
-	schedule_work(&recovery_work);
-}
-
-void ccw_device_schedule_recovery(void)
-{
-	unsigned long flags;
-
-	CIO_MSG_EVENT(4, "recovery: schedule\n");
-	spin_lock_irqsave(&recovery_lock, flags);
-	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
-		recovery_phase = 0;
-		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
-	}
-	spin_unlock_irqrestore(&recovery_lock, flags);
-}
-
 MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(ccw_device_set_online);
 EXPORT_SYMBOL(ccw_device_set_offline);
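The recovery machinery moved above is unchanged in substance: recovery_work_func() walks the ccw bus and, while recovery_check() still finds disconnected devices, rearms recovery_timer with a growing delay indexed by recovery_phase. The recovery_delay[] array and the lock/timer/phase variables are defined near the top of device.c and are not part of this hunk; as an assumption for orientation, in kernels of this vintage they look roughly like:

	/* Assumed context (not in this patch): escalating retry intervals. */
	static DEFINE_SPINLOCK(recovery_lock);
	static int recovery_phase;
	static const unsigned long recovery_delay[] = { 3, 30, 300 };	/* seconds */

so a disconnected device is re-verified after about 3s, then 30s, then every 300s until it either recovers or is unregistered.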
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index cb08092be39f..9800a8335a3f 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -88,8 +88,6 @@ int ccw_device_recognition(struct ccw_device *);
 int ccw_device_online(struct ccw_device *);
 int ccw_device_offline(struct ccw_device *);
 
-void ccw_device_schedule_recovery(void);
-
 /* Function prototypes for device status and basic sense stuff. */
 void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
 void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
@@ -118,6 +116,11 @@ int ccw_device_call_handler(struct ccw_device *);
 
 int ccw_device_stlck(struct ccw_device *);
 
+/* Helper function for machine check handling. */
+void ccw_device_trigger_reprobe(struct ccw_device *);
+void ccw_device_kill_io(struct ccw_device *);
+int ccw_device_notify(struct ccw_device *, int);
+
 /* qdio needs this. */
 void ccw_device_set_timeout(struct ccw_device *, int);
 extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
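ccw_device_notify() (added in device_fsm.c below) centralizes the check that io_subchannel_notify() previously open-coded: an event is only forwarded if the device has a driver and is online. On the receiving end, a ccw driver's notify() hook returns nonzero to keep a device that common code would otherwise unregister. A hedged sketch of such a hook — foo_notify is hypothetical; the CIO_* codes are the ones used in the sch_event logic above:

	/* Sketch: a ccw driver deciding what to do with a misbehaving device. */
	static int foo_notify(struct ccw_device *cdev, int event)
	{
		switch (event) {
		case CIO_GONE:
		case CIO_NO_PATH:
			return 0;	/* let common code unregister the device */
		case CIO_OPER:
			return 1;	/* operational again - keep the device */
		}
		return 0;
	}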
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index e268d5a77c12..c9b97cbc2203 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -2,8 +2,7 @@
  * drivers/s390/cio/device_fsm.c
  * finite state machine for device handling
  *
- *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- *			 IBM Corporation
+ *    Copyright IBM Corp. 2002,2008
  *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
  *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
  */
@@ -27,65 +26,6 @@
 
 static int timeout_log_enabled;
 
-int
-device_is_online(struct subchannel *sch)
-{
-	struct ccw_device *cdev;
-
-	cdev = sch_get_cdev(sch);
-	if (!cdev)
-		return 0;
-	return (cdev->private->state == DEV_STATE_ONLINE);
-}
-
-int
-device_is_disconnected(struct subchannel *sch)
-{
-	struct ccw_device *cdev;
-
-	cdev = sch_get_cdev(sch);
-	if (!cdev)
-		return 0;
-	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
-		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
-}
-
-void
-device_set_disconnected(struct subchannel *sch)
-{
-	struct ccw_device *cdev;
-
-	cdev = sch_get_cdev(sch);
-	if (!cdev)
-		return;
-	ccw_device_set_timeout(cdev, 0);
-	cdev->private->flags.fake_irb = 0;
-	cdev->private->state = DEV_STATE_DISCONNECTED;
-	if (cdev->online)
-		ccw_device_schedule_recovery();
-}
-
-void device_set_intretry(struct subchannel *sch)
-{
-	struct ccw_device *cdev;
-
-	cdev = sch_get_cdev(sch);
-	if (!cdev)
-		return;
-	cdev->private->flags.intretry = 1;
-}
-
-int device_trigger_verify(struct subchannel *sch)
-{
-	struct ccw_device *cdev;
-
-	cdev = sch_get_cdev(sch);
-	if (!cdev || !cdev->online)
-		return -EINVAL;
-	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
-	return 0;
-}
-
 static int __init ccw_timeout_log_setup(char *unused)
 {
 	timeout_log_enabled = 1;
@@ -171,18 +111,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires)
 	add_timer(&cdev->private->timer);
 }
 
-/* Kill any pending timers after machine check. */
-void
-device_kill_pending_timer(struct subchannel *sch)
-{
-	struct ccw_device *cdev;
-
-	cdev = sch_get_cdev(sch);
-	if (!cdev)
-		return;
-	ccw_device_set_timeout(cdev, 0);
-}
-
 /*
  * Cancel running i/o. This is called repeatedly since halt/clear are
  * asynchronous operations. We do one try with cio_cancel, two tries
@@ -388,25 +316,27 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
 	}
 }
 
+int ccw_device_notify(struct ccw_device *cdev, int event)
+{
+	if (!cdev->drv)
+		return 0;
+	if (!cdev->online)
+		return 0;
+	return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
+}
+
 static void
 ccw_device_oper_notify(struct work_struct *work)
 {
 	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
-	struct subchannel *sch;
 	int ret;
 	unsigned long flags;
 
 	priv = container_of(work, struct ccw_device_private, kick_work);
 	cdev = priv->cdev;
+	ret = ccw_device_notify(cdev, CIO_OPER);
 	spin_lock_irqsave(cdev->ccwlock, flags);
-	sch = to_subchannel(cdev->dev.parent);
-	if (sch->driver && sch->driver->notify) {
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
-		ret = sch->driver->notify(sch, CIO_OPER);
-		spin_lock_irqsave(cdev->ccwlock, flags);
-	} else
-		ret = 0;
 	if (ret) {
 		/* Reenable channel measurements, if needed. */
 		spin_unlock_irqrestore(cdev->ccwlock, flags);
@@ -986,12 +916,10 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 			      ERR_PTR(-EIO));
 }
 
-void device_kill_io(struct subchannel *sch)
+void ccw_device_kill_io(struct ccw_device *cdev)
 {
 	int ret;
-	struct ccw_device *cdev;
 
-	cdev = sch_get_cdev(sch);
 	ret = ccw_device_cancel_halt_clear(cdev);
 	if (ret == -EBUSY) {
 		ccw_device_set_timeout(cdev, 3*HZ);
@@ -1055,17 +983,14 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
 	ccw_device_sense_id_start(cdev);
 }
 
-void
-device_trigger_reprobe(struct subchannel *sch)
+void ccw_device_trigger_reprobe(struct ccw_device *cdev)
 {
-	struct ccw_device *cdev;
+	struct subchannel *sch;
 
-	cdev = sch_get_cdev(sch);
-	if (!cdev)
-		return;
 	if (cdev->private->state != DEV_STATE_DISCONNECTED)
 		return;
 
+	sch = to_subchannel(cdev->dev.parent);
 	/* Update some values. */
 	if (stsch(sch->schid, &sch->schib))
 		return;
@@ -1081,7 +1006,6 @@ device_trigger_reprobe(struct subchannel *sch)
 	sch->schib.pmcw.ena = 0;
 	if ((sch->lpm & (sch->lpm - 1)) != 0)
 		sch->schib.pmcw.mp = 1;
-	sch->schib.pmcw.intparm = (u32)(addr_t)sch;
 	/* We should also udate ssd info, but this has to wait. */
 	/* Check if this is another device which appeared on the same sch. */
 	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {