Diffstat (limited to 'drivers/s390/cio/chsc.c')
-rw-r--r--  drivers/s390/cio/chsc.c  121
1 file changed, 44 insertions, 77 deletions
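
Two related conversions run through every hunk below: iteration over subchannels moves from bus_for_each_dev()/for_each_subchannel() to for_each_subchannel_staged(), and the per-subchannel callbacks and css driver hooks (verify, termination) now take a struct subchannel * instead of a struct device *, so the to_subchannel(dev) step disappears. The fragment below is only a standalone sketch of that second change; the demo_* names and stand-in types are invented for illustration and are not code from the kernel tree.

#include <stdio.h>

struct subchannel;

/* Stand-in for the css driver hook table; the real structure differs. */
struct demo_hooks {
	void (*verify)(struct subchannel *);      /* was: void (*verify)(struct device *) */
	void (*termination)(struct subchannel *); /* was: void (*termination)(struct device *) */
};

struct subchannel {
	struct demo_hooks *driver;
};

/* New-style callback: the subchannel arrives directly, no to_subchannel(dev). */
static int demo_vary_chpid_off(struct subchannel *sch, void *data)
{
	int *chpid = data;	/* the real code passes a struct chp_id */

	(void)chpid;
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(sch);	/* was: sch->driver->verify(&sch->dev) */
	return 0;
}

static void demo_verify(struct subchannel *sch)
{
	printf("path verification requested for subchannel %p\n", (void *)sch);
}

int main(void)
{
	struct demo_hooks hooks = { demo_verify, NULL };
	struct subchannel sch = { &hooks };
	int chpid = 0x40;	/* arbitrary channel-path id for the demo */

	return demo_vary_chpid_off(&sch, &chpid);
}
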
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 597c0c76a2ad..e7ba16a74ef7 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -89,7 +89,8 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
 	/* Copy data */
 	ret = 0;
 	memset(ssd, 0, sizeof(struct chsc_ssd_info));
-	if ((ssd_area->st != 0) && (ssd_area->st != 2))
+	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
+	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
 		goto out_free;
 	ssd->path_mask = ssd_area->path_mask;
 	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
@@ -132,20 +133,16 @@ static void terminate_internal_io(struct subchannel *sch)
 	device_set_intretry(sch);
 	/* Call handler. */
 	if (sch->driver && sch->driver->termination)
-		sch->driver->termination(&sch->dev);
+		sch->driver->termination(sch);
 }
 
-static int
-s390_subchannel_remove_chpid(struct device *dev, void *data)
+static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
 {
 	int j;
 	int mask;
-	struct subchannel *sch;
-	struct chp_id *chpid;
+	struct chp_id *chpid = data;
 	struct schib schib;
 
-	sch = to_subchannel(dev);
-	chpid = data;
 	for (j = 0; j < 8; j++) {
 		mask = 0x80 >> j;
 		if ((sch->schib.pmcw.pim & mask) &&
@@ -158,7 +155,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 	spin_lock_irq(sch->lock);
 
 	stsch(sch->schid, &schib);
-	if (!schib.pmcw.dnv)
+	if (!css_sch_is_valid(&schib))
 		goto out_unreg;
 	memcpy(&sch->schib, &schib, sizeof(struct schib));
 	/* Check for single path devices. */
@@ -172,12 +169,12 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 			terminate_internal_io(sch);
 			/* Re-start path verification. */
 			if (sch->driver && sch->driver->verify)
-				sch->driver->verify(&sch->dev);
+				sch->driver->verify(sch);
 		}
 	} else {
 		/* trigger path verification. */
 		if (sch->driver && sch->driver->verify)
-			sch->driver->verify(&sch->dev);
+			sch->driver->verify(sch);
 		else if (sch->lpm == mask)
 			goto out_unreg;
 	}
@@ -201,12 +198,10 @@ void chsc_chp_offline(struct chp_id chpid)
 
 	if (chp_get_status(chpid) <= 0)
 		return;
-	bus_for_each_dev(&css_bus_type, NULL, &chpid,
-			 s390_subchannel_remove_chpid);
+	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
 }
 
-static int
-s390_process_res_acc_new_sch(struct subchannel_id schid)
+static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
 {
 	struct schib schib;
 	/*
@@ -252,18 +247,10 @@ static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
 	return 0;
 }
 
-static int
-__s390_process_res_acc(struct subchannel_id schid, void *data)
+static int __s390_process_res_acc(struct subchannel *sch, void *data)
 {
 	int chp_mask, old_lpm;
-	struct res_acc_data *res_data;
-	struct subchannel *sch;
-
-	res_data = data;
-	sch = get_subchannel_by_schid(schid);
-	if (!sch)
-		/* Check if a subchannel is newly available. */
-		return s390_process_res_acc_new_sch(schid);
+	struct res_acc_data *res_data = data;
 
 	spin_lock_irq(sch->lock);
 	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
@@ -279,10 +266,10 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
 	if (!old_lpm && sch->lpm)
 		device_trigger_reprobe(sch);
 	else if (sch->driver && sch->driver->verify)
-		sch->driver->verify(&sch->dev);
+		sch->driver->verify(sch);
 out:
 	spin_unlock_irq(sch->lock);
-	put_device(&sch->dev);
+
 	return 0;
 }
 
@@ -305,7 +292,8 @@ static void s390_process_res_acc (struct res_acc_data *res_data)
 	 * The more information we have (info), the less scanning
 	 * will we have to do.
 	 */
-	for_each_subchannel(__s390_process_res_acc, res_data);
+	for_each_subchannel_staged(__s390_process_res_acc,
+				   s390_process_res_acc_new_sch, res_data);
 }
 
 static int
@@ -499,8 +487,7 @@ void chsc_process_crw(void)
 	} while (sei_area->flags & 0x80);
 }
 
-static int
-__chp_add_new_sch(struct subchannel_id schid)
+static int __chp_add_new_sch(struct subchannel_id schid, void *data)
 {
 	struct schib schib;
 
@@ -514,45 +501,37 @@ __chp_add_new_sch(struct subchannel_id schid)
 }
 
 
-static int
-__chp_add(struct subchannel_id schid, void *data)
+static int __chp_add(struct subchannel *sch, void *data)
 {
 	int i, mask;
-	struct chp_id *chpid;
-	struct subchannel *sch;
-
-	chpid = data;
-	sch = get_subchannel_by_schid(schid);
-	if (!sch)
-		/* Check if the subchannel is now available. */
-		return __chp_add_new_sch(schid);
+	struct chp_id *chpid = data;
+
 	spin_lock_irq(sch->lock);
 	for (i=0; i<8; i++) {
 		mask = 0x80 >> i;
 		if ((sch->schib.pmcw.pim & mask) &&
-		    (sch->schib.pmcw.chpid[i] == chpid->id)) {
-			if (stsch(sch->schid, &sch->schib) != 0) {
-				/* Endgame. */
-				spin_unlock_irq(sch->lock);
-				return -ENXIO;
-			}
+		    (sch->schib.pmcw.chpid[i] == chpid->id))
 			break;
-		}
 	}
 	if (i==8) {
 		spin_unlock_irq(sch->lock);
 		return 0;
 	}
+	if (stsch(sch->schid, &sch->schib)) {
+		spin_unlock_irq(sch->lock);
+		css_schedule_eval(sch->schid);
+		return 0;
+	}
 	sch->lpm = ((sch->schib.pmcw.pim &
 		     sch->schib.pmcw.pam &
 		     sch->schib.pmcw.pom)
 		    | mask) & sch->opm;
 
 	if (sch->driver && sch->driver->verify)
-		sch->driver->verify(&sch->dev);
+		sch->driver->verify(sch);
 
 	spin_unlock_irq(sch->lock);
-	put_device(&sch->dev);
+
 	return 0;
 }
 
@@ -564,7 +543,8 @@ void chsc_chp_online(struct chp_id chpid)
 	CIO_TRACE_EVENT(2, dbf_txt);
 
 	if (chp_get_status(chpid) != 0)
-		for_each_subchannel(__chp_add, &chpid);
+		for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
+					   &chpid);
 }
 
 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
@@ -589,7 +569,7 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch,
 			if (!old_lpm)
 				device_trigger_reprobe(sch);
 			else if (sch->driver && sch->driver->verify)
-				sch->driver->verify(&sch->dev);
+				sch->driver->verify(sch);
 			break;
 		}
 		sch->opm &= ~mask;
@@ -603,37 +583,29 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch,
 				terminate_internal_io(sch);
 				/* Re-start path verification. */
 				if (sch->driver && sch->driver->verify)
-					sch->driver->verify(&sch->dev);
+					sch->driver->verify(sch);
 			}
 		} else if (!sch->lpm) {
 			if (device_trigger_verify(sch) != 0)
 				css_schedule_eval(sch->schid);
 		} else if (sch->driver && sch->driver->verify)
-			sch->driver->verify(&sch->dev);
+			sch->driver->verify(sch);
 		break;
 	}
 	spin_unlock_irqrestore(sch->lock, flags);
 }
 
-static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
+static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
 {
-	struct subchannel *sch;
-	struct chp_id *chpid;
-
-	sch = to_subchannel(dev);
-	chpid = data;
+	struct chp_id *chpid = data;
 
 	__s390_subchannel_vary_chpid(sch, *chpid, 0);
 	return 0;
 }
 
-static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
+static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
 {
-	struct subchannel *sch;
-	struct chp_id *chpid;
-
-	sch = to_subchannel(dev);
-	chpid = data;
+	struct chp_id *chpid = data;
 
 	__s390_subchannel_vary_chpid(sch, *chpid, 1);
 	return 0;
@@ -643,13 +615,7 @@ static int
 __s390_vary_chpid_on(struct subchannel_id schid, void *data)
 {
 	struct schib schib;
-	struct subchannel *sch;
 
-	sch = get_subchannel_by_schid(schid);
-	if (sch) {
-		put_device(&sch->dev);
-		return 0;
-	}
 	if (stsch_err(schid, &schib))
 		/* We're through */
 		return -ENXIO;
@@ -669,12 +635,13 @@ int chsc_chp_vary(struct chp_id chpid, int on)
 	 * Redo PathVerification on the devices the chpid connects to
 	 */
 
-	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
-			 s390_subchannel_vary_chpid_on :
-			 s390_subchannel_vary_chpid_off);
 	if (on)
-		/* Scan for new devices on varied on path. */
-		for_each_subchannel(__s390_vary_chpid_on, NULL);
+		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
+					   __s390_vary_chpid_on, &chpid);
+	else
+		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
+					   NULL, &chpid);
+
 	return 0;
 }
 
@@ -1075,7 +1042,7 @@ chsc_determine_css_characteristics(void)
 
 	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!scsc_area) {
-		CIO_MSG_EVENT(0, "Was not able to determine available"
+		CIO_MSG_EVENT(0, "Was not able to determine available "
 			      "CHSCs due to no memory.\n");
 		return -ENOMEM;
 	}
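
The converted call sites above hand two callbacks to for_each_subchannel_staged(): one for subchannels that are already registered and one for subchannel IDs that have no device yet (the second may be NULL, as in chsc_chp_offline() and the vary-off case). The helper itself is defined elsewhere in the css code, not in this patch; the standalone toy below only models that two-stage dispatch so the calling convention is easier to follow. All demo_* names, the registry array, and the ID range are invented for the sketch.

#include <stdio.h>

/* Stand-in types; the real ones live in drivers/s390/cio/. */
struct subchannel_id { unsigned int sch_no; };
struct subchannel   { struct subchannel_id schid; };

#define DEMO_MAX_SCHID 8

/* Pretend registry: slots 2 and 5 have a registered subchannel. */
static struct subchannel *demo_registry[DEMO_MAX_SCHID];
static struct subchannel demo_a = { { 2 } }, demo_b = { { 5 } };

/* Toy model of the staged walk: registered subchannels go to fn_known,
 * IDs without a registered subchannel go to fn_unknown (if given). */
static int demo_for_each_subchannel_staged(
	int (*fn_known)(struct subchannel *, void *),
	int (*fn_unknown)(struct subchannel_id, void *),
	void *data)
{
	unsigned int i;
	int ret;

	for (i = 0; i < DEMO_MAX_SCHID; i++) {
		struct subchannel_id schid = { i };

		if (demo_registry[i])
			ret = fn_known ? fn_known(demo_registry[i], data) : 0;
		else
			ret = fn_unknown ? fn_unknown(schid, data) : 0;
		if (ret)
			return ret;
	}
	return 0;
}

/* Callbacks shaped like the converted ones in this patch. */
static int demo_known(struct subchannel *sch, void *data)
{
	printf("known subchannel 0.0.%04x, chpid %d\n",
	       sch->schid.sch_no, *(int *)data);
	return 0;
}

static int demo_unknown(struct subchannel_id schid, void *data)
{
	(void)data;
	printf("no device yet at 0.0.%04x, would evaluate it\n", schid.sch_no);
	return 0;
}

int main(void)
{
	int chpid = 0x17;	/* arbitrary channel-path id for the demo */

	demo_registry[2] = &demo_a;
	demo_registry[5] = &demo_b;
	return demo_for_each_subchannel_staged(demo_known, demo_unknown, &chpid);
}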