author		Cornelia Huck <cohuck@de.ibm.com>	2006-01-06 03:19:22 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-06 11:33:51 -0500
commit		f97a56fb768e5fe9cd07c56ca47870136bb5530c
tree		05108317a0cca7aa04cd68f4fcb7b7d3a295ddfa /drivers/s390
parent		a8237fc4108060402d904bea5e1062e22e731969
[PATCH] s390: introduce for_each_subchannel
for_each_subchannel() is an iterator that calls a function for every
possible subchannel id until a non-zero value is returned. Convert the
current iterating functions to use it.

Signed-off-by: Cornelia Huck <cohuck@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
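By way of illustration (not part of the patch itself): the contract is that
for_each_subchannel() walks every possible subchannel id from 0 through
__MAX_SUBCHANNEL, calls the supplied function on each, and stops as soon as
that function returns non-zero, handing the value back to the caller. A
minimal standalone C sketch of that contract follows; the simplified
subchannel_id type and the count_some callback are illustrative stand-ins,
not the real definitions from drivers/s390/cio/css.c and css.h.

/*
 * Standalone sketch of the for_each_subchannel() iteration pattern
 * introduced by this patch. Types are simplified for illustration;
 * the real definitions live in drivers/s390/cio/css.c and css.h.
 */
#include <errno.h>
#include <stdio.h>

#define __MAX_SUBCHANNEL 65535

struct subchannel_id {
	unsigned int sch_no;	/* subchannel number, 0..__MAX_SUBCHANNEL */
};

/*
 * Call fn for every possible subchannel id; stop as soon as fn
 * returns non-zero and propagate that value to the caller.
 */
static int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid = { .sch_no = 0 };
	int ret = -ENODEV;

	do {
		ret = fn(schid, data);
		if (ret)
			break;
	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
	return ret;
}

/* Hypothetical callback: count ids, abort the walk after 100 of them. */
static int
count_some(struct subchannel_id schid, void *data)
{
	unsigned int *count = data;

	return (++*count >= 100) ? -ENXIO : 0; /* non-zero ends the walk */
}

int main(void)
{
	unsigned int count = 0;

	for_each_subchannel(count_some, &count);
	printf("visited %u subchannel ids\n", count);
	return 0;
}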
Diffstat (limited to 'drivers/s390')
-rw-r--r--	drivers/s390/cio/blacklist.c	 46
-rw-r--r--	drivers/s390/cio/chsc.c		372
-rw-r--r--	drivers/s390/cio/cio.c		 79
-rw-r--r--	drivers/s390/cio/css.c		110
-rw-r--r--	drivers/s390/cio/css.h		  1
5 files changed, 318 insertions(+), 290 deletions(-)
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index a4b03031ff50..25e98483d4e4 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -219,6 +219,27 @@ is_blacklisted (int devno)
 }
 
 #ifdef CONFIG_PROC_FS
+static int
+__s390_redo_validation(struct subchannel_id schid, void *data)
+{
+	int ret;
+	struct subchannel *sch;
+
+	sch = get_subchannel_by_schid(schid);
+	if (sch) {
+		/* Already known. */
+		put_device(&sch->dev);
+		return 0;
+	}
+	ret = css_probe_device(schid);
+	if (ret == -ENXIO)
+		return ret; /* We're through. */
+	if (ret == -ENOMEM)
+		/* Stop validation for now. Bad, but no need for a panic. */
+		return ret;
+	return 0;
+}
+
 /*
  * Function: s390_redo_validation
  * Look for no longer blacklisted devices
@@ -226,30 +247,9 @@ is_blacklisted (int devno)
 static inline void
 s390_redo_validation (void)
 {
-	struct subchannel_id schid;
-
 	CIO_TRACE_EVENT (0, "redoval");
-	init_subchannel_id(&schid);
-	do {
-		int ret;
-		struct subchannel *sch;
-
-		sch = get_subchannel_by_schid(schid);
-		if (sch) {
-			/* Already known. */
-			put_device(&sch->dev);
-			continue;
-		}
-		ret = css_probe_device(schid);
-		if (ret == -ENXIO)
-			break; /* We're through. */
-		if (ret == -ENOMEM)
-			/*
-			 * Stop validation for now. Bad, but no need for a
-			 * panic.
-			 */
-			break;
-	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+
+	for_each_subchannel(__s390_redo_validation, NULL);
 }
 
 /*
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index aff5d149b729..78e082311f48 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -310,9 +310,14 @@ s390_set_chpid_offline( __u8 chpid)
 	queue_work(slow_path_wq, &slow_path_work);
 }
 
+struct res_acc_data {
+	struct channel_path *chp;
+	u32 fla_mask;
+	u16 fla;
+};
+
 static int
-s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
-			 struct subchannel *sch)
+s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
 {
 	int found;
 	int chp;
@@ -324,8 +329,9 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
 	 * check if chpid is in information updated by ssd
 	 */
 	if (sch->ssd_info.valid &&
-	    sch->ssd_info.chpid[chp] == chpid &&
-	    (sch->ssd_info.fla[chp] & fla_mask) == fla) {
+	    sch->ssd_info.chpid[chp] == res_data->chp->id &&
+	    (sch->ssd_info.fla[chp] & res_data->fla_mask)
+	    == res_data->fla) {
 		found = 1;
 		break;
 	}
@@ -345,18 +351,80 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
 	return 0x80 >> chp;
 }
 
+static inline int
+s390_process_res_acc_new_sch(struct subchannel_id schid)
+{
+	struct schib schib;
+	int ret;
+	/*
+	 * We don't know the device yet, but since a path
+	 * may be available now to the device we'll have
+	 * to do recognition again.
+	 * Since we don't have any idea about which chpid
+	 * that beast may be on we'll have to do a stsch
+	 * on all devices, grr...
+	 */
+	if (stsch(schid, &schib))
+		/* We're through */
+		return need_rescan ? -EAGAIN : -ENXIO;
+
+	/* Put it on the slow path. */
+	ret = css_enqueue_subchannel_slow(schid);
+	if (ret) {
+		css_clear_subchannel_slow_list();
+		need_rescan = 1;
+		return -EAGAIN;
+	}
+	return 0;
+}
+
 static int
-s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
+__s390_process_res_acc(struct subchannel_id schid, void *data)
 {
+	int chp_mask, old_lpm;
+	struct res_acc_data *res_data;
 	struct subchannel *sch;
+
+	res_data = (struct res_acc_data *)data;
+	sch = get_subchannel_by_schid(schid);
+	if (!sch)
+		/* Check if a subchannel is newly available. */
+		return s390_process_res_acc_new_sch(schid);
+
+	spin_lock_irq(&sch->lock);
+
+	chp_mask = s390_process_res_acc_sch(res_data, sch);
+
+	if (chp_mask == 0) {
+		spin_unlock_irq(&sch->lock);
+		return 0;
+	}
+	old_lpm = sch->lpm;
+	sch->lpm = ((sch->schib.pmcw.pim &
+		     sch->schib.pmcw.pam &
+		     sch->schib.pmcw.pom)
+		    | chp_mask) & sch->opm;
+	if (!old_lpm && sch->lpm)
+		device_trigger_reprobe(sch);
+	else if (sch->driver && sch->driver->verify)
+		sch->driver->verify(&sch->dev);
+
+	spin_unlock_irq(&sch->lock);
+	put_device(&sch->dev);
+	return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
+}
+
+
+static int
+s390_process_res_acc (struct res_acc_data *res_data)
+{
 	int rc;
-	struct subchannel_id schid;
 	char dbf_txt[15];
 
-	sprintf(dbf_txt, "accpr%x", chpid);
+	sprintf(dbf_txt, "accpr%x", res_data->chp->id);
 	CIO_TRACE_EVENT( 2, dbf_txt);
-	if (fla != 0) {
-		sprintf(dbf_txt, "fla%x", fla);
+	if (res_data->fla != 0) {
+		sprintf(dbf_txt, "fla%x", res_data->fla);
 		CIO_TRACE_EVENT( 2, dbf_txt);
 	}
 
@@ -367,71 +435,11 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
 	 * The more information we have (info), the less scanning
 	 * will we have to do.
 	 */
-
-	if (!get_chp_status(chpid))
-		return 0; /* no need to do the rest */
-
-	rc = 0;
-	init_subchannel_id(&schid);
-	do {
-		int chp_mask, old_lpm;
-
-		sch = get_subchannel_by_schid(schid);
-		if (!sch) {
-			struct schib schib;
-			int ret;
-			/*
-			 * We don't know the device yet, but since a path
-			 * may be available now to the device we'll have
-			 * to do recognition again.
-			 * Since we don't have any idea about which chpid
-			 * that beast may be on we'll have to do a stsch
-			 * on all devices, grr...
-			 */
-			if (stsch(schid, &schib)) {
-				/* We're through */
-				if (need_rescan)
-					rc = -EAGAIN;
-				break;
-			}
-			if (need_rescan) {
-				rc = -EAGAIN;
-				continue;
-			}
-			/* Put it on the slow path. */
-			ret = css_enqueue_subchannel_slow(schid);
-			if (ret) {
-				css_clear_subchannel_slow_list();
-				need_rescan = 1;
-			}
-			rc = -EAGAIN;
-			continue;
-		}
-
-		spin_lock_irq(&sch->lock);
-
-		chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);
-
-		if (chp_mask == 0) {
-
-			spin_unlock_irq(&sch->lock);
-			continue;
-		}
-		old_lpm = sch->lpm;
-		sch->lpm = ((sch->schib.pmcw.pim &
-			     sch->schib.pmcw.pam &
-			     sch->schib.pmcw.pom)
-			    | chp_mask) & sch->opm;
-		if (!old_lpm && sch->lpm)
-			device_trigger_reprobe(sch);
-		else if (sch->driver && sch->driver->verify)
-			sch->driver->verify(&sch->dev);
-
-		spin_unlock_irq(&sch->lock);
-		put_device(&sch->dev);
-		if (fla_mask == 0xffff)
-			break;
-	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+	rc = for_each_subchannel(__s390_process_res_acc, res_data);
+	if (css_slow_subchannels_exist())
+		rc = -EAGAIN;
+	else if (rc != -EAGAIN)
+		rc = 0;
 	return rc;
 }
 
@@ -469,6 +477,7 @@ int
 chsc_process_crw(void)
 {
 	int chpid, ret;
+	struct res_acc_data res_data;
 	struct {
 		struct chsc_header request;
 		u32 reserved1;
@@ -503,7 +512,7 @@ chsc_process_crw(void)
 	do {
 		int ccode, status;
 		memset(sei_area, 0, sizeof(*sei_area));
-
+		memset(&res_data, 0, sizeof(struct res_acc_data));
 		sei_area->request = (struct chsc_header) {
 			.length = 0x0010,
 			.code = 0x000e,
@@ -576,26 +585,23 @@ chsc_process_crw(void)
 		if (status < 0)
 			new_channel_path(sei_area->rsid);
 		else if (!status)
-			return 0;
-		if ((sei_area->vf & 0x80) == 0) {
-			pr_debug("chpid: %x\n", sei_area->rsid);
-			ret = s390_process_res_acc(sei_area->rsid,
-						   0, 0);
-		} else if ((sei_area->vf & 0xc0) == 0x80) {
-			pr_debug("chpid: %x link addr: %x\n",
-				 sei_area->rsid, sei_area->fla);
-			ret = s390_process_res_acc(sei_area->rsid,
-						   sei_area->fla,
-						   0xff00);
-		} else if ((sei_area->vf & 0xc0) == 0xc0) {
-			pr_debug("chpid: %x full link addr: %x\n",
-				 sei_area->rsid, sei_area->fla);
-			ret = s390_process_res_acc(sei_area->rsid,
-						   sei_area->fla,
-						   0xffff);
+			break;
+		res_data.chp = chps[sei_area->rsid];
+		pr_debug("chpid: %x", sei_area->rsid);
+		if ((sei_area->vf & 0xc0) != 0) {
+			res_data.fla = sei_area->fla;
+			if ((sei_area->vf & 0xc0) == 0xc0) {
+				pr_debug(" full link addr: %x",
+					 sei_area->fla);
+				res_data.fla_mask = 0xffff;
+			} else {
+				pr_debug(" link addr: %x",
+					 sei_area->fla);
+				res_data.fla_mask = 0xff00;
+			}
 		}
-		pr_debug("\n");
-
+		ret = s390_process_res_acc(&res_data);
+		pr_debug("\n\n");
 		break;
 
 		default: /* other stuff */
@@ -607,12 +613,70 @@ chsc_process_crw(void)
 	return ret;
 }
 
+static inline int
+__chp_add_new_sch(struct subchannel_id schid)
+{
+	struct schib schib;
+	int ret;
+
+	if (stsch(schid, &schib))
+		/* We're through */
+		return need_rescan ? -EAGAIN : -ENXIO;
+
+	/* Put it on the slow path. */
+	ret = css_enqueue_subchannel_slow(schid);
+	if (ret) {
+		css_clear_subchannel_slow_list();
+		need_rescan = 1;
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+
 static int
-chp_add(int chpid)
+__chp_add(struct subchannel_id schid, void *data)
 {
+	int i;
+	struct channel_path *chp;
 	struct subchannel *sch;
-	int ret, rc;
-	struct subchannel_id schid;
+
+	chp = (struct channel_path *)data;
+	sch = get_subchannel_by_schid(schid);
+	if (!sch)
+		/* Check if the subchannel is now available. */
+		return __chp_add_new_sch(schid);
+	spin_lock(&sch->lock);
+	for (i=0; i<8; i++)
+		if (sch->schib.pmcw.chpid[i] == chp->id) {
+			if (stsch(sch->schid, &sch->schib) != 0) {
+				/* Endgame. */
+				spin_unlock(&sch->lock);
+				return -ENXIO;
+			}
+			break;
+		}
+	if (i==8) {
+		spin_unlock(&sch->lock);
+		return 0;
+	}
+	sch->lpm = ((sch->schib.pmcw.pim &
+		     sch->schib.pmcw.pam &
+		     sch->schib.pmcw.pom)
+		    | 0x80 >> i) & sch->opm;
+
+	if (sch->driver && sch->driver->verify)
+		sch->driver->verify(&sch->dev);
+
+	spin_unlock(&sch->lock);
+	put_device(&sch->dev);
+	return 0;
+}
+
+static int
+chp_add(int chpid)
+{
+	int rc;
 	char dbf_txt[15];
 
 	if (!get_chp_status(chpid))
@@ -621,60 +685,11 @@ chp_add(int chpid)
 	sprintf(dbf_txt, "cadd%x", chpid);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
-	rc = 0;
-	init_subchannel_id(&schid);
-	do {
-		int i;
-
-		sch = get_subchannel_by_schid(schid);
-		if (!sch) {
-			struct schib schib;
-
-			if (stsch(schid, &schib)) {
-				/* We're through */
-				if (need_rescan)
-					rc = -EAGAIN;
-				break;
-			}
-			if (need_rescan) {
-				rc = -EAGAIN;
-				continue;
-			}
-			/* Put it on the slow path. */
-			ret = css_enqueue_subchannel_slow(schid);
-			if (ret) {
-				css_clear_subchannel_slow_list();
-				need_rescan = 1;
-			}
-			rc = -EAGAIN;
-			continue;
-		}
-
-		spin_lock(&sch->lock);
-		for (i=0; i<8; i++)
-			if (sch->schib.pmcw.chpid[i] == chpid) {
-				if (stsch(sch->schid, &sch->schib) != 0) {
-					/* Endgame. */
-					spin_unlock(&sch->lock);
-					return rc;
-				}
-				break;
-			}
-		if (i==8) {
-			spin_unlock(&sch->lock);
-			return rc;
-		}
-		sch->lpm = ((sch->schib.pmcw.pim &
-			     sch->schib.pmcw.pam &
-			     sch->schib.pmcw.pom)
-			    | 0x80 >> i) & sch->opm;
-
-		if (sch->driver && sch->driver->verify)
-			sch->driver->verify(&sch->dev);
-
-		spin_unlock(&sch->lock);
-		put_device(&sch->dev);
-	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+	rc = for_each_subchannel(__chp_add, chps[chpid]);
+	if (css_slow_subchannels_exist())
+		rc = -EAGAIN;
+	if (rc != -EAGAIN)
+		rc = 0;
 	return rc;
 }
 
@@ -786,6 +801,29 @@ s390_subchannel_vary_chpid_on(struct device *dev, void *data)
 	return 0;
 }
 
+static int
+__s390_vary_chpid_on(struct subchannel_id schid, void *data)
+{
+	struct schib schib;
+	struct subchannel *sch;
+
+	sch = get_subchannel_by_schid(schid);
+	if (sch) {
+		put_device(&sch->dev);
+		return 0;
+	}
+	if (stsch(schid, &schib))
+		/* We're through */
+		return -ENXIO;
+	/* Put it on the slow path. */
+	if (css_enqueue_subchannel_slow(schid)) {
+		css_clear_subchannel_slow_list();
+		need_rescan = 1;
+		return -EAGAIN;
+	}
+	return 0;
+}
+
 /*
  * Function: s390_vary_chpid
  * Varies the specified chpid online or offline
@@ -794,9 +832,7 @@ static int
 s390_vary_chpid( __u8 chpid, int on)
 {
 	char dbf_text[15];
-	int status, ret;
-	struct subchannel_id schid;
-	struct subchannel *sch;
+	int status;
 
 	sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
 	CIO_TRACE_EVENT( 2, dbf_text);
@@ -821,31 +857,9 @@ s390_vary_chpid( __u8 chpid, int on)
 	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
 			 s390_subchannel_vary_chpid_on :
 			 s390_subchannel_vary_chpid_off);
-	if (!on)
-		goto out;
-	/* Scan for new devices on varied on path. */
-	init_subchannel_id(&schid);
-	do {
-		struct schib schib;
-
-		if (need_rescan)
-			break;
-		sch = get_subchannel_by_schid(schid);
-		if (sch) {
-			put_device(&sch->dev);
-			continue;
-		}
-		if (stsch(schid, &schib))
-			/* We're through */
-			break;
-		/* Put it on the slow path. */
-		ret = css_enqueue_subchannel_slow(schid);
-		if (ret) {
-			css_clear_subchannel_slow_list();
-			need_rescan = 1;
-		}
-	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
-out:
+	if (on)
+		/* Scan for new devices on varied on path. */
+		for_each_subchannel(__s390_vary_chpid_on, NULL);
 	if (need_rescan || css_slow_subchannels_exist())
 		queue_work(slow_path_wq, &slow_path_work);
 	return 0;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 396bada65f86..3eb6cb608fc9 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -691,7 +691,22 @@ wait_cons_dev (void)
 }
 
 static int
-cio_console_irq(void)
+cio_test_for_console(struct subchannel_id schid, void *data)
+{
+	if (stsch(schid, &console_subchannel.schib) != 0)
+		return -ENXIO;
+	if (console_subchannel.schib.pmcw.dnv &&
+	    console_subchannel.schib.pmcw.dev ==
+	    console_devno) {
+		console_irq = schid.sch_no;
+		return 1; /* found */
+	}
+	return 0;
+}
+
+
+static int
+cio_get_console_sch_no(void)
 {
 	struct subchannel_id schid;
 
@@ -705,16 +720,7 @@ cio_console_irq(void)
 		console_devno = console_subchannel.schib.pmcw.dev;
 	} else if (console_devno != -1) {
 		/* At least the console device number is known. */
-		do {
-			if (stsch(schid, &console_subchannel.schib) != 0)
-				break;
-			if (console_subchannel.schib.pmcw.dnv &&
-			    console_subchannel.schib.pmcw.dev ==
-			    console_devno) {
-				console_irq = schid.sch_no;
-				break;
-			}
-		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+		for_each_subchannel(cio_test_for_console, NULL);
 		if (console_irq == -1)
 			return -1;
 	} else {
@@ -730,19 +736,19 @@ cio_console_irq(void)
 struct subchannel *
 cio_probe_console(void)
 {
-	int irq, ret;
+	int sch_no, ret;
 	struct subchannel_id schid;
 
 	if (xchg(&console_subchannel_in_use, 1) != 0)
 		return ERR_PTR(-EBUSY);
-	irq = cio_console_irq();
-	if (irq == -1) {
+	sch_no = cio_get_console_sch_no();
+	if (sch_no == -1) {
 		console_subchannel_in_use = 0;
 		return ERR_PTR(-ENODEV);
 	}
 	memset(&console_subchannel, 0, sizeof(struct subchannel));
 	init_subchannel_id(&schid);
-	schid.sch_no = irq;
+	schid.sch_no = sch_no;
 	ret = cio_validate_subchannel(&console_subchannel, schid);
 	if (ret) {
 		console_subchannel_in_use = 0;
@@ -830,32 +836,33 @@ __clear_subchannel_easy(struct subchannel_id schid)
 }
 
 extern void do_reipl(unsigned long devno);
+static int
+__shutdown_subchannel_easy(struct subchannel_id schid, void *data)
+{
+	struct schib schib;
+
+	if (stsch(schid, &schib))
+		return -ENXIO;
+	if (!schib.pmcw.ena)
+		return 0;
+	switch(__disable_subchannel_easy(schid, &schib)) {
+	case 0:
+	case -ENODEV:
+		break;
+	default: /* -EBUSY */
+		if (__clear_subchannel_easy(schid))
+			break; /* give up... */
+		stsch(schid, &schib);
+		__disable_subchannel_easy(schid, &schib);
+	}
+	return 0;
+}
 
-/* Clear all subchannels. */
 void
 clear_all_subchannels(void)
 {
-	struct subchannel_id schid;
-
 	local_irq_disable();
-	init_subchannel_id(&schid);
-	do {
-		struct schib schib;
-		if (stsch(schid, &schib))
-			break; /* break out of the loop */
-		if (!schib.pmcw.ena)
-			continue;
-		switch(__disable_subchannel_easy(schid, &schib)) {
-		case 0:
-		case -ENODEV:
-			break;
-		default: /* -EBUSY */
-			if (__clear_subchannel_easy(schid))
-				break; /* give up... jump out of switch */
-			stsch(schid, &schib);
-			__disable_subchannel_easy(schid, &schib);
-		}
-	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+	for_each_subchannel(__shutdown_subchannel_easy, NULL);
 }
 
 /* Make sure all subchannels are quiet before we re-ipl an lpar. */
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 5137dafd1e8d..dba632a5f71f 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -21,7 +21,6 @@
 #include "ioasm.h"
 #include "chsc.h"
 
-unsigned int highest_subchannel;
 int need_rescan = 0;
 int css_init_done = 0;
 
@@ -32,6 +31,22 @@ struct device css_bus_device = {
 	.bus_id = "css0",
 };
 
+inline int
+for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
+{
+	struct subchannel_id schid;
+	int ret;
+
+	init_subchannel_id(&schid);
+	ret = -ENODEV;
+	do {
+		ret = fn(schid, data);
+		if (ret)
+			break;
+	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+	return ret;
+}
+
 static struct subchannel *
 css_alloc_subchannel(struct subchannel_id schid)
 {
@@ -280,25 +295,10 @@ css_evaluate_subchannel(struct subchannel_id schid, int slow)
 	return ret;
 }
 
-static void
-css_rescan_devices(void)
+static int
+css_rescan_devices(struct subchannel_id schid, void *data)
 {
-	int ret;
-	struct subchannel_id schid;
-
-	init_subchannel_id(&schid);
-	do {
-		ret = css_evaluate_subchannel(schid, 1);
-		/* No more memory. It doesn't make sense to continue. No
-		 * panic because this can happen in midflight and just
-		 * because we can't use a new device is no reason to crash
-		 * the system. */
-		if (ret == -ENOMEM)
-			break;
-		/* -ENXIO indicates that there are no more subchannels. */
-		if (ret == -ENXIO)
-			break;
-	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+	return css_evaluate_subchannel(schid, 1);
 }
 
 struct slow_subchannel {
@@ -316,7 +316,7 @@ css_trigger_slow_path(void)
 
 	if (need_rescan) {
 		need_rescan = 0;
-		css_rescan_devices();
+		for_each_subchannel(css_rescan_devices, NULL);
 		return;
 	}
 
@@ -383,6 +383,43 @@ css_process_crw(int irq)
 	return ret;
 }
 
+static int __init
+__init_channel_subsystem(struct subchannel_id schid, void *data)
+{
+	struct subchannel *sch;
+	int ret;
+
+	if (cio_is_console(schid))
+		sch = cio_get_console_subchannel();
+	else {
+		sch = css_alloc_subchannel(schid);
+		if (IS_ERR(sch))
+			ret = PTR_ERR(sch);
+		else
+			ret = 0;
+		switch (ret) {
+		case 0:
+			break;
+		case -ENOMEM:
+			panic("Out of memory in init_channel_subsystem\n");
+		/* -ENXIO: no more subchannels. */
+		case -ENXIO:
+			return ret;
+		default:
+			return 0;
+		}
+	}
+	/*
+	 * We register ALL valid subchannels in ioinfo, even those
+	 * that have been present before init_channel_subsystem.
+	 * These subchannels can't have been registered yet (kmalloc
+	 * not working) so we do it now. This is true e.g. for the
+	 * console subchannel.
+	 */
+	css_register_subchannel(sch);
+	return 0;
+}
+
 static void __init
 css_generate_pgid(void)
 {
@@ -410,7 +447,6 @@ static int __init
 init_channel_subsystem (void)
 {
 	int ret;
-	struct subchannel_id schid;
 
 	if (chsc_determine_css_characteristics() == 0)
 		css_characteristics_avail = 1;
@@ -426,38 +462,8 @@ init_channel_subsystem (void)
 
 	ctl_set_bit(6, 28);
 
-	init_subchannel_id(&schid);
-	do {
-		struct subchannel *sch;
-
-		if (cio_is_console(schid))
-			sch = cio_get_console_subchannel();
-		else {
-			sch = css_alloc_subchannel(schid);
-			if (IS_ERR(sch))
-				ret = PTR_ERR(sch);
-			else
-				ret = 0;
-			if (ret == -ENOMEM)
-				panic("Out of memory in "
-				      "init_channel_subsystem\n");
-			/* -ENXIO: no more subchannels. */
-			if (ret == -ENXIO)
-				break;
-			if (ret)
-				continue;
-		}
-		/*
-		 * We register ALL valid subchannels in ioinfo, even those
-		 * that have been present before init_channel_subsystem.
-		 * These subchannels can't have been registered yet (kmalloc
-		 * not working) so we do it now. This is true e.g. for the
-		 * console subchannel.
-		 */
-		css_register_subchannel(sch);
-	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+	for_each_subchannel(__init_channel_subsystem, NULL);
 	return 0;
-
 out_bus:
 	bus_unregister(&css_bus_type);
 out:
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index f26e16daecb5..71efca25476d 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -126,6 +126,7 @@ extern struct css_driver io_subchannel_driver;
 extern int css_probe_device(struct subchannel_id);
 extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
 extern int css_init_done;
+extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
 
 #define __MAX_SUBCHANNEL 65535
 