aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/s390/cio/css.c
diff options
context:
space:
mode:
authorSebastian Ott <sebott@linux.vnet.ibm.com>2009-09-22 16:58:38 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2009-09-22 16:58:42 -0400
commit703e5c9993639284bc0a8929b6de362424df7019 (patch)
tree13d6348ca6e8a00aefa302c6a776bf50180a6ac7 /drivers/s390/cio/css.c
parentb0a285d31bd475fdd4312e457288be558b705e55 (diff)
[S390] cio: introduce consistent subchannel scanning
Previously, there were multiple subchannel scanning mechanisms which could potentially conflict with each other. Fix this problem by moving blacklist and ccw driver triggered scanning to the existing evaluation method.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390/cio/css.c')
-rw-r--r--drivers/s390/cio/css.c146
1 file changed, 40 insertions(+), 106 deletions(-)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 5e217bbf8797..91c25706fa83 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -31,7 +31,6 @@
31#include "chp.h" 31#include "chp.h"
32 32
33int css_init_done = 0; 33int css_init_done = 0;
34static int need_reprobe = 0;
35int max_ssid; 34int max_ssid;
36 35
37struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; 36struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
@@ -315,12 +314,18 @@ int css_probe_device(struct subchannel_id schid)
315 int ret; 314 int ret;
316 struct subchannel *sch; 315 struct subchannel *sch;
317 316
318 sch = css_alloc_subchannel(schid); 317 if (cio_is_console(schid))
319 if (IS_ERR(sch)) 318 sch = cio_get_console_subchannel();
320 return PTR_ERR(sch); 319 else {
320 sch = css_alloc_subchannel(schid);
321 if (IS_ERR(sch))
322 return PTR_ERR(sch);
323 }
321 ret = css_register_subchannel(sch); 324 ret = css_register_subchannel(sch);
322 if (ret) 325 if (ret) {
323 put_device(&sch->dev); 326 if (!cio_is_console(schid))
327 put_device(&sch->dev);
328 }
324 return ret; 329 return ret;
325} 330}
326 331
@@ -510,76 +515,48 @@ void css_schedule_eval_all(void)
510 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 515 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
511} 516}
512 517
513void css_wait_for_slow_path(void) 518static int __unset_registered(struct device *dev, void *data)
514{ 519{
515 flush_workqueue(slow_path_wq); 520 struct idset *set = data;
516} 521 struct subchannel *sch = to_subchannel(dev);
517
518/* Reprobe subchannel if unregistered. */
519static int reprobe_subchannel(struct subchannel_id schid, void *data)
520{
521 int ret;
522
523 CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
524 schid.ssid, schid.sch_no);
525 if (need_reprobe)
526 return -EAGAIN;
527
528 ret = css_probe_device(schid);
529 switch (ret) {
530 case 0:
531 break;
532 case -ENXIO:
533 case -ENOMEM:
534 case -EIO:
535 /* These should abort looping */
536 break;
537 default:
538 ret = 0;
539 }
540
541 return ret;
542}
543 522
544static void reprobe_after_idle(struct work_struct *unused) 523 idset_sch_del(set, sch->schid);
545{ 524 return 0;
546 /* Make sure initial subchannel scan is done. */
547 wait_event(ccw_device_init_wq,
548 atomic_read(&ccw_device_init_count) == 0);
549 if (need_reprobe)
550 css_schedule_reprobe();
551} 525}
552 526
553static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle); 527void css_schedule_eval_all_unreg(void)
554
555/* Work function used to reprobe all unregistered subchannels. */
556static void reprobe_all(struct work_struct *unused)
557{ 528{
558 int ret; 529 unsigned long flags;
559 530 struct idset *unreg_set;
560 CIO_MSG_EVENT(4, "reprobe start\n");
561 531
562 /* Make sure initial subchannel scan is done. */ 532 /* Find unregistered subchannels. */
563 if (atomic_read(&ccw_device_init_count) != 0) { 533 unreg_set = idset_sch_new();
564 queue_work(ccw_device_work, &reprobe_idle_work); 534 if (!unreg_set) {
535 /* Fallback. */
536 css_schedule_eval_all();
565 return; 537 return;
566 } 538 }
567 need_reprobe = 0; 539 idset_fill(unreg_set);
568 ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL); 540 bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
569 541 /* Apply to slow_subchannel_set. */
570 CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret, 542 spin_lock_irqsave(&slow_subchannel_lock, flags);
571 need_reprobe); 543 idset_add_set(slow_subchannel_set, unreg_set);
544 atomic_set(&css_eval_scheduled, 1);
545 queue_work(slow_path_wq, &slow_path_work);
546 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
547 idset_free(unreg_set);
572} 548}
573 549
574static DECLARE_WORK(css_reprobe_work, reprobe_all); 550void css_wait_for_slow_path(void)
551{
552 flush_workqueue(slow_path_wq);
553}
575 554
576/* Schedule reprobing of all unregistered subchannels. */ 555/* Schedule reprobing of all unregistered subchannels. */
577void css_schedule_reprobe(void) 556void css_schedule_reprobe(void)
578{ 557{
579 need_reprobe = 1; 558 css_schedule_eval_all_unreg();
580 queue_work(slow_path_wq, &css_reprobe_work);
581} 559}
582
583EXPORT_SYMBOL_GPL(css_schedule_reprobe); 560EXPORT_SYMBOL_GPL(css_schedule_reprobe);
584 561
585/* 562/*
@@ -615,48 +592,6 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
615 css_evaluate_subchannel(mchk_schid, 0); 592 css_evaluate_subchannel(mchk_schid, 0);
616} 593}
617 594
618static int __init setup_subchannel(struct subchannel_id schid, void *data)
619{
620 struct subchannel *sch;
621 int ret;
622
623 if (cio_is_console(schid))
624 sch = cio_get_console_subchannel();
625 else {
626 sch = css_alloc_subchannel(schid);
627 if (IS_ERR(sch))
628 ret = PTR_ERR(sch);
629 else
630 ret = 0;
631 switch (ret) {
632 case 0:
633 break;
634 case -ENOMEM:
635 panic("Out of memory in init_channel_subsystem\n");
636 /* -ENXIO: no more subchannels. */
637 case -ENXIO:
638 return ret;
639 /* -EIO: this subchannel set not supported. */
640 case -EIO:
641 return ret;
642 default:
643 return 0;
644 }
645 }
646 /*
647 * We register ALL valid subchannels in ioinfo, even those
648 * that have been present before init_channel_subsystem.
649 * These subchannels can't have been registered yet (kmalloc
650 * not working) so we do it now. This is true e.g. for the
651 * console subchannel.
652 */
653 if (css_register_subchannel(sch)) {
654 if (!cio_is_console(schid))
655 put_device(&sch->dev);
656 }
657 return 0;
658}
659
660static void __init 595static void __init
661css_generate_pgid(struct channel_subsystem *css, u32 tod_high) 596css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
662{ 597{
@@ -1028,11 +963,10 @@ static int css_settle(struct device_driver *drv, void *unused)
1028 */ 963 */
1029static int __init channel_subsystem_init_sync(void) 964static int __init channel_subsystem_init_sync(void)
1030{ 965{
1031 /* Allocate and register subchannels. */ 966 /* Start initial subchannel evaluation. */
1032 for_each_subchannel(setup_subchannel, NULL); 967 css_schedule_eval_all();
1033 /* Wait for the evaluation of subchannels to finish. */ 968 /* Wait for the evaluation of subchannels to finish. */
1034 wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0); 969 wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
1035
1036 /* Wait for the subchannel type specific initialization to finish */ 970 /* Wait for the subchannel type specific initialization to finish */
1037 return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle); 971 return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
1038} 972}