commit    703e5c9993639284bc0a8929b6de362424df7019
author    Sebastian Ott <sebott@linux.vnet.ibm.com>      2009-09-22 16:58:38 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>    2009-09-22 16:58:42 -0400
tree      13d6348ca6e8a00aefa302c6a776bf50180a6ac7
parent    b0a285d31bd475fdd4312e457288be558b705e55
[S390] cio: introduce consistent subchannel scanning
Previously, there were multiple subchannel scanning mechanisms
which could potentially conflict with each other. Fix this problem
by moving blacklist- and ccw-driver-triggered scanning to the
existing evaluation method.
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
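
The net effect, condensed from the css.c hunks below: css_schedule_reprobe()
no longer runs its own reprobe work item guarded by need_reprobe; it feeds the
unregistered subchannels into the same slow-path evaluation used elsewhere.
A rough sketch of the new path (the locking, css_eval_scheduled bookkeeping
and the console special case are in the full diff):

static int __unset_registered(struct device *dev, void *data)
{
	/* Called for every registered subchannel device: drop its id
	 * from the set, leaving only unregistered subchannels behind. */
	idset_sch_del(data, to_subchannel(dev)->schid);
	return 0;
}

void css_schedule_eval_all_unreg(void)
{
	struct idset *unreg_set = idset_sch_new();

	if (!unreg_set) {
		css_schedule_eval_all();	/* fallback: evaluate everything */
		return;
	}
	/* Start with all subchannel ids, remove the registered ones ... */
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* ... and merge the remainder into the slow-path set before
	 * kicking the evaluation worker. */
	idset_add_set(slow_subchannel_set, unreg_set);
	queue_work(slow_path_wq, &slow_path_work);
	idset_free(unreg_set);
}

void css_schedule_reprobe(void)
{
	css_schedule_eval_all_unreg();
}

The old reprobe_subchannel()/reprobe_all() machinery and the need_reprobe flag
go away, and channel_subsystem_init_sync() now simply triggers
css_schedule_eval_all() instead of registering subchannels itself.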
Diffstat (limited to 'drivers/s390')

-rw-r--r--  drivers/s390/cio/css.c   | 146
-rw-r--r--  drivers/s390/cio/idset.c |  10
-rw-r--r--  drivers/s390/cio/idset.h |   1

3 files changed, 51 insertions(+), 106 deletions(-)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 5e217bbf8797..91c25706fa83 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -31,7 +31,6 @@
 #include "chp.h"
 
 int css_init_done = 0;
-static int need_reprobe = 0;
 int max_ssid;
 
 struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
@@ -315,12 +314,18 @@ int css_probe_device(struct subchannel_id schid)
 	int ret;
 	struct subchannel *sch;
 
-	sch = css_alloc_subchannel(schid);
-	if (IS_ERR(sch))
-		return PTR_ERR(sch);
+	if (cio_is_console(schid))
+		sch = cio_get_console_subchannel();
+	else {
+		sch = css_alloc_subchannel(schid);
+		if (IS_ERR(sch))
+			return PTR_ERR(sch);
+	}
 	ret = css_register_subchannel(sch);
-	if (ret)
-		put_device(&sch->dev);
+	if (ret) {
+		if (!cio_is_console(schid))
+			put_device(&sch->dev);
+	}
 	return ret;
 }
 
@@ -510,76 +515,48 @@ void css_schedule_eval_all(void)
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
-void css_wait_for_slow_path(void)
+static int __unset_registered(struct device *dev, void *data)
 {
-	flush_workqueue(slow_path_wq);
-}
-
-/* Reprobe subchannel if unregistered. */
-static int reprobe_subchannel(struct subchannel_id schid, void *data)
-{
-	int ret;
-
-	CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
-		      schid.ssid, schid.sch_no);
-	if (need_reprobe)
-		return -EAGAIN;
-
-	ret = css_probe_device(schid);
-	switch (ret) {
-	case 0:
-		break;
-	case -ENXIO:
-	case -ENOMEM:
-	case -EIO:
-		/* These should abort looping */
-		break;
-	default:
-		ret = 0;
-	}
-
-	return ret;
-}
+	struct idset *set = data;
+	struct subchannel *sch = to_subchannel(dev);
 
-static void reprobe_after_idle(struct work_struct *unused)
-{
-	/* Make sure initial subchannel scan is done. */
-	wait_event(ccw_device_init_wq,
-		   atomic_read(&ccw_device_init_count) == 0);
-	if (need_reprobe)
-		css_schedule_reprobe();
+	idset_sch_del(set, sch->schid);
+	return 0;
 }
 
-static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);
-
-/* Work function used to reprobe all unregistered subchannels. */
-static void reprobe_all(struct work_struct *unused)
+void css_schedule_eval_all_unreg(void)
 {
-	int ret;
-
-	CIO_MSG_EVENT(4, "reprobe start\n");
+	unsigned long flags;
+	struct idset *unreg_set;
 
-	/* Make sure initial subchannel scan is done. */
-	if (atomic_read(&ccw_device_init_count) != 0) {
-		queue_work(ccw_device_work, &reprobe_idle_work);
+	/* Find unregistered subchannels. */
+	unreg_set = idset_sch_new();
+	if (!unreg_set) {
+		/* Fallback. */
+		css_schedule_eval_all();
 		return;
 	}
-	need_reprobe = 0;
-	ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
-
-	CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
-		      need_reprobe);
+	idset_fill(unreg_set);
+	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+	/* Apply to slow_subchannel_set. */
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	idset_add_set(slow_subchannel_set, unreg_set);
+	atomic_set(&css_eval_scheduled, 1);
+	queue_work(slow_path_wq, &slow_path_work);
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+	idset_free(unreg_set);
 }
 
-static DECLARE_WORK(css_reprobe_work, reprobe_all);
+void css_wait_for_slow_path(void)
+{
+	flush_workqueue(slow_path_wq);
+}
 
 /* Schedule reprobing of all unregistered subchannels. */
 void css_schedule_reprobe(void)
 {
-	need_reprobe = 1;
-	queue_work(slow_path_wq, &css_reprobe_work);
+	css_schedule_eval_all_unreg();
 }
-
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
 /*
@@ -615,48 +592,6 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 	css_evaluate_subchannel(mchk_schid, 0);
 }
 
-static int __init setup_subchannel(struct subchannel_id schid, void *data)
-{
-	struct subchannel *sch;
-	int ret;
-
-	if (cio_is_console(schid))
-		sch = cio_get_console_subchannel();
-	else {
-		sch = css_alloc_subchannel(schid);
-		if (IS_ERR(sch))
-			ret = PTR_ERR(sch);
-		else
-			ret = 0;
-		switch (ret) {
-		case 0:
-			break;
-		case -ENOMEM:
-			panic("Out of memory in init_channel_subsystem\n");
-		/* -ENXIO: no more subchannels. */
-		case -ENXIO:
-			return ret;
-		/* -EIO: this subchannel set not supported. */
-		case -EIO:
-			return ret;
-		default:
-			return 0;
-		}
-	}
-	/*
-	 * We register ALL valid subchannels in ioinfo, even those
-	 * that have been present before init_channel_subsystem.
-	 * These subchannels can't have been registered yet (kmalloc
-	 * not working) so we do it now. This is true e.g. for the
-	 * console subchannel.
-	 */
-	if (css_register_subchannel(sch)) {
-		if (!cio_is_console(schid))
-			put_device(&sch->dev);
-	}
-	return 0;
-}
-
 static void __init
 css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
 {
@@ -1028,11 +963,10 @@ static int css_settle(struct device_driver *drv, void *unused)
  */
 static int __init channel_subsystem_init_sync(void)
 {
-	/* Allocate and register subchannels. */
-	for_each_subchannel(setup_subchannel, NULL);
+	/* Start initial subchannel evaluation. */
+	css_schedule_eval_all();
 	/* Wait for the evaluation of subchannels to finish. */
 	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
-
 	/* Wait for the subchannel type specific initialization to finish */
 	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
 }
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 5c88faf5b897..4d10981c7cc1 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -120,3 +120,13 @@ int idset_is_empty(struct idset *set)
 		return 1;
 	return 0;
 }
+
+void idset_add_set(struct idset *to, struct idset *from)
+{
+	unsigned long i, len;
+
+	len = min(__BITOPS_WORDS(to->num_ssid * to->num_id),
+		  __BITOPS_WORDS(from->num_ssid * from->num_id));
+	for (i = 0; i < len ; i++)
+		to->bitmap[i] |= from->bitmap[i];
+}
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index ca1398aadc7e..7543da4529f9 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -22,5 +22,6 @@ void idset_sch_del(struct idset *set, struct subchannel_id id);
 int idset_sch_contains(struct idset *set, struct subchannel_id id);
 int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
 int idset_is_empty(struct idset *set);
+void idset_add_set(struct idset *to, struct idset *from);
 
 #endif /* S390_IDSET_H */