Diffstat (limited to 'drivers/s390/cio/css.c')
 drivers/s390/cio/css.c | 79
 1 file changed, 66 insertions(+), 13 deletions(-)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 7679aee6fa14..2769da54f2b9 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -18,6 +18,7 @@
 #include <linux/list.h>
 #include <linux/reboot.h>
 #include <linux/suspend.h>
+#include <linux/proc_fs.h>
 #include <asm/isc.h>
 #include <asm/crw.h>
 
@@ -232,7 +233,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
 	if (!get_device(&sch->dev))
 		return;
 	sch->todo = todo;
-	if (!queue_work(slow_path_wq, &sch->todo_work)) {
+	if (!queue_work(cio_work_q, &sch->todo_work)) {
 		/* Already queued, release workqueue ref. */
 		put_device(&sch->dev);
 	}
@@ -543,7 +544,7 @@ static void css_slow_path_func(struct work_struct *unused)
 }
 
 static DECLARE_WORK(slow_path_work, css_slow_path_func);
-struct workqueue_struct *slow_path_wq;
+struct workqueue_struct *cio_work_q;
 
 void css_schedule_eval(struct subchannel_id schid)
 {
@@ -552,7 +553,7 @@ void css_schedule_eval(struct subchannel_id schid)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_sch_add(slow_subchannel_set, schid);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -563,7 +564,7 @@ void css_schedule_eval_all(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_fill(slow_subchannel_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -594,14 +595,14 @@ void css_schedule_eval_all_unreg(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_add_set(slow_subchannel_set, unreg_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 	idset_free(unreg_set);
 }
 
 void css_wait_for_slow_path(void)
 {
-	flush_workqueue(slow_path_wq);
+	flush_workqueue(cio_work_q);
 }
 
 /* Schedule reprobing of all unregistered subchannels. */
@@ -992,12 +993,21 @@ static int __init channel_subsystem_init(void)
 	ret = css_bus_init();
 	if (ret)
 		return ret;
-
+	cio_work_q = create_singlethread_workqueue("cio");
+	if (!cio_work_q) {
+		ret = -ENOMEM;
+		goto out_bus;
+	}
 	ret = io_subchannel_init();
 	if (ret)
-		css_bus_cleanup();
+		goto out_wq;
 
 	return ret;
+out_wq:
+	destroy_workqueue(cio_work_q);
+out_bus:
+	css_bus_cleanup();
+	return ret;
 }
 subsys_initcall(channel_subsystem_init);
 
@@ -1006,10 +1016,25 @@ static int css_settle(struct device_driver *drv, void *unused)
 	struct css_driver *cssdrv = to_cssdriver(drv);
 
 	if (cssdrv->settle)
-		cssdrv->settle();
+		return cssdrv->settle();
 	return 0;
 }
 
+int css_complete_work(void)
+{
+	int ret;
+
+	/* Wait for the evaluation of subchannels to finish. */
+	ret = wait_event_interruptible(css_eval_wq,
+			atomic_read(&css_eval_scheduled) == 0);
+	if (ret)
+		return -EINTR;
+	flush_workqueue(cio_work_q);
+	/* Wait for the subchannel type specific initialization to finish */
+	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+}
+
+
 /*
  * Wait for the initialization of devices to finish, to make sure we are
  * done with our setup if the search for the root device starts.
@@ -1018,13 +1043,41 @@ static int __init channel_subsystem_init_sync(void)
 {
 	/* Start initial subchannel evaluation. */
 	css_schedule_eval_all();
-	/* Wait for the evaluation of subchannels to finish. */
-	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
-	/* Wait for the subchannel type specific initialization to finish */
-	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+	css_complete_work();
+	return 0;
 }
 subsys_initcall_sync(channel_subsystem_init_sync);
 
+#ifdef CONFIG_PROC_FS
+static ssize_t cio_settle_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	int ret;
+
+	/* Handle pending CRW's. */
+	crw_wait_for_channel_report();
+	ret = css_complete_work();
+
+	return ret ? ret : count;
+}
+
+static const struct file_operations cio_settle_proc_fops = {
+	.write = cio_settle_write,
+};
+
+static int __init cio_settle_init(void)
+{
+	struct proc_dir_entry *entry;
+
+	entry = proc_create("cio_settle", S_IWUSR, NULL,
+			    &cio_settle_proc_fops);
+	if (!entry)
+		return -ENOMEM;
+	return 0;
+}
+device_initcall(cio_settle_init);
+#endif /*CONFIG_PROC_FS*/
+
 int sch_is_pseudo_sch(struct subchannel *sch)
 {
 	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
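
For context, here is a minimal userspace sketch (an illustration, not part of the patch) of how the /proc/cio_settle entry introduced above could be exercised. cio_settle_write() ignores the buffer contents, so the "1" payload is arbitrary: any write blocks until pending channel report words have been handled and subchannel evaluation has completed, and it returns -EINTR if the wait is interrupted.

/*
 * Illustrative only: wait for the channel subsystem to settle by writing
 * to the new write-only /proc/cio_settle entry (requires CONFIG_PROC_FS
 * and appropriate privileges, since the entry is created with S_IWUSR).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/cio_settle", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/cio_settle");
		return 1;
	}
	/* Blocks until pending CRWs and subchannel evaluation are processed. */
	if (write(fd, "1", 1) < 0) {
		perror("write /proc/cio_settle");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}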