Diffstat (limited to 'drivers/s390/cio')

 drivers/s390/cio/css.c    | 26 ++++++++++++++++++--------
 drivers/s390/cio/css.h    |  2 +-
 drivers/s390/cio/device.c | 28 ++++++----------------------
 drivers/s390/cio/device.h |  1 -
 4 files changed, 25 insertions(+), 32 deletions(-)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 7679aee6fa14..99fcf9d0ea14 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -232,7 +232,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
 	if (!get_device(&sch->dev))
 		return;
 	sch->todo = todo;
-	if (!queue_work(slow_path_wq, &sch->todo_work)) {
+	if (!queue_work(cio_work_q, &sch->todo_work)) {
 		/* Already queued, release workqueue ref. */
 		put_device(&sch->dev);
 	}
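
The rename above sits inside a pattern worth calling out: a device reference pins the subchannel while its todo work is pending. A minimal sketch of that pattern, with hypothetical names (my_wq, my_sched_work) and assuming the work handler itself calls put_device() when it finishes:

#include <linux/device.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* created elsewhere */

/* Pin the device before queueing. queue_work() returns false if the
 * item was already pending; the extra reference taken here must then
 * be dropped, since only one handler run will happen. */
static void my_sched_work(struct device *dev, struct work_struct *work)
{
	if (!get_device(dev))
		return;			/* device is going away */
	if (!queue_work(my_wq, work))
		put_device(dev);	/* already queued, drop our ref */
}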
@@ -543,7 +543,7 @@ static void css_slow_path_func(struct work_struct *unused)
 }

 static DECLARE_WORK(slow_path_work, css_slow_path_func);
-struct workqueue_struct *slow_path_wq;
+struct workqueue_struct *cio_work_q;

 void css_schedule_eval(struct subchannel_id schid)
 {
@@ -552,7 +552,7 @@ void css_schedule_eval(struct subchannel_id schid)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_sch_add(slow_subchannel_set, schid);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }

@@ -563,7 +563,7 @@ void css_schedule_eval_all(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_fill(slow_subchannel_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }

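css_schedule_eval(), css_schedule_eval_all() and css_schedule_eval_all_unreg() (next hunk) all share one shape. A condensed sketch of it, where cio_work_q and css_eval_scheduled are real and the other names (my_lock, my_mark_targets, my_eval_work) are illustrative:

/* Under the lock: record which subchannels need evaluation and raise
 * the "evaluation pending" flag that settlers wait on, then kick the
 * single slow-path work item on the shared queue. */
static void my_schedule_eval(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	my_mark_targets();	/* idset_sch_add()/idset_fill()/... */
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &my_eval_work);
	spin_unlock_irqrestore(&my_lock, flags);
}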
@@ -594,14 +594,14 @@ void css_schedule_eval_all_unreg(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_add_set(slow_subchannel_set, unreg_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 	idset_free(unreg_set);
 }

 void css_wait_for_slow_path(void)
 {
-	flush_workqueue(slow_path_wq);
+	flush_workqueue(cio_work_q);
 }

 /* Schedule reprobing of all unregistered subchannels. */
@@ -992,12 +992,21 @@ static int __init channel_subsystem_init(void)
 	ret = css_bus_init();
 	if (ret)
 		return ret;
-
+	cio_work_q = create_singlethread_workqueue("cio");
+	if (!cio_work_q) {
+		ret = -ENOMEM;
+		goto out_bus;
+	}
 	ret = io_subchannel_init();
 	if (ret)
-		css_bus_cleanup();
+		goto out_wq;

 	return ret;
+out_wq:
+	destroy_workqueue(cio_work_q);
+out_bus:
+	css_bus_cleanup();
+	return ret;
 }
 subsys_initcall(channel_subsystem_init);

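Reassembled from the hunk above, the whole init path now reads as a standard reverse-order unwind: each completed step has a matching cleanup label below the point where a later step can fail.

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}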
@@ -1020,6 +1029,7 @@ static int __init channel_subsystem_init_sync(void)
 	css_schedule_eval_all();
 	/* Wait for the evaluation of subchannels to finish. */
 	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
+	flush_workqueue(cio_work_q);
 	/* Wait for the subchannel type specific initialization to finish */
 	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
 }
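The added flush appears to close a gap: the evaluation pass can itself queue todo work on cio_work_q, so waiting on css_eval_scheduled alone is not enough. The synchronization step as it stands after the patch:

/* Wait until no evaluation pass is scheduled, then drain the shared
 * workqueue so that todo work queued by that pass has also finished
 * before the type-specific settle callbacks run. */
wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
flush_workqueue(cio_work_q);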
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index fe84b92cde60..e05525d53ea5 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -151,7 +151,7 @@ int sch_is_pseudo_sch(struct subchannel *);
 struct schib;
 int css_sch_is_valid(struct schib *);

-extern struct workqueue_struct *slow_path_wq;
+extern struct workqueue_struct *cio_work_q;
 void css_wait_for_slow_path(void);
 void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
 #endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index a6c7d5426fb2..9c9ea45141af 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -136,7 +136,6 @@ static int io_subchannel_sch_event(struct subchannel *, int);
 static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
 				   int);
 static void recovery_func(unsigned long data);
-struct workqueue_struct *ccw_device_work;
 wait_queue_head_t ccw_device_init_wq;
 atomic_t ccw_device_init_count;

@@ -163,7 +162,7 @@ static void io_subchannel_settle(void)
 {
 	wait_event(ccw_device_init_wq,
 		   atomic_read(&ccw_device_init_count) == 0);
-	flush_workqueue(ccw_device_work);
+	flush_workqueue(cio_work_q);
 }

 static struct css_driver io_subchannel_driver = {
@@ -188,27 +187,13 @@ int __init io_subchannel_init(void)
 	atomic_set(&ccw_device_init_count, 0);
 	setup_timer(&recovery_timer, recovery_func, 0);

-	ccw_device_work = create_singlethread_workqueue("cio");
-	if (!ccw_device_work)
-		return -ENOMEM;
-	slow_path_wq = create_singlethread_workqueue("kslowcrw");
-	if (!slow_path_wq) {
-		ret = -ENOMEM;
-		goto out_err;
-	}
-	if ((ret = bus_register (&ccw_bus_type)))
-		goto out_err;
-
+	ret = bus_register(&ccw_bus_type);
+	if (ret)
+		return ret;
 	ret = css_driver_register(&io_subchannel_driver);
 	if (ret)
-		goto out_err;
+		bus_unregister(&ccw_bus_type);

-	return 0;
-out_err:
-	if (ccw_device_work)
-		destroy_workqueue(ccw_device_work);
-	if (slow_path_wq)
-		destroy_workqueue(slow_path_wq);
 	return ret;
 }

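With workqueue creation hoisted into channel_subsystem_init(), the visible part of io_subchannel_init() reassembles to the much flatter shape below; a css_driver_register() failure now rolls back the bus registration directly instead of jumping to a shared error label.

int __init io_subchannel_init(void)
{
	int ret;

	/* ... (lines above the hunk unchanged) ... */
	atomic_set(&ccw_device_init_count, 0);
	setup_timer(&recovery_timer, recovery_func, 0);

	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}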
@@ -2028,7 +2013,7 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
 	/* Get workqueue ref. */
 	if (!get_device(&cdev->dev))
 		return;
-	if (!queue_work(slow_path_wq, &cdev->private->todo_work)) {
+	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
 		/* Already queued, release workqueue ref. */
 		put_device(&cdev->dev);
 	}
@@ -2041,5 +2026,4 @@ EXPORT_SYMBOL(ccw_driver_register);
 EXPORT_SYMBOL(ccw_driver_unregister);
 EXPORT_SYMBOL(get_ccwdev_by_busid);
 EXPORT_SYMBOL(ccw_bus_type);
-EXPORT_SYMBOL(ccw_device_work);
 EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index bcfe13e42638..ef60c8f5cd14 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -71,7 +71,6 @@ dev_fsm_final_state(struct ccw_device *cdev)
 	       cdev->private->state == DEV_STATE_BOXED);
 }

-extern struct workqueue_struct *ccw_device_work;
 extern wait_queue_head_t ccw_device_init_wq;
 extern atomic_t ccw_device_init_count;
 int __init io_subchannel_init(void);