Diffstat (limited to 'drivers/s390/cio/css.c')
-rw-r--r--  drivers/s390/cio/css.c | 26
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 7679aee6fa14..99fcf9d0ea14 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -232,7 +232,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
         if (!get_device(&sch->dev))
                 return;
         sch->todo = todo;
-        if (!queue_work(slow_path_wq, &sch->todo_work)) {
+        if (!queue_work(cio_work_q, &sch->todo_work)) {
                 /* Already queued, release workqueue ref. */
                 put_device(&sch->dev);
         }
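The hunk above only swaps which workqueue is used; the surrounding reference counting is what makes the queue_work() return value matter. Below is a minimal standalone sketch of that pattern, using illustrative names (my_obj, my_wq, my_todo_fn) that are not part of the patch: the work item pins the embedding device, and a failed queue_work() means the item was already pending, so the reference taken for this attempt is dropped again.

#include <linux/device.h>
#include <linux/workqueue.h>

/* Illustrative sketch only; none of these symbols exist in cio. */
static struct workqueue_struct *my_wq;  /* assumed to be created during init */

struct my_obj {
        struct device dev;
        struct work_struct todo_work;
};

static void my_todo_fn(struct work_struct *work)
{
        struct my_obj *obj = container_of(work, struct my_obj, todo_work);

        /* ... process the pending todo for obj ... */
        put_device(&obj->dev);          /* drop the reference taken when queueing */
}

static void my_sched_todo(struct my_obj *obj)
{
        if (!get_device(&obj->dev))     /* object is already being torn down */
                return;
        if (!queue_work(my_wq, &obj->todo_work)) {
                /* Work was already queued; release this attempt's reference. */
                put_device(&obj->dev);
        }
}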
@@ -543,7 +543,7 @@ static void css_slow_path_func(struct work_struct *unused)
 }
 
 static DECLARE_WORK(slow_path_work, css_slow_path_func);
-struct workqueue_struct *slow_path_wq;
+struct workqueue_struct *cio_work_q;
 
 void css_schedule_eval(struct subchannel_id schid)
 {
@@ -552,7 +552,7 @@ void css_schedule_eval(struct subchannel_id schid)
         spin_lock_irqsave(&slow_subchannel_lock, flags);
         idset_sch_add(slow_subchannel_set, schid);
         atomic_set(&css_eval_scheduled, 1);
-        queue_work(slow_path_wq, &slow_path_work);
+        queue_work(cio_work_q, &slow_path_work);
         spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -563,7 +563,7 @@ void css_schedule_eval_all(void)
         spin_lock_irqsave(&slow_subchannel_lock, flags);
         idset_fill(slow_subchannel_set);
         atomic_set(&css_eval_scheduled, 1);
-        queue_work(slow_path_wq, &slow_path_work);
+        queue_work(cio_work_q, &slow_path_work);
         spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -594,14 +594,14 @@ void css_schedule_eval_all_unreg(void)
         spin_lock_irqsave(&slow_subchannel_lock, flags);
         idset_add_set(slow_subchannel_set, unreg_set);
         atomic_set(&css_eval_scheduled, 1);
-        queue_work(slow_path_wq, &slow_path_work);
+        queue_work(cio_work_q, &slow_path_work);
         spin_unlock_irqrestore(&slow_subchannel_lock, flags);
         idset_free(unreg_set);
 }
 
 void css_wait_for_slow_path(void)
 {
-        flush_workqueue(slow_path_wq);
+        flush_workqueue(cio_work_q);
 }
 
 /* Schedule reprobing of all unregistered subchannels. */
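css_wait_for_slow_path() keeps working unchanged here because flush_workqueue() acts as a completion barrier for whichever queue is passed in: once it returns, every work item queued before the call has finished. A small sketch of that semantics, with made-up names (my_eval_fn, my_schedule_and_wait) rather than anything from this file:

#include <linux/workqueue.h>

static void my_eval_fn(struct work_struct *work)
{
        /* ... evaluate whatever was flagged for slow-path handling ... */
}

static DECLARE_WORK(my_eval_work, my_eval_fn);

static void my_schedule_and_wait(struct workqueue_struct *wq)
{
        queue_work(wq, &my_eval_work);  /* kick off the deferred evaluation */
        flush_workqueue(wq);            /* returns only after all previously queued items ran */
}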
@@ -992,12 +992,21 @@ static int __init channel_subsystem_init(void)
         ret = css_bus_init();
         if (ret)
                 return ret;
-
+        cio_work_q = create_singlethread_workqueue("cio");
+        if (!cio_work_q) {
+                ret = -ENOMEM;
+                goto out_bus;
+        }
         ret = io_subchannel_init();
         if (ret)
-                css_bus_cleanup();
+                goto out_wq;
 
         return ret;
+out_wq:
+        destroy_workqueue(cio_work_q);
+out_bus:
+        css_bus_cleanup();
+        return ret;
 }
 subsys_initcall(channel_subsystem_init);
 
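For context, the error unwinding introduced above follows the usual init-time goto pattern: each later failure undoes the steps already completed, in reverse order, and the workqueue is destroyed before the bus is cleaned up. A hedged sketch under assumed placeholder helpers (my_bus_init(), my_driver_init(), and so on, none of which exist in cio):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_work_q;

static int my_bus_init(void)     { return 0; }  /* placeholder for the earlier setup step */
static void my_bus_cleanup(void) { }
static int my_driver_init(void)  { return 0; }  /* placeholder for the later setup step */

static int __init my_subsys_init(void)
{
        int ret;

        ret = my_bus_init();
        if (ret)
                return ret;
        my_work_q = create_singlethread_workqueue("my_wq");
        if (!my_work_q) {
                ret = -ENOMEM;
                goto out_bus;
        }
        ret = my_driver_init();
        if (ret)
                goto out_wq;
        return 0;
out_wq:
        destroy_workqueue(my_work_q);
out_bus:
        my_bus_cleanup();
        return ret;
}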
@@ -1020,6 +1029,7 @@ static int __init channel_subsystem_init_sync(void)
         css_schedule_eval_all();
         /* Wait for the evaluation of subchannels to finish. */
         wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
+        flush_workqueue(cio_work_q);
         /* Wait for the subchannel type specific initialization to finish */
         return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
 }