path: root/drivers/s390/cio/css.c
author     Sebastian Ott <sebott@linux.vnet.ibm.com>  2010-02-26 16:37:24 -0500
committer  Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>  2010-02-26 16:37:29 -0500
commit     be5d3823f29c09676abd2eeea4f9767bc4a1a531
tree       0a89be6c2b36e1db6f1118cf2b483c84d1ac8def  /drivers/s390/cio/css.c
parent     6f5d09a0e9731a39a4d52a5902daec72c1e43692
[S390] cio: consolidate workqueues
We used to maintain 2 singlethreaded workqueues for synchronization and to
trigger work from interrupt context. Since our latest cio changes we only
use one of these workqueues. So get rid of the unused workqueue, rename
the remaining one to "cio_work_q" and move its ownership to the channel
subsystem driver.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390/cio/css.c')
-rw-r--r--  drivers/s390/cio/css.c | 26
1 file changed, 18 insertions(+), 8 deletions(-)
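For context, the pattern the patch converges on is ordinary workqueue ownership: one singlethreaded queue created at init, work queued onto it, flushed when synchronization is required, and destroyed on teardown. The sketch below is not from the kernel tree and the demo_* names are made up for illustration; only the create_singlethread_workqueue()/queue_work()/flush_workqueue()/destroy_workqueue() calls match the interfaces used in the diff.

/* Illustrative sketch only -- the demo_* identifiers are hypothetical. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_fn(struct work_struct *work)
{
	pr_info("demo work ran\n");
}

static DECLARE_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	/* One singlethreaded queue owned by this module, much as css.c now owns cio_work_q. */
	demo_wq = create_singlethread_workqueue("demo");
	if (!demo_wq)
		return -ENOMEM;
	/* queue_work() returns 0 if the item was already pending. */
	queue_work(demo_wq, &demo_work);
	/* Wait for everything queued so far to complete. */
	flush_workqueue(demo_wq);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");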
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 7679aee6fa14..99fcf9d0ea14 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -232,7 +232,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
 	if (!get_device(&sch->dev))
 		return;
 	sch->todo = todo;
-	if (!queue_work(slow_path_wq, &sch->todo_work)) {
+	if (!queue_work(cio_work_q, &sch->todo_work)) {
 		/* Already queued, release workqueue ref. */
 		put_device(&sch->dev);
 	}
@@ -543,7 +543,7 @@ static void css_slow_path_func(struct work_struct *unused)
 }
 
 static DECLARE_WORK(slow_path_work, css_slow_path_func);
-struct workqueue_struct *slow_path_wq;
+struct workqueue_struct *cio_work_q;
 
 void css_schedule_eval(struct subchannel_id schid)
 {
@@ -552,7 +552,7 @@ void css_schedule_eval(struct subchannel_id schid)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_sch_add(slow_subchannel_set, schid);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -563,7 +563,7 @@ void css_schedule_eval_all(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_fill(slow_subchannel_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -594,14 +594,14 @@ void css_schedule_eval_all_unreg(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_add_set(slow_subchannel_set, unreg_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 	idset_free(unreg_set);
 }
 
 void css_wait_for_slow_path(void)
 {
-	flush_workqueue(slow_path_wq);
+	flush_workqueue(cio_work_q);
 }
 
 /* Schedule reprobing of all unregistered subchannels. */
@@ -992,12 +992,21 @@ static int __init channel_subsystem_init(void)
 	ret = css_bus_init();
 	if (ret)
 		return ret;
-
+	cio_work_q = create_singlethread_workqueue("cio");
+	if (!cio_work_q) {
+		ret = -ENOMEM;
+		goto out_bus;
+	}
 	ret = io_subchannel_init();
 	if (ret)
-		css_bus_cleanup();
+		goto out_wq;
 
 	return ret;
+out_wq:
+	destroy_workqueue(cio_work_q);
+out_bus:
+	css_bus_cleanup();
+	return ret;
 }
 subsys_initcall(channel_subsystem_init);
 
@@ -1020,6 +1029,7 @@ static int __init channel_subsystem_init_sync(void)
 	css_schedule_eval_all();
 	/* Wait for the evaluation of subchannels to finish. */
 	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
+	flush_workqueue(cio_work_q);
 	/* Wait for the subchannel type specific initialization to finish */
 	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
 }