author	Peter Oberparleiter <peter.oberparleiter@de.ibm.com>	2009-12-07 06:51:18 -0500
committer	Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>	2009-12-07 06:51:30 -0500
commit	390935acac21f3ea1a130bdca8eb9397cb293643 (patch)
tree	dafe0c1ff5c5d6946a024c05e5eedaa2731eaf20 /drivers
parent	5d6e6b6f6f3eac10a7f5a15e961bac3b36824d9d (diff)
[S390] cio: introduce subchannel todos
Ensure that current and future users of sch->work do not overwrite each
other by introducing a single mechanism for delayed subchannel work.

Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
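The idea behind the patch, in isolation: previously each user re-initialized and queued sch->work itself, so two users could overwrite each other's pending work item; now callers only record the highest-priority pending action (sch->todo) and a single work function reads, clears and performs it. Below is a minimal userspace-flavoured sketch of that pattern, with a pthread standing in for the kernel slow-path workqueue; the names pending, sched_todo and worker are illustrative only and do not appear in the patch.

/*
 * Illustrative sketch only, not part of the patch: the "single todo"
 * pattern in plain C.  Callers record the highest-priority pending
 * action under a lock; one worker picks it up, clears it and performs
 * it, so a later request can never overwrite an already scheduled,
 * higher-priority one.
 */
#include <pthread.h>
#include <stdio.h>

enum todo {			/* ordered by priority, like enum sch_todo */
	TODO_NOTHING,
	TODO_UNREG,
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static enum todo pending = TODO_NOTHING;

/* Analogue of css_sch_todo(): read and clear the pending action, then act. */
static void *worker(void *arg)
{
	enum todo todo;

	(void)arg;
	pthread_mutex_lock(&lock);
	todo = pending;
	pending = TODO_NOTHING;
	pthread_mutex_unlock(&lock);

	if (todo == TODO_UNREG)
		printf("performing unregister\n");
	return NULL;
}

/* Analogue of css_sched_sch_todo(): must be called with the lock held. */
static void sched_todo(enum todo todo)
{
	if (pending >= todo)	/* equal or higher-priority action already set */
		return;
	pending = todo;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&lock);
	sched_todo(TODO_UNREG);
	sched_todo(TODO_UNREG);	/* absorbed into the pending action, not lost */
	pthread_mutex_unlock(&lock);

	pthread_create(&t, NULL, worker, NULL);
	pthread_join(&t, NULL);
	return 0;
}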
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/s390/cio/cio.h		8
-rw-r--r--	drivers/s390/cio/css.c		48
-rw-r--r--	drivers/s390/cio/css.h		3
-rw-r--r--	drivers/s390/cio/device.c	23
4 files changed, 63 insertions, 19 deletions
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 2e43558c704b..bf7f80f5a330 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,6 +68,11 @@ struct schib {
 	__u8 mda[4];		/* model dependent area */
 } __attribute__ ((packed,aligned(4)));
 
+enum sch_todo {
+	SCH_TODO_NOTHING,
+	SCH_TODO_UNREG,
+};
+
 /* subchannel data structure used by I/O subroutines */
 struct subchannel {
 	struct subchannel_id schid;
@@ -95,7 +100,8 @@ struct subchannel {
 	struct device dev;	/* entry in device tree */
 	struct css_driver *driver;
 	void *private; /* private per subchannel type data */
-	struct work_struct work;
+	enum sch_todo todo;
+	struct work_struct todo_work;
 	struct schib_config config;
 } __attribute__ ((aligned(8)));
 
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index b4df5a56cfe2..92ff88ac1107 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -133,6 +133,8 @@ out:
133 return rc; 133 return rc;
134} 134}
135 135
136static void css_sch_todo(struct work_struct *work);
137
136static struct subchannel * 138static struct subchannel *
137css_alloc_subchannel(struct subchannel_id schid) 139css_alloc_subchannel(struct subchannel_id schid)
138{ 140{
@@ -147,6 +149,7 @@ css_alloc_subchannel(struct subchannel_id schid)
147 kfree(sch); 149 kfree(sch);
148 return ERR_PTR(ret); 150 return ERR_PTR(ret);
149 } 151 }
152 INIT_WORK(&sch->todo_work, css_sch_todo);
150 return sch; 153 return sch;
151} 154}
152 155
@@ -190,6 +193,51 @@ void css_sch_device_unregister(struct subchannel *sch)
190} 193}
191EXPORT_SYMBOL_GPL(css_sch_device_unregister); 194EXPORT_SYMBOL_GPL(css_sch_device_unregister);
192 195
196static void css_sch_todo(struct work_struct *work)
197{
198 struct subchannel *sch;
199 enum sch_todo todo;
200
201 sch = container_of(work, struct subchannel, todo_work);
202 /* Find out todo. */
203 spin_lock_irq(sch->lock);
204 todo = sch->todo;
205 CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
206 sch->schid.sch_no, todo);
207 sch->todo = SCH_TODO_NOTHING;
208 spin_unlock_irq(sch->lock);
209 /* Perform todo. */
210 if (todo == SCH_TODO_UNREG)
211 css_sch_device_unregister(sch);
212 /* Release workqueue ref. */
213 put_device(&sch->dev);
214}
215
216/**
217 * css_sched_sch_todo - schedule a subchannel operation
218 * @sch: subchannel
219 * @todo: todo
220 *
221 * Schedule the operation identified by @todo to be performed on the slow path
222 * workqueue. Do nothing if another operation with higher priority is already
223 * scheduled. Needs to be called with subchannel lock held.
224 */
225void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
226{
227 CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
228 sch->schid.ssid, sch->schid.sch_no, todo);
229 if (sch->todo >= todo)
230 return;
231 /* Get workqueue ref. */
232 if (!get_device(&sch->dev))
233 return;
234 sch->todo = todo;
235 if (!queue_work(slow_path_wq, &sch->todo_work)) {
236 /* Already queued, release workqueue ref. */
237 put_device(&sch->dev);
238 }
239}
240
193static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) 241static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
194{ 242{
195 int i; 243 int i;
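Caller-side usage, for reference: css_sched_sch_todo() must be invoked with the subchannel lock held, so a driver wanting deferred unregistration takes sch->lock around the call, as in the hypothetical sketch below (the real call site added by this patch is in io_subchannel_probe(), shown in the device.c hunk further down).

static void example_request_unregister(struct subchannel *sch)
{
	/* Hypothetical helper; mirrors the call site added in device.c. */
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
}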
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 68d6b0bf151c..fe84b92cde60 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -11,6 +11,8 @@
 #include <asm/chpid.h>
 #include <asm/schid.h>
 
+#include "cio.h"
+
 /*
  * path grouping stuff
  */
@@ -151,4 +153,5 @@ int css_sch_is_valid(struct schib *);
 
 extern struct workqueue_struct *slow_path_wq;
 void css_wait_for_slow_path(void);
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
 #endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6097763f1035..0dcfc0ee3d81 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1051,23 +1051,6 @@ static void io_subchannel_init_fields(struct subchannel *sch)
 	io_subchannel_init_config(sch);
 }
 
-static void io_subchannel_do_unreg(struct work_struct *work)
-{
-	struct subchannel *sch;
-
-	sch = container_of(work, struct subchannel, work);
-	css_sch_device_unregister(sch);
-	put_device(&sch->dev);
-}
-
-/* Schedule unregister if we have no cdev. */
-static void io_subchannel_schedule_removal(struct subchannel *sch)
-{
-	get_device(&sch->dev);
-	INIT_WORK(&sch->work, io_subchannel_do_unreg);
-	queue_work(slow_path_wq, &sch->work);
-}
-
 /*
  * Note: We always return 0 so that we bind to the device even on error.
  * This is needed so that our remove function is called on unregister.
@@ -1124,7 +1107,9 @@ static int io_subchannel_probe(struct subchannel *sch)
 	return 0;
 
 out_schedule:
-	io_subchannel_schedule_removal(sch);
+	spin_lock_irq(sch->lock);
+	css_sched_sch_todo(sch, SCH_TODO_UNREG);
+	spin_unlock_irq(sch->lock);
 	return 0;
 }
 
@@ -1469,6 +1454,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
 	spin_lock_irqsave(sch->lock, flags);
 	if (!device_is_registered(&sch->dev))
 		goto out_unlock;
+	if (work_pending(&sch->todo_work))
+		goto out_unlock;
 	action = sch_get_action(sch);
 	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
 		      sch->schid.ssid, sch->schid.sch_no, process,