diff options
| author | Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | 2009-12-07 06:51:18 -0500 |
|---|---|---|
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2009-12-07 06:51:30 -0500 |
| commit | 390935acac21f3ea1a130bdca8eb9397cb293643 (patch) | |
| tree | dafe0c1ff5c5d6946a024c05e5eedaa2731eaf20 /drivers/s390/cio/css.c | |
| parent | 5d6e6b6f6f3eac10a7f5a15e961bac3b36824d9d (diff) | |
[S390] cio: introduce subchannel todos
Ensure that current and future users of sch->work do not overwrite
each other by introducing a single mechanism for delayed subchannel
work.
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390/cio/css.c')
| -rw-r--r-- | drivers/s390/cio/css.c | 48 |
1 files changed, 48 insertions, 0 deletions
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index b4df5a56cfe2..92ff88ac1107 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
| @@ -133,6 +133,8 @@ out: | |||
| 133 | return rc; | 133 | return rc; |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | static void css_sch_todo(struct work_struct *work); | ||
| 137 | |||
| 136 | static struct subchannel * | 138 | static struct subchannel * |
| 137 | css_alloc_subchannel(struct subchannel_id schid) | 139 | css_alloc_subchannel(struct subchannel_id schid) |
| 138 | { | 140 | { |
| @@ -147,6 +149,7 @@ css_alloc_subchannel(struct subchannel_id schid) | |||
| 147 | kfree(sch); | 149 | kfree(sch); |
| 148 | return ERR_PTR(ret); | 150 | return ERR_PTR(ret); |
| 149 | } | 151 | } |
| 152 | INIT_WORK(&sch->todo_work, css_sch_todo); | ||
| 150 | return sch; | 153 | return sch; |
| 151 | } | 154 | } |
| 152 | 155 | ||
| @@ -190,6 +193,51 @@ void css_sch_device_unregister(struct subchannel *sch) | |||
| 190 | } | 193 | } |
| 191 | EXPORT_SYMBOL_GPL(css_sch_device_unregister); | 194 | EXPORT_SYMBOL_GPL(css_sch_device_unregister); |
| 192 | 195 | ||
| 196 | static void css_sch_todo(struct work_struct *work) | ||
| 197 | { | ||
| 198 | struct subchannel *sch; | ||
| 199 | enum sch_todo todo; | ||
| 200 | |||
| 201 | sch = container_of(work, struct subchannel, todo_work); | ||
| 202 | /* Find out todo. */ | ||
| 203 | spin_lock_irq(sch->lock); | ||
| 204 | todo = sch->todo; | ||
| 205 | CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, | ||
| 206 | sch->schid.sch_no, todo); | ||
| 207 | sch->todo = SCH_TODO_NOTHING; | ||
| 208 | spin_unlock_irq(sch->lock); | ||
| 209 | /* Perform todo. */ | ||
| 210 | if (todo == SCH_TODO_UNREG) | ||
| 211 | css_sch_device_unregister(sch); | ||
| 212 | /* Release workqueue ref. */ | ||
| 213 | put_device(&sch->dev); | ||
| 214 | } | ||
| 215 | |||
| 216 | /** | ||
| 217 | * css_sched_sch_todo - schedule a subchannel operation | ||
| 218 | * @sch: subchannel | ||
| 219 | * @todo: todo | ||
| 220 | * | ||
| 221 | * Schedule the operation identified by @todo to be performed on the slow path | ||
| 222 | * workqueue. Do nothing if another operation with higher priority is already | ||
| 223 | * scheduled. Needs to be called with subchannel lock held. | ||
| 224 | */ | ||
| 225 | void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) | ||
| 226 | { | ||
| 227 | CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n", | ||
| 228 | sch->schid.ssid, sch->schid.sch_no, todo); | ||
| 229 | if (sch->todo >= todo) | ||
| 230 | return; | ||
| 231 | /* Get workqueue ref. */ | ||
| 232 | if (!get_device(&sch->dev)) | ||
| 233 | return; | ||
| 234 | sch->todo = todo; | ||
| 235 | if (!queue_work(slow_path_wq, &sch->todo_work)) { | ||
| 236 | /* Already queued, release workqueue ref. */ | ||
| 237 | put_device(&sch->dev); | ||
| 238 | } | ||
| 239 | } | ||
| 240 | |||
| 193 | static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) | 241 | static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) |
| 194 | { | 242 | { |
| 195 | int i; | 243 | int i; |
