author     Sebastian Ott <sebott@linux.vnet.ibm.com>          2011-12-01 07:32:19 -0500
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>        2011-12-01 07:32:17 -0500
commit     817e5000ebc4d448ca514db49b55073a724f8552 (patch)
tree       eea9284fcc6cd702161eff7611de394822329c30
parent     cfc9066bcd3ab498268e1d075f1556bb5244c0aa (diff)
[S390] hibernate: directly trigger subchannel evaluation
Using the generic css_schedule_eval to evaluate subchannels
while resuming from hibernation is very slow when used with
many devices. Provide a new evaluation trigger which exploits
css_sched_sch_todo and use this in the resume callback for
ccw devices.
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
 drivers/s390/cio/cio.h    |   5
 drivers/s390/cio/css.c    | 104
 drivers/s390/cio/device.c |   4
 3 files changed, 66 insertions(+), 47 deletions(-)
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 155a82bcb9e5..4a1ff5c2eb88 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,8 +68,13 @@ struct schib {
         __u8 mda[4];            /* model dependent area */
 } __attribute__ ((packed,aligned(4)));
 
+/*
+ * When rescheduled, todo's with higher values will overwrite those
+ * with lower values.
+ */
 enum sch_todo {
         SCH_TODO_NOTHING,
+        SCH_TODO_EVAL,
         SCH_TODO_UNREG,
 };
 
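The new comment states the scheduling rule: each subchannel has a single todo slot, and a pending todo is only replaced by one of strictly higher value, so SCH_TODO_UNREG outranks the new SCH_TODO_EVAL. A minimal user-space sketch of that rule (standalone C, no kernel locking; sched_todo is a stand-in for css_sched_sch_todo):

    #include <stdio.h>

    enum sch_todo { SCH_TODO_NOTHING, SCH_TODO_EVAL, SCH_TODO_UNREG };

    static enum sch_todo pending = SCH_TODO_NOTHING;

    /* Mirrors the kernel rule: keep only the highest-valued pending todo. */
    static void sched_todo(enum sch_todo todo)
    {
            if (pending >= todo)
                    return;         /* an equal or higher todo is already queued */
            pending = todo;
    }

    int main(void)
    {
            sched_todo(SCH_TODO_EVAL);   /* stored */
            sched_todo(SCH_TODO_UNREG);  /* overwrites EVAL */
            sched_todo(SCH_TODO_EVAL);   /* dropped: UNREG outranks EVAL */
            printf("pending = %d\n", pending);  /* prints 2 (SCH_TODO_UNREG) */
            return 0;
    }

Because UNREG wins over EVAL, a subchannel that is about to be unregistered can never be demoted back to a mere evaluation.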
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 92d7324acb1c..21908e67bf67 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch)
 }
 EXPORT_SYMBOL_GPL(css_sch_device_unregister);
 
-static void css_sch_todo(struct work_struct *work)
-{
-        struct subchannel *sch;
-        enum sch_todo todo;
-
-        sch = container_of(work, struct subchannel, todo_work);
-        /* Find out todo. */
-        spin_lock_irq(sch->lock);
-        todo = sch->todo;
-        CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
-                      sch->schid.sch_no, todo);
-        sch->todo = SCH_TODO_NOTHING;
-        spin_unlock_irq(sch->lock);
-        /* Perform todo. */
-        if (todo == SCH_TODO_UNREG)
-                css_sch_device_unregister(sch);
-        /* Release workqueue ref. */
-        put_device(&sch->dev);
-}
-
-/**
- * css_sched_sch_todo - schedule a subchannel operation
- * @sch: subchannel
- * @todo: todo
- *
- * Schedule the operation identified by @todo to be performed on the slow path
- * workqueue. Do nothing if another operation with higher priority is already
- * scheduled. Needs to be called with subchannel lock held.
- */
-void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
-{
-        CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
-                      sch->schid.ssid, sch->schid.sch_no, todo);
-        if (sch->todo >= todo)
-                return;
-        /* Get workqueue ref. */
-        if (!get_device(&sch->dev))
-                return;
-        sch->todo = todo;
-        if (!queue_work(cio_work_q, &sch->todo_work)) {
-                /* Already queued, release workqueue ref. */
-                put_device(&sch->dev);
-        }
-}
-
 static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
 {
         int i;
@@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
         css_schedule_eval(schid);
 }
 
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+        CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+                      sch->schid.ssid, sch->schid.sch_no, todo);
+        if (sch->todo >= todo)
+                return;
+        /* Get workqueue ref. */
+        if (!get_device(&sch->dev))
+                return;
+        sch->todo = todo;
+        if (!queue_work(cio_work_q, &sch->todo_work)) {
+                /* Already queued, release workqueue ref. */
+                put_device(&sch->dev);
+        }
+}
+
+static void css_sch_todo(struct work_struct *work)
+{
+        struct subchannel *sch;
+        enum sch_todo todo;
+        int ret;
+
+        sch = container_of(work, struct subchannel, todo_work);
+        /* Find out todo. */
+        spin_lock_irq(sch->lock);
+        todo = sch->todo;
+        CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+                      sch->schid.sch_no, todo);
+        sch->todo = SCH_TODO_NOTHING;
+        spin_unlock_irq(sch->lock);
+        /* Perform todo. */
+        switch (todo) {
+        case SCH_TODO_NOTHING:
+                break;
+        case SCH_TODO_EVAL:
+                ret = css_evaluate_known_subchannel(sch, 1);
+                if (ret == -EAGAIN) {
+                        spin_lock_irq(sch->lock);
+                        css_sched_sch_todo(sch, todo);
+                        spin_unlock_irq(sch->lock);
+                }
+                break;
+        case SCH_TODO_UNREG:
+                css_sch_device_unregister(sch);
+                break;
+        }
+        /* Release workqueue ref. */
+        put_device(&sch->dev);
+}
+
 static struct idset *slow_subchannel_set;
 static spinlock_t slow_subchannel_lock;
 static wait_queue_head_t css_eval_wq;
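The relocated worker now dispatches on the todo value, and a transient -EAGAIN from css_evaluate_known_subchannel simply reschedules the same todo for another pass. Note that the slot is snapshotted and cleared under the lock before the work runs, so a new request can be queued while this one executes. A rough user-space analogue of that consume-then-retry loop (standalone C; evaluate() is a stand-in that fails once to exercise the retry path):

    #include <stdio.h>

    enum sch_todo { SCH_TODO_NOTHING, SCH_TODO_EVAL, SCH_TODO_UNREG };

    static enum sch_todo pending;

    static void sched_todo(enum sch_todo todo)
    {
            if (pending < todo)
                    pending = todo;
    }

    /* Stand-in for css_evaluate_known_subchannel(): a transient
     * failure on the first call, success afterwards. */
    static int evaluate(void)
    {
            static int calls;
            return ++calls == 1 ? -11 /* -EAGAIN */ : 0;
    }

    static void todo_worker(void)
    {
            /* Snapshot and clear the slot first (the kernel does this
             * under sch->lock), so a new todo can be queued meanwhile. */
            enum sch_todo todo = pending;
            pending = SCH_TODO_NOTHING;

            switch (todo) {
            case SCH_TODO_NOTHING:
                    break;
            case SCH_TODO_EVAL:
                    if (evaluate() == -11)
                            sched_todo(todo);       /* try again later */
                    break;
            case SCH_TODO_UNREG:
                    puts("unregister");
                    break;
            }
    }

    int main(void)
    {
            sched_todo(SCH_TODO_EVAL);
            while (pending != SCH_TODO_NOTHING)
                    todo_worker();  /* first pass hits -EAGAIN, second succeeds */
            return 0;
    }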
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d734f4a0ecac..47269858ecb6 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
          */
         cdev->private->flags.resuming = 1;
         cdev->private->path_new_mask = LPM_ANYPATH;
-        css_schedule_eval(sch->schid);
+        css_sched_sch_todo(sch, SCH_TODO_EVAL);
         spin_unlock_irq(sch->lock);
-        css_complete_work();
+        css_wait_for_slow_path();
 
         /* cdev may have been moved to a different subchannel. */
         sch = to_subchannel(cdev->dev.parent);
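With this hunk the resume callback stops going through the generic css_schedule_eval path, which the commit message notes is very slow with many devices, and instead queues an SCH_TODO_EVAL for just this one subchannel, then blocks in css_wait_for_slow_path() until the workqueue has drained. A rough user-space analogue of that trigger-then-wait shape, using POSIX threads (the kernel calls named in comments are the real ones; everything else is illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  idle = PTHREAD_COND_INITIALIZER;
    static int work_pending;

    /* Workqueue stand-in: perform the queued evaluation, then wake waiters. */
    static void *worker(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            if (work_pending) {
                    puts("evaluating subchannel");  /* SCH_TODO_EVAL */
                    work_pending = 0;
                    pthread_cond_broadcast(&idle);
            }
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t tid;

            /* css_sched_sch_todo(sch, SCH_TODO_EVAL): queue under the lock. */
            pthread_mutex_lock(&lock);
            work_pending = 1;
            pthread_mutex_unlock(&lock);
            pthread_create(&tid, NULL, worker, NULL);

            /* css_wait_for_slow_path(): block until the work has drained. */
            pthread_mutex_lock(&lock);
            while (work_pending)
                    pthread_cond_wait(&idle, &lock);
            pthread_mutex_unlock(&lock);

            pthread_join(tid, NULL);
            puts("resume continues; cdev may have moved subchannels");
            return 0;
    }

Evaluating one subchannel directly avoids the full scan the generic trigger performs, which is where the speedup with many devices comes from.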