diff options
author | Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | 2007-04-27 10:01:34 -0400 |
---|---|---|
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2007-04-27 10:01:40 -0400 |
commit | 83b3370c79b91b9be3f6540c3c914e689134b45f (patch) | |
tree | ad7c062b260c0259c74e45ff869208c1ad139629 /drivers/s390/cio/css.c | |
parent | 387b734fc2b55f776b192c7afdfd892ba42347d4 (diff) |
[S390] cio: replace subchannel evaluation queue with bitmap
Use a bitmap for indicating which subchannels require evaluation
instead of allocating memory for each evaluation request. This
approach reduces memory consumption during recovery when a massive
number of evaluation requests occurs, and removes the need for
memory allocation failure handling.
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390/cio/css.c')
-rw-r--r-- | drivers/s390/cio/css.c | 148 |
1 file changed, 47 insertions, 101 deletions
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index fe0ace7aece8..fcc641e578f4 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -20,8 +20,8 @@ | |||
20 | #include "ioasm.h" | 20 | #include "ioasm.h" |
21 | #include "chsc.h" | 21 | #include "chsc.h" |
22 | #include "device.h" | 22 | #include "device.h" |
23 | #include "idset.h" | ||
23 | 24 | ||
24 | int need_rescan = 0; | ||
25 | int css_init_done = 0; | 25 | int css_init_done = 0; |
26 | static int need_reprobe = 0; | 26 | static int need_reprobe = 0; |
27 | static int max_ssid = 0; | 27 | static int max_ssid = 0; |
@@ -306,7 +306,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) | |||
306 | return css_probe_device(schid); | 306 | return css_probe_device(schid); |
307 | } | 307 | } |
308 | 308 | ||
309 | static int css_evaluate_subchannel(struct subchannel_id schid, int slow) | 309 | static void css_evaluate_subchannel(struct subchannel_id schid, int slow) |
310 | { | 310 | { |
311 | struct subchannel *sch; | 311 | struct subchannel *sch; |
312 | int ret; | 312 | int ret; |
@@ -317,53 +317,66 @@ static int css_evaluate_subchannel(struct subchannel_id schid, int slow) | |||
317 | put_device(&sch->dev); | 317 | put_device(&sch->dev); |
318 | } else | 318 | } else |
319 | ret = css_evaluate_new_subchannel(schid, slow); | 319 | ret = css_evaluate_new_subchannel(schid, slow); |
320 | 320 | if (ret == -EAGAIN) | |
321 | return ret; | 321 | css_schedule_eval(schid); |
322 | } | 322 | } |
323 | 323 | ||
324 | static int | 324 | static struct idset *slow_subchannel_set; |
325 | css_rescan_devices(struct subchannel_id schid, void *data) | 325 | static spinlock_t slow_subchannel_lock; |
326 | |||
327 | static int __init slow_subchannel_init(void) | ||
326 | { | 328 | { |
327 | return css_evaluate_subchannel(schid, 1); | 329 | spin_lock_init(&slow_subchannel_lock); |
330 | slow_subchannel_set = idset_sch_new(); | ||
331 | if (!slow_subchannel_set) { | ||
332 | printk(KERN_WARNING "cio: could not allocate slow subchannel " | ||
333 | "set\n"); | ||
334 | return -ENOMEM; | ||
335 | } | ||
336 | return 0; | ||
328 | } | 337 | } |
329 | 338 | ||
330 | struct slow_subchannel { | 339 | subsys_initcall(slow_subchannel_init); |
331 | struct list_head slow_list; | ||
332 | struct subchannel_id schid; | ||
333 | }; | ||
334 | |||
335 | static LIST_HEAD(slow_subchannels_head); | ||
336 | static DEFINE_SPINLOCK(slow_subchannel_lock); | ||
337 | 340 | ||
338 | static void | 341 | static void css_slow_path_func(struct work_struct *unused) |
339 | css_trigger_slow_path(struct work_struct *unused) | ||
340 | { | 342 | { |
341 | CIO_TRACE_EVENT(4, "slowpath"); | 343 | struct subchannel_id schid; |
342 | |||
343 | if (need_rescan) { | ||
344 | need_rescan = 0; | ||
345 | for_each_subchannel(css_rescan_devices, NULL); | ||
346 | return; | ||
347 | } | ||
348 | 344 | ||
345 | CIO_TRACE_EVENT(4, "slowpath"); | ||
349 | spin_lock_irq(&slow_subchannel_lock); | 346 | spin_lock_irq(&slow_subchannel_lock); |
350 | while (!list_empty(&slow_subchannels_head)) { | 347 | init_subchannel_id(&schid); |
351 | struct slow_subchannel *slow_sch = | 348 | while (idset_sch_get_first(slow_subchannel_set, &schid)) { |
352 | list_entry(slow_subchannels_head.next, | 349 | idset_sch_del(slow_subchannel_set, schid); |
353 | struct slow_subchannel, slow_list); | ||
354 | |||
355 | list_del_init(slow_subchannels_head.next); | ||
356 | spin_unlock_irq(&slow_subchannel_lock); | 350 | spin_unlock_irq(&slow_subchannel_lock); |
357 | css_evaluate_subchannel(slow_sch->schid, 1); | 351 | css_evaluate_subchannel(schid, 1); |
358 | spin_lock_irq(&slow_subchannel_lock); | 352 | spin_lock_irq(&slow_subchannel_lock); |
359 | kfree(slow_sch); | ||
360 | } | 353 | } |
361 | spin_unlock_irq(&slow_subchannel_lock); | 354 | spin_unlock_irq(&slow_subchannel_lock); |
362 | } | 355 | } |
363 | 356 | ||
364 | DECLARE_WORK(slow_path_work, css_trigger_slow_path); | 357 | static DECLARE_WORK(slow_path_work, css_slow_path_func); |
365 | struct workqueue_struct *slow_path_wq; | 358 | struct workqueue_struct *slow_path_wq; |
366 | 359 | ||
360 | void css_schedule_eval(struct subchannel_id schid) | ||
361 | { | ||
362 | unsigned long flags; | ||
363 | |||
364 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
365 | idset_sch_add(slow_subchannel_set, schid); | ||
366 | queue_work(slow_path_wq, &slow_path_work); | ||
367 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
368 | } | ||
369 | |||
370 | void css_schedule_eval_all(void) | ||
371 | { | ||
372 | unsigned long flags; | ||
373 | |||
374 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
375 | idset_fill(slow_subchannel_set); | ||
376 | queue_work(slow_path_wq, &slow_path_work); | ||
377 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
378 | } | ||
379 | |||
367 | /* Reprobe subchannel if unregistered. */ | 380 | /* Reprobe subchannel if unregistered. */ |
368 | static int reprobe_subchannel(struct subchannel_id schid, void *data) | 381 | static int reprobe_subchannel(struct subchannel_id schid, void *data) |
369 | { | 382 | { |
@@ -426,33 +439,14 @@ void css_schedule_reprobe(void) | |||
426 | EXPORT_SYMBOL_GPL(css_schedule_reprobe); | 439 | EXPORT_SYMBOL_GPL(css_schedule_reprobe); |
427 | 440 | ||
428 | /* | 441 | /* |
429 | * Rescan for new devices. FIXME: This is slow. | ||
430 | * This function is called when we have lost CRWs due to overflows and we have | ||
431 | * to do subchannel housekeeping. | ||
432 | */ | ||
433 | void | ||
434 | css_reiterate_subchannels(void) | ||
435 | { | ||
436 | css_clear_subchannel_slow_list(); | ||
437 | need_rescan = 1; | ||
438 | } | ||
439 | |||
440 | /* | ||
441 | * Called from the machine check handler for subchannel report words. | 442 | * Called from the machine check handler for subchannel report words. |
442 | */ | 443 | */ |
443 | int | 444 | void css_process_crw(int rsid1, int rsid2) |
444 | css_process_crw(int rsid1, int rsid2) | ||
445 | { | 445 | { |
446 | int ret; | ||
447 | struct subchannel_id mchk_schid; | 446 | struct subchannel_id mchk_schid; |
448 | 447 | ||
449 | CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", | 448 | CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", |
450 | rsid1, rsid2); | 449 | rsid1, rsid2); |
451 | |||
452 | if (need_rescan) | ||
453 | /* We need to iterate all subchannels anyway. */ | ||
454 | return -EAGAIN; | ||
455 | |||
456 | init_subchannel_id(&mchk_schid); | 450 | init_subchannel_id(&mchk_schid); |
457 | mchk_schid.sch_no = rsid1; | 451 | mchk_schid.sch_no = rsid1; |
458 | if (rsid2 != 0) | 452 | if (rsid2 != 0) |
@@ -463,14 +457,7 @@ css_process_crw(int rsid1, int rsid2) | |||
463 | * use stsch() to find out if the subchannel in question has come | 457 | * use stsch() to find out if the subchannel in question has come |
464 | * or gone. | 458 | * or gone. |
465 | */ | 459 | */ |
466 | ret = css_evaluate_subchannel(mchk_schid, 0); | 460 | css_evaluate_subchannel(mchk_schid, 0); |
467 | if (ret == -EAGAIN) { | ||
468 | if (css_enqueue_subchannel_slow(mchk_schid)) { | ||
469 | css_clear_subchannel_slow_list(); | ||
470 | need_rescan = 1; | ||
471 | } | ||
472 | } | ||
473 | return ret; | ||
474 | } | 461 | } |
475 | 462 | ||
476 | static int __init | 463 | static int __init |
@@ -745,47 +732,6 @@ struct bus_type css_bus_type = { | |||
745 | 732 | ||
746 | subsys_initcall(init_channel_subsystem); | 733 | subsys_initcall(init_channel_subsystem); |
747 | 734 | ||
748 | int | ||
749 | css_enqueue_subchannel_slow(struct subchannel_id schid) | ||
750 | { | ||
751 | struct slow_subchannel *new_slow_sch; | ||
752 | unsigned long flags; | ||
753 | |||
754 | new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC); | ||
755 | if (!new_slow_sch) | ||
756 | return -ENOMEM; | ||
757 | new_slow_sch->schid = schid; | ||
758 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
759 | list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head); | ||
760 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
761 | return 0; | ||
762 | } | ||
763 | |||
764 | void | ||
765 | css_clear_subchannel_slow_list(void) | ||
766 | { | ||
767 | unsigned long flags; | ||
768 | |||
769 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
770 | while (!list_empty(&slow_subchannels_head)) { | ||
771 | struct slow_subchannel *slow_sch = | ||
772 | list_entry(slow_subchannels_head.next, | ||
773 | struct slow_subchannel, slow_list); | ||
774 | |||
775 | list_del_init(slow_subchannels_head.next); | ||
776 | kfree(slow_sch); | ||
777 | } | ||
778 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
779 | } | ||
780 | |||
781 | |||
782 | |||
783 | int | ||
784 | css_slow_subchannels_exist(void) | ||
785 | { | ||
786 | return (!list_empty(&slow_subchannels_head)); | ||
787 | } | ||
788 | |||
789 | MODULE_LICENSE("GPL"); | 735 | MODULE_LICENSE("GPL"); |
790 | EXPORT_SYMBOL(css_bus_type); | 736 | EXPORT_SYMBOL(css_bus_type); |
791 | EXPORT_SYMBOL_GPL(css_characteristics_avail); | 737 | EXPORT_SYMBOL_GPL(css_characteristics_avail); |