author	Peter Oberparleiter <peter.oberparleiter@de.ibm.com>	2008-01-26 08:10:48 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-01-26 08:11:03 -0500
commit	e82a1567e4b22eb035da2499d20ddd573c9acf75 (patch)
tree	0cf697f96e734a846ee1cbc598beebcc7be10117 /drivers/s390/cio/css.c
parent	4beee64685e116b01c47655daf6d88df87e053c8 (diff)
[S390] cio: reduce cpu utilization during device scan
Minimize calls to the CPU-intensive function get_subchannel_by_schid()
by introducing for_each_subchannel_staged(), which temporarily caches
the information about registered subchannels in a bitmap.
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
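
The core idea of the patch: instead of calling get_subchannel_by_schid()
for every possible subchannel ID, fill a bitmap with all IDs, clear the
bit of each subchannel visited during one walk of the css bus, and treat
any bit still set afterwards as an unregistered ID. Below is a minimal
standalone model of that staging scheme; the names, the 64-entry ID space
and the byte-array bitmap are illustrative stand-ins, not the kernel's
idset implementation.

/*
 * Standalone model of the staged scan (illustrative only):
 * mark every possible ID, clear the bit of each ID seen while
 * walking the registered devices once, then report the bits
 * still set as unknown IDs.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_IDS 64			/* hypothetical ID space */

static unsigned char idmap[MAX_IDS / 8];

static void idset_fill_all(void)   { memset(idmap, 0xff, sizeof(idmap)); }
static void idset_del(int id)      { idmap[id / 8] &= ~(1 << (id % 8)); }
static bool idset_contains(int id) { return idmap[id / 8] & (1 << (id % 8)); }

int main(void)
{
	int registered[] = { 3, 7, 42 };	/* pretend these are known */
	size_t i;
	int id;

	idset_fill_all();
	/* Stage 1: one pass over the known entries clears their bits. */
	for (i = 0; i < sizeof(registered) / sizeof(registered[0]); i++) {
		idset_del(registered[i]);
		printf("known: %d\n", registered[i]);
	}
	/* Stage 2: every bit still set belongs to an unknown ID. */
	for (id = 0; id < MAX_IDS; id++)
		if (idset_contains(id))
			printf("unknown: %d\n", id);
	return 0;
}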
Diffstat (limited to 'drivers/s390/cio/css.c')
-rw-r--r--	drivers/s390/cio/css.c	120
1 file changed, 102 insertions(+), 18 deletions(-)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 69d56c7284d1..3b45bbe6cce0 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -51,6 +51,62 @@ for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
 	return ret;
 }
 
+struct cb_data {
+	void *data;
+	struct idset *set;
+	int (*fn_known_sch)(struct subchannel *, void *);
+	int (*fn_unknown_sch)(struct subchannel_id, void *);
+};
+
+static int call_fn_known_sch(struct device *dev, void *data)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct cb_data *cb = data;
+	int rc = 0;
+
+	idset_sch_del(cb->set, sch->schid);
+	if (cb->fn_known_sch)
+		rc = cb->fn_known_sch(sch, cb->data);
+	return rc;
+}
+
+static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
+{
+	struct cb_data *cb = data;
+	int rc = 0;
+
+	if (idset_sch_contains(cb->set, schid))
+		rc = cb->fn_unknown_sch(schid, cb->data);
+	return rc;
+}
+
+int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
+			       int (*fn_unknown)(struct subchannel_id,
+						 void *), void *data)
+{
+	struct cb_data cb;
+	int rc;
+
+	cb.set = idset_sch_new();
+	if (!cb.set)
+		return -ENOMEM;
+	idset_fill(cb.set);
+	cb.data = data;
+	cb.fn_known_sch = fn_known;
+	cb.fn_unknown_sch = fn_unknown;
+	/* Process registered subchannels. */
+	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
+	if (rc)
+		goto out;
+	/* Process unregistered subchannels. */
+	if (fn_unknown)
+		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
+out:
+	idset_free(cb.set);
+
+	return rc;
+}
+
 static struct subchannel *
 css_alloc_subchannel(struct subchannel_id schid)
 {
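
To make the new entry point concrete, here is a hypothetical caller; both
callbacks are invented for illustration and do not appear in the patch. A
non-zero return from the known-subchannel callback aborts the bus walk (it
is propagated out of bus_for_each_dev()), and the unknown-subchannel
callback only runs for IDs whose bits survived stage one.

/* Hypothetical usage sketch; show_known_sch()/show_unknown_sch()
 * are invented names, not part of this patch. */
static int show_known_sch(struct subchannel *sch, void *data)
{
	/* Called once per registered subchannel during the bus walk. */
	return 0;	/* non-zero would abort the iteration */
}

static int show_unknown_sch(struct subchannel_id schid, void *data)
{
	/* Called only for IDs still set in the staging bitmap. */
	return 0;
}

static int scan_all_subchannels(void)
{
	return for_each_subchannel_staged(show_known_sch, show_unknown_sch,
					  NULL);
}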
@@ -402,20 +458,56 @@ static int __init slow_subchannel_init(void)
 	return 0;
 }
 
-static void css_slow_path_func(struct work_struct *unused)
+static int slow_eval_known_fn(struct subchannel *sch, void *data)
 {
-	struct subchannel_id schid;
+	int eval;
+	int rc;
 
-	CIO_TRACE_EVENT(4, "slowpath");
 	spin_lock_irq(&slow_subchannel_lock);
-	init_subchannel_id(&schid);
-	while (idset_sch_get_first(slow_subchannel_set, &schid)) {
-		idset_sch_del(slow_subchannel_set, schid);
-		spin_unlock_irq(&slow_subchannel_lock);
-		css_evaluate_subchannel(schid, 1);
-		spin_lock_irq(&slow_subchannel_lock);
+	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
+	idset_sch_del(slow_subchannel_set, sch->schid);
+	spin_unlock_irq(&slow_subchannel_lock);
+	if (eval) {
+		rc = css_evaluate_known_subchannel(sch, 1);
+		if (rc == -EAGAIN)
+			css_schedule_eval(sch->schid);
 	}
+	return 0;
+}
+
+static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
+{
+	int eval;
+	int rc = 0;
+
+	spin_lock_irq(&slow_subchannel_lock);
+	eval = idset_sch_contains(slow_subchannel_set, schid);
+	idset_sch_del(slow_subchannel_set, schid);
 	spin_unlock_irq(&slow_subchannel_lock);
+	if (eval) {
+		rc = css_evaluate_new_subchannel(schid, 1);
+		switch (rc) {
+		case -EAGAIN:
+			css_schedule_eval(schid);
+			rc = 0;
+			break;
+		case -ENXIO:
+		case -ENOMEM:
+		case -EIO:
+			/* These should abort looping */
+			break;
+		default:
+			rc = 0;
+		}
+	}
+	return rc;
+}
+
+static void css_slow_path_func(struct work_struct *unused)
+{
+	CIO_TRACE_EVENT(4, "slowpath");
+	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
+				   NULL);
 }
 
 static DECLARE_WORK(slow_path_work, css_slow_path_func);
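
Both evaluation helpers follow the same consume-under-lock pattern: the
membership test and the deletion happen atomically under
slow_subchannel_lock, while the expensive evaluation runs unlocked, so a
concurrent css_schedule_eval() can re-queue an ID without being blocked.
A standalone model of that shape, with a pthread mutex standing in for
the spinlock (all names illustrative):

/*
 * Standalone model of the consume-under-lock pattern above;
 * a pthread mutex stands in for spin_lock_irq() and a plain
 * bool array for the idset (illustrative only).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool pending[16] = { [5] = true };	/* pretend ID 5 is queued */

static void evaluate(int id)
{
	printf("evaluating %d\n", id);	/* slow work, done unlocked */
}

static void slow_eval(int id)
{
	bool eval;

	pthread_mutex_lock(&lock);
	eval = pending[id];	/* still requested? */
	pending[id] = false;	/* consume the request */
	pthread_mutex_unlock(&lock);
	if (eval)
		evaluate(id);
}

int main(void)
{
	slow_eval(5);	/* prints once */
	slow_eval(5);	/* already consumed: prints nothing */
	return 0;
}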
@@ -444,7 +536,6 @@ void css_schedule_eval_all(void)
 /* Reprobe subchannel if unregistered. */
 static int reprobe_subchannel(struct subchannel_id schid, void *data)
 {
-	struct subchannel *sch;
 	int ret;
 
 	CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
@@ -452,13 +543,6 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data)
 	if (need_reprobe)
 		return -EAGAIN;
 
-	sch = get_subchannel_by_schid(schid);
-	if (sch) {
-		/* Already known. */
-		put_device(&sch->dev);
-		return 0;
-	}
-
 	ret = css_probe_device(schid);
 	switch (ret) {
 	case 0:
@@ -486,7 +570,7 @@ static void reprobe_all(struct work_struct *unused)
 	/* Make sure initial subchannel scan is done. */
 	wait_event(ccw_device_init_wq,
 		   atomic_read(&ccw_device_init_count) == 0);
-	ret = for_each_subchannel(reprobe_subchannel, NULL);
+	ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
 
 	CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
 		      need_reprobe);
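
Design note: reprobe_all() now passes NULL as the known-subchannel
callback, so registered subchannels merely have their bits cleared from
the staging bitmap during the single bus walk; that walk replaces the
per-ID get_subchannel_by_schid()/put_device() lookup removed above, and
reprobe_subchannel() is invoked only for IDs that were never seen as
registered devices.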