Diffstat (limited to 'drivers/s390/cio/css.c')

 drivers/s390/cio/css.c | 201 +++++++++++++++++++++++--------------------------
 1 file changed, 95 insertions(+), 106 deletions(-)
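In brief, the patch replaces the kmalloc-backed slow-subchannel list and the global need_rescan flag with a preallocated idset bitmap, adds css_schedule_eval() and css_schedule_eval_all() to defer subchannel evaluation to the slow-path worker, and introduces css_update_ssd_info(), which falls back to deriving the subchannel description from the PMCW when chsc_get_ssd_info() fails or when the console subchannel is initialized before memory allocation is available. A short illustrative sketch of the bitmap pattern follows the diff.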
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fe0ace7aece8..27c6d9e55b23 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -20,8 +20,9 @@
 #include "ioasm.h"
 #include "chsc.h"
 #include "device.h"
+#include "idset.h"
+#include "chp.h"
 
-int need_rescan = 0;
 int css_init_done = 0;
 static int need_reprobe = 0;
 static int max_ssid = 0;
@@ -125,8 +126,52 @@ void css_sch_device_unregister(struct subchannel *sch)
 	mutex_unlock(&sch->reg_mutex);
 }
 
-static int
-css_register_subchannel(struct subchannel *sch)
+static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
+{
+	int i;
+	int mask;
+
+	memset(ssd, 0, sizeof(struct chsc_ssd_info));
+	ssd->path_mask = pmcw->pim;
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (pmcw->pim & mask) {
+			chp_id_init(&ssd->chpid[i]);
+			ssd->chpid[i].id = pmcw->chpid[i];
+		}
+	}
+}
+
+static void ssd_register_chpids(struct chsc_ssd_info *ssd)
+{
+	int i;
+	int mask;
+
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (ssd->path_mask & mask)
+			if (!chp_is_registered(ssd->chpid[i]))
+				chp_new(ssd->chpid[i]);
+	}
+}
+
+void css_update_ssd_info(struct subchannel *sch)
+{
+	int ret;
+
+	if (cio_is_console(sch->schid)) {
+		/* Console is initialized too early for functions requiring
+		 * memory allocation. */
+		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
+	} else {
+		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
+		if (ret)
+			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
+		ssd_register_chpids(&sch->ssd_info);
+	}
+}
+
+static int css_register_subchannel(struct subchannel *sch)
 {
 	int ret;
 
@@ -135,9 +180,7 @@ css_register_subchannel(struct subchannel *sch)
 	sch->dev.bus = &css_bus_type;
 	sch->dev.release = &css_subchannel_release;
 	sch->dev.groups = subch_attr_groups;
-
-	css_get_ssd_info(sch);
-
+	css_update_ssd_info(sch);
 	/* make it known to the system */
 	ret = css_sch_device_register(sch);
 	if (ret) {
@@ -306,7 +349,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
 		return css_probe_device(schid);
 }
 
-static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
+static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
 {
 	struct subchannel *sch;
 	int ret;
@@ -317,53 +360,66 @@ static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
 		put_device(&sch->dev);
 	} else
 		ret = css_evaluate_new_subchannel(schid, slow);
-
-	return ret;
+	if (ret == -EAGAIN)
+		css_schedule_eval(schid);
 }
 
-static int
-css_rescan_devices(struct subchannel_id schid, void *data)
+static struct idset *slow_subchannel_set;
+static spinlock_t slow_subchannel_lock;
+
+static int __init slow_subchannel_init(void)
 {
-	return css_evaluate_subchannel(schid, 1);
+	spin_lock_init(&slow_subchannel_lock);
+	slow_subchannel_set = idset_sch_new();
+	if (!slow_subchannel_set) {
+		printk(KERN_WARNING "cio: could not allocate slow subchannel "
+		       "set\n");
+		return -ENOMEM;
+	}
+	return 0;
 }
 
-struct slow_subchannel {
-	struct list_head slow_list;
-	struct subchannel_id schid;
-};
-
-static LIST_HEAD(slow_subchannels_head);
-static DEFINE_SPINLOCK(slow_subchannel_lock);
+subsys_initcall(slow_subchannel_init);
 
-static void
-css_trigger_slow_path(struct work_struct *unused)
+static void css_slow_path_func(struct work_struct *unused)
 {
-	CIO_TRACE_EVENT(4, "slowpath");
-
-	if (need_rescan) {
-		need_rescan = 0;
-		for_each_subchannel(css_rescan_devices, NULL);
-		return;
-	}
+	struct subchannel_id schid;
 
+	CIO_TRACE_EVENT(4, "slowpath");
 	spin_lock_irq(&slow_subchannel_lock);
-	while (!list_empty(&slow_subchannels_head)) {
-		struct slow_subchannel *slow_sch =
-			list_entry(slow_subchannels_head.next,
-				   struct slow_subchannel, slow_list);
-
-		list_del_init(slow_subchannels_head.next);
+	init_subchannel_id(&schid);
+	while (idset_sch_get_first(slow_subchannel_set, &schid)) {
+		idset_sch_del(slow_subchannel_set, schid);
 		spin_unlock_irq(&slow_subchannel_lock);
-		css_evaluate_subchannel(slow_sch->schid, 1);
+		css_evaluate_subchannel(schid, 1);
 		spin_lock_irq(&slow_subchannel_lock);
-		kfree(slow_sch);
 	}
 	spin_unlock_irq(&slow_subchannel_lock);
 }
 
-DECLARE_WORK(slow_path_work, css_trigger_slow_path);
+static DECLARE_WORK(slow_path_work, css_slow_path_func);
 struct workqueue_struct *slow_path_wq;
 
+void css_schedule_eval(struct subchannel_id schid)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	idset_sch_add(slow_subchannel_set, schid);
+	queue_work(slow_path_wq, &slow_path_work);
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+void css_schedule_eval_all(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	idset_fill(slow_subchannel_set);
+	queue_work(slow_path_wq, &slow_path_work);
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
 /* Reprobe subchannel if unregistered. */
 static int reprobe_subchannel(struct subchannel_id schid, void *data)
 {
@@ -426,33 +482,14 @@ void css_schedule_reprobe(void)
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
 /*
- * Rescan for new devices. FIXME: This is slow.
- * This function is called when we have lost CRWs due to overflows and we have
- * to do subchannel housekeeping.
- */
-void
-css_reiterate_subchannels(void)
-{
-	css_clear_subchannel_slow_list();
-	need_rescan = 1;
-}
-
-/*
  * Called from the machine check handler for subchannel report words.
  */
-int
-css_process_crw(int rsid1, int rsid2)
+void css_process_crw(int rsid1, int rsid2)
 {
-	int ret;
 	struct subchannel_id mchk_schid;
 
 	CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
 		      rsid1, rsid2);
-
-	if (need_rescan)
-		/* We need to iterate all subchannels anyway. */
-		return -EAGAIN;
-
 	init_subchannel_id(&mchk_schid);
 	mchk_schid.sch_no = rsid1;
 	if (rsid2 != 0)
@@ -463,14 +500,7 @@ css_process_crw(int rsid1, int rsid2)
 	 * use stsch() to find out if the subchannel in question has come
 	 * or gone.
 	 */
-	ret = css_evaluate_subchannel(mchk_schid, 0);
-	if (ret == -EAGAIN) {
-		if (css_enqueue_subchannel_slow(mchk_schid)) {
-			css_clear_subchannel_slow_list();
-			need_rescan = 1;
-		}
-	}
-	return ret;
+	css_evaluate_subchannel(mchk_schid, 0);
 }
 
 static int __init
@@ -745,47 +775,6 @@ struct bus_type css_bus_type = {
 
 subsys_initcall(init_channel_subsystem);
 
-int
-css_enqueue_subchannel_slow(struct subchannel_id schid)
-{
-	struct slow_subchannel *new_slow_sch;
-	unsigned long flags;
-
-	new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
-	if (!new_slow_sch)
-		return -ENOMEM;
-	new_slow_sch->schid = schid;
-	spin_lock_irqsave(&slow_subchannel_lock, flags);
-	list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
-	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
-	return 0;
-}
-
-void
-css_clear_subchannel_slow_list(void)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&slow_subchannel_lock, flags);
-	while (!list_empty(&slow_subchannels_head)) {
-		struct slow_subchannel *slow_sch =
-			list_entry(slow_subchannels_head.next,
-				   struct slow_subchannel, slow_list);
-
-		list_del_init(slow_subchannels_head.next);
-		kfree(slow_sch);
-	}
-	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
-}
-
-
-
-int
-css_slow_subchannels_exist(void)
-{
-	return (!list_empty(&slow_subchannels_head));
-}
-
 MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(css_bus_type);
 EXPORT_SYMBOL_GPL(css_characteristics_avail);
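The core idea of the change is swapping an allocate-per-event queue for a fixed bitmap: scheduling an evaluation can then never fail with -ENOMEM once the set exists, and scheduling the same subchannel twice collapses into a single pending bit, which is why css_evaluate_subchannel() and css_process_crw() no longer need to return an error. Below is a minimal user-space sketch of that pattern, assuming nothing from the kernel tree; the names (pending_set, schedule_eval, drain) and the 64-ID universe are illustrative only, and the locking that the real code needs is omitted for brevity.

#include <stdio.h>

#define ID_MAX 64U

static unsigned char pending_set[ID_MAX / 8];

/* Mark an ID as pending; idempotent and allocation-free. */
static void schedule_eval(unsigned int id)
{
	pending_set[id / 8] |= 0x80 >> (id % 8);
}

/* Fetch the lowest pending ID; returns 0 when the set is empty. */
static int get_first(unsigned int *id)
{
	unsigned int i;

	for (i = 0; i < ID_MAX; i++) {
		if (pending_set[i / 8] & (0x80 >> (i % 8))) {
			*id = i;
			return 1;
		}
	}
	return 0;
}

/* Clear one pending bit. */
static void del(unsigned int id)
{
	pending_set[id / 8] &= ~(0x80 >> (id % 8));
}

/* Drain loop in the style of css_slow_path_func(): pop IDs one by one. */
static void drain(void)
{
	unsigned int id;

	while (get_first(&id)) {
		del(id);
		printf("evaluating id %u\n", id);
	}
}

int main(void)
{
	schedule_eval(3);
	schedule_eval(3);	/* duplicate request: still one bit */
	schedule_eval(42);
	drain();		/* prints 3, then 42 */
	return 0;
}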