Diffstat (limited to 'drivers')
-rw-r--r--  drivers/s390/cio/Makefile     |   2
-rw-r--r--  drivers/s390/cio/chp.c        |   8
-rw-r--r--  drivers/s390/cio/chp.h        |   2
-rw-r--r--  drivers/s390/cio/chsc.c       | 130
-rw-r--r--  drivers/s390/cio/chsc.h       |   4
-rw-r--r--  drivers/s390/cio/css.c        | 148
-rw-r--r--  drivers/s390/cio/css.h        |  10
-rw-r--r--  drivers/s390/cio/device_fsm.c |   6
-rw-r--r--  drivers/s390/cio/idset.c      | 112
-rw-r--r--  drivers/s390/cio/idset.h      |  25
-rw-r--r--  drivers/s390/s390mach.c       |  24
11 files changed, 235 insertions, 236 deletions
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index fe7b3ffa1eaa..cfaf77b320f5 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the S/390 common i/o drivers
 #
 
-obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o
 ccw_device-objs += device.o device_fsm.o device_ops.o
 ccw_device-objs += device_id.o device_pgid.o device_status.o
 obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 0e92c8c89860..ac289e6eadfe 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -491,7 +491,7 @@ void *chp_get_chp_desc(struct chp_id chpid)
  * Handle channel-report-words indicating that the status of a channel-path
  * has changed.
  */
-int chp_process_crw(int id, int status)
+void chp_process_crw(int id, int status)
 {
 	struct chp_id chpid;
 
@@ -500,11 +500,9 @@ int chp_process_crw(int id, int status)
 	if (status) {
 		if (!chp_is_registered(chpid))
 			chp_new(chpid);
-		return chsc_chp_online(chpid);
-	} else {
+		chsc_chp_online(chpid);
+	} else
 		chsc_chp_offline(chpid);
-		return 0;
-	}
 }
 
 static inline int info_bit_num(struct chp_id id)
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 862af69d9707..65286563c592 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -42,7 +42,7 @@ int chp_get_status(struct chp_id chpid);
 u8 chp_get_sch_opm(struct subchannel *sch);
 int chp_is_registered(struct chp_id chpid);
 void *chp_get_chp_desc(struct chp_id chpid);
-int chp_process_crw(int id, int available);
+void chp_process_crw(int id, int available);
 void chp_remove_cmg_attr(struct channel_path *chp);
 int chp_add_cmg_attr(struct channel_path *chp);
 int chp_new(struct chp_id chpid);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 02615eb43984..89a130a62654 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -195,12 +195,8 @@ static void terminate_internal_io(struct subchannel *sch)
 	if (cio_clear(sch)) {
 		/* Recheck device in case clear failed. */
 		sch->lpm = 0;
-		if (device_trigger_verify(sch) != 0) {
-			if(css_enqueue_subchannel_slow(sch->schid)) {
-				css_clear_subchannel_slow_list();
-				need_rescan = 1;
-			}
-		}
+		if (device_trigger_verify(sch) != 0)
+			css_schedule_eval(sch->schid);
 		return;
 	}
 	/* Request retry of internal operation. */
@@ -262,11 +258,8 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 
 out_unreg:
 	sch->lpm = 0;
-	if (css_enqueue_subchannel_slow(sch->schid)) {
-		css_clear_subchannel_slow_list();
-		need_rescan = 1;
-	}
 	spin_unlock_irq(sch->lock);
+	css_schedule_eval(sch->schid);
 	return 0;
 }
 
@@ -281,9 +274,6 @@ void chsc_chp_offline(struct chp_id chpid)
 		return;
 	bus_for_each_dev(&css_bus_type, NULL, &chpid,
			 s390_subchannel_remove_chpid);
-
-	if (need_rescan || css_slow_subchannels_exist())
-		queue_work(slow_path_wq, &slow_path_work);
 }
 
 struct res_acc_data {
@@ -331,7 +321,6 @@ static int
 s390_process_res_acc_new_sch(struct subchannel_id schid)
 {
 	struct schib schib;
-	int ret;
 	/*
 	 * We don't know the device yet, but since a path
 	 * may be available now to the device we'll have
@@ -342,15 +331,10 @@ s390_process_res_acc_new_sch(struct subchannel_id schid)
 	 */
 	if (stsch_err(schid, &schib))
 		/* We're through */
-		return need_rescan ? -EAGAIN : -ENXIO;
+		return -ENXIO;
 
 	/* Put it on the slow path. */
-	ret = css_enqueue_subchannel_slow(schid);
-	if (ret) {
-		css_clear_subchannel_slow_list();
-		need_rescan = 1;
-		return -EAGAIN;
-	}
+	css_schedule_eval(schid);
 	return 0;
 }
 
@@ -392,10 +376,8 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
 }
 
 
-static int
-s390_process_res_acc (struct res_acc_data *res_data)
+static void s390_process_res_acc (struct res_acc_data *res_data)
 {
-	int rc;
 	char dbf_txt[15];
 
 	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
@@ -413,12 +395,7 @@ s390_process_res_acc (struct res_acc_data *res_data)
 	 * The more information we have (info), the less scanning
 	 * will we have to do.
 	 */
-	rc = for_each_subchannel(__s390_process_res_acc, res_data);
-	if (css_slow_subchannels_exist())
-		rc = -EAGAIN;
-	else if (rc != -EAGAIN)
-		rc = 0;
-	return rc;
+	for_each_subchannel(__s390_process_res_acc, res_data);
 }
 
 static int
@@ -470,7 +447,7 @@ struct chsc_sei_area {
 	/* ccdf has to be big enough for a link-incident record */
 } __attribute__ ((packed));
 
-static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
 {
 	struct chp_id chpid;
 	int id;
@@ -478,7 +455,7 @@ static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
 	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
 	if (sei_area->rs != 4)
-		return 0;
+		return;
 	id = __get_chpid_from_lir(sei_area->ccdf);
 	if (id < 0)
 		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
@@ -487,21 +464,18 @@ static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
 		chpid.id = id;
 		chsc_chp_offline(chpid);
 	}
-
-	return 0;
 }
 
-static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 {
 	struct res_acc_data res_data;
 	struct chp_id chpid;
 	int status;
-	int rc;
 
 	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
 	if (sei_area->rs != 4)
-		return 0;
+		return;
 	chp_id_init(&chpid);
 	chpid.id = sei_area->rsid;
 	/* allocate a new channel path structure, if needed */
@@ -509,7 +483,7 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 	if (status < 0)
 		chp_new(chpid);
 	else if (!status)
-		return 0;
+		return;
 	memset(&res_data, 0, sizeof(struct res_acc_data));
 	res_data.chpid = chpid;
 	if ((sei_area->vf & 0xc0) != 0) {
@@ -521,9 +495,7 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 			/* link address */
 			res_data.fla_mask = 0xff00;
 	}
-	rc = s390_process_res_acc(&res_data);
-
-	return rc;
+	s390_process_res_acc(&res_data);
 }
 
 struct chp_config_data {
@@ -532,7 +504,7 @@ struct chp_config_data {
 	u8 pc;
 };
 
-static int chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
 {
 	struct chp_config_data *data;
 	struct chp_id chpid;
@@ -540,7 +512,7 @@ static int chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
 
 	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
 	if (sei_area->rs != 0)
-		return 0;
+		return;
 	data = (struct chp_config_data *) &(sei_area->ccdf);
 	chp_id_init(&chpid);
 	for (num = 0; num <= __MAX_CHPID; num++) {
@@ -561,52 +533,44 @@ static int chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
 			break;
 		}
 	}
-
-	return 0;
 }
 
-static int chsc_process_sei(struct chsc_sei_area *sei_area)
+static void chsc_process_sei(struct chsc_sei_area *sei_area)
 {
-	int rc;
-
 	/* Check if we might have lost some information. */
-	if (sei_area->flags & 0x40)
+	if (sei_area->flags & 0x40) {
 		CIO_CRW_EVENT(2, "chsc: event overflow\n");
+		css_schedule_eval_all();
+	}
 	/* which kind of information was stored? */
-	rc = 0;
 	switch (sei_area->cc) {
 	case 1: /* link incident*/
-		rc = chsc_process_sei_link_incident(sei_area);
+		chsc_process_sei_link_incident(sei_area);
 		break;
 	case 2: /* i/o resource accessibiliy */
-		rc = chsc_process_sei_res_acc(sei_area);
+		chsc_process_sei_res_acc(sei_area);
 		break;
 	case 8: /* channel-path-configuration notification */
-		rc = chsc_process_sei_chp_config(sei_area);
+		chsc_process_sei_chp_config(sei_area);
 		break;
 	default: /* other stuff */
 		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
 		break;
 	}
-
-	return rc;
 }
 
-int chsc_process_crw(void)
+void chsc_process_crw(void)
 {
 	struct chsc_sei_area *sei_area;
-	int ret;
-	int rc;
 
 	if (!sei_page)
-		return 0;
+		return;
 	/* Access to sei_page is serialized through machine check handler
 	 * thread, so no need for locking. */
 	sei_area = sei_page;
 
 	CIO_TRACE_EVENT( 2, "prcss");
-	ret = 0;
 	do {
 		memset(sei_area, 0, sizeof(*sei_area));
 		sei_area->request.length = 0x0010;
@@ -616,37 +580,26 @@ int chsc_process_crw(void)
 
 		if (sei_area->response.code == 0x0001) {
 			CIO_CRW_EVENT(4, "chsc: sei successful\n");
-			rc = chsc_process_sei(sei_area);
-			if (rc)
-				ret = rc;
+			chsc_process_sei(sei_area);
 		} else {
 			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
-			ret = 0;
 			break;
 		}
 	} while (sei_area->flags & 0x80);
-
-	return ret;
 }
 
 static int
 __chp_add_new_sch(struct subchannel_id schid)
 {
 	struct schib schib;
-	int ret;
 
 	if (stsch_err(schid, &schib))
 		/* We're through */
-		return need_rescan ? -EAGAIN : -ENXIO;
+		return -ENXIO;
 
 	/* Put it on the slow path. */
-	ret = css_enqueue_subchannel_slow(schid);
-	if (ret) {
-		css_clear_subchannel_slow_list();
-		need_rescan = 1;
-		return -EAGAIN;
-	}
+	css_schedule_eval(schid);
 	return 0;
 }
 
@@ -693,22 +646,15 @@ __chp_add(struct subchannel_id schid, void *data)
 	return 0;
 }
 
-int chsc_chp_online(struct chp_id chpid)
+void chsc_chp_online(struct chp_id chpid)
 {
-	int rc;
 	char dbf_txt[15];
 
 	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
-	if (chp_get_status(chpid) == 0)
-		return 0;
-	rc = for_each_subchannel(__chp_add, &chpid);
-	if (css_slow_subchannels_exist())
-		rc = -EAGAIN;
-	if (rc != -EAGAIN)
-		rc = 0;
-	return rc;
+	if (chp_get_status(chpid) != 0)
+		for_each_subchannel(__chp_add, &chpid);
 }
 
 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
@@ -749,12 +695,8 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					sch->driver->verify(&sch->dev);
			}
		} else if (!sch->lpm) {
-			if (device_trigger_verify(sch) != 0) {
-				if (css_enqueue_subchannel_slow(sch->schid)) {
-					css_clear_subchannel_slow_list();
-					need_rescan = 1;
-				}
-			}
+			if (device_trigger_verify(sch) != 0)
+				css_schedule_eval(sch->schid);
		} else if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);
		break;
@@ -801,11 +743,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
 		/* We're through */
 		return -ENXIO;
 	/* Put it on the slow path. */
-	if (css_enqueue_subchannel_slow(schid)) {
-		css_clear_subchannel_slow_list();
-		need_rescan = 1;
-		return -EAGAIN;
-	}
+	css_schedule_eval(schid);
 	return 0;
 }
 
@@ -826,8 +764,6 @@ int chsc_chp_vary(struct chp_id chpid, int on)
 	if (on)
 		/* Scan for new devices on varied on path. */
 		for_each_subchannel(__s390_vary_chpid_on, NULL);
-	if (need_rescan || css_slow_subchannels_exist())
-		queue_work(slow_path_wq, &slow_path_work);
 	return 0;
 }
 
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 322586f27cc0..742ef57d2c58 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -36,7 +36,7 @@ struct channel_path_desc {
 struct channel_path;
 
 extern int css_get_ssd_info(struct subchannel *);
-extern int chsc_process_crw(void);
+extern void chsc_process_crw(void);
 
 struct css_general_char {
 	u64 : 41;
@@ -79,7 +79,7 @@ extern int chsc_secm(struct channel_subsystem *, int);
 int chsc_chp_vary(struct chp_id chpid, int on);
 int chsc_determine_channel_path_description(struct chp_id chpid,
					    struct channel_path_desc *desc);
-int chsc_chp_online(struct chp_id chpid);
+void chsc_chp_online(struct chp_id chpid);
 void chsc_chp_offline(struct chp_id chpid);
 int chsc_get_channel_measurement_chars(struct channel_path *chp);
 
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fe0ace7aece8..fcc641e578f4 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -20,8 +20,8 @@
 #include "ioasm.h"
 #include "chsc.h"
 #include "device.h"
+#include "idset.h"
 
-int need_rescan = 0;
 int css_init_done = 0;
 static int need_reprobe = 0;
 static int max_ssid = 0;
@@ -306,7 +306,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
 	return css_probe_device(schid);
 }
 
-static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
+static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
 {
 	struct subchannel *sch;
 	int ret;
@@ -317,53 +317,66 @@ static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
 		put_device(&sch->dev);
 	} else
 		ret = css_evaluate_new_subchannel(schid, slow);
-
-	return ret;
+	if (ret == -EAGAIN)
+		css_schedule_eval(schid);
 }
 
-static int
-css_rescan_devices(struct subchannel_id schid, void *data)
+static struct idset *slow_subchannel_set;
+static spinlock_t slow_subchannel_lock;
+
+static int __init slow_subchannel_init(void)
 {
-	return css_evaluate_subchannel(schid, 1);
+	spin_lock_init(&slow_subchannel_lock);
+	slow_subchannel_set = idset_sch_new();
+	if (!slow_subchannel_set) {
+		printk(KERN_WARNING "cio: could not allocate slow subchannel "
+		       "set\n");
+		return -ENOMEM;
+	}
+	return 0;
 }
 
-struct slow_subchannel {
-	struct list_head slow_list;
-	struct subchannel_id schid;
-};
-
-static LIST_HEAD(slow_subchannels_head);
-static DEFINE_SPINLOCK(slow_subchannel_lock);
+subsys_initcall(slow_subchannel_init);
 
-static void
-css_trigger_slow_path(struct work_struct *unused)
+static void css_slow_path_func(struct work_struct *unused)
 {
-	CIO_TRACE_EVENT(4, "slowpath");
-
-	if (need_rescan) {
-		need_rescan = 0;
-		for_each_subchannel(css_rescan_devices, NULL);
-		return;
-	}
+	struct subchannel_id schid;
 
+	CIO_TRACE_EVENT(4, "slowpath");
 	spin_lock_irq(&slow_subchannel_lock);
-	while (!list_empty(&slow_subchannels_head)) {
-		struct slow_subchannel *slow_sch =
-			list_entry(slow_subchannels_head.next,
-				   struct slow_subchannel, slow_list);
-
-		list_del_init(slow_subchannels_head.next);
+	init_subchannel_id(&schid);
+	while (idset_sch_get_first(slow_subchannel_set, &schid)) {
+		idset_sch_del(slow_subchannel_set, schid);
 		spin_unlock_irq(&slow_subchannel_lock);
-		css_evaluate_subchannel(slow_sch->schid, 1);
+		css_evaluate_subchannel(schid, 1);
 		spin_lock_irq(&slow_subchannel_lock);
-		kfree(slow_sch);
 	}
 	spin_unlock_irq(&slow_subchannel_lock);
 }
 
-DECLARE_WORK(slow_path_work, css_trigger_slow_path);
+static DECLARE_WORK(slow_path_work, css_slow_path_func);
 struct workqueue_struct *slow_path_wq;
 
+void css_schedule_eval(struct subchannel_id schid)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	idset_sch_add(slow_subchannel_set, schid);
+	queue_work(slow_path_wq, &slow_path_work);
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+void css_schedule_eval_all(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	idset_fill(slow_subchannel_set);
+	queue_work(slow_path_wq, &slow_path_work);
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
 /* Reprobe subchannel if unregistered. */
 static int reprobe_subchannel(struct subchannel_id schid, void *data)
 {
@@ -426,33 +439,14 @@ void css_schedule_reprobe(void)
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
 /*
- * Rescan for new devices. FIXME: This is slow.
- * This function is called when we have lost CRWs due to overflows and we have
- * to do subchannel housekeeping.
- */
-void
-css_reiterate_subchannels(void)
-{
-	css_clear_subchannel_slow_list();
-	need_rescan = 1;
-}
-
-/*
  * Called from the machine check handler for subchannel report words.
  */
-int
-css_process_crw(int rsid1, int rsid2)
+void css_process_crw(int rsid1, int rsid2)
 {
-	int ret;
 	struct subchannel_id mchk_schid;
 
 	CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
		      rsid1, rsid2);
-
-	if (need_rescan)
-		/* We need to iterate all subchannels anyway. */
-		return -EAGAIN;
-
 	init_subchannel_id(&mchk_schid);
 	mchk_schid.sch_no = rsid1;
 	if (rsid2 != 0)
@@ -463,14 +457,7 @@ css_process_crw(int rsid1, int rsid2)
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
-	ret = css_evaluate_subchannel(mchk_schid, 0);
-	if (ret == -EAGAIN) {
-		if (css_enqueue_subchannel_slow(mchk_schid)) {
-			css_clear_subchannel_slow_list();
-			need_rescan = 1;
-		}
-	}
-	return ret;
+	css_evaluate_subchannel(mchk_schid, 0);
 }
 
 static int __init
@@ -745,47 +732,6 @@ struct bus_type css_bus_type = {
 
 subsys_initcall(init_channel_subsystem);
 
-int
-css_enqueue_subchannel_slow(struct subchannel_id schid)
-{
-	struct slow_subchannel *new_slow_sch;
-	unsigned long flags;
-
-	new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
-	if (!new_slow_sch)
-		return -ENOMEM;
-	new_slow_sch->schid = schid;
-	spin_lock_irqsave(&slow_subchannel_lock, flags);
-	list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
-	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
-	return 0;
-}
-
-void
-css_clear_subchannel_slow_list(void)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&slow_subchannel_lock, flags);
-	while (!list_empty(&slow_subchannels_head)) {
-		struct slow_subchannel *slow_sch =
-			list_entry(slow_subchannels_head.next,
-				   struct slow_subchannel, slow_list);
-
-		list_del_init(slow_subchannels_head.next);
-		kfree(slow_sch);
-	}
-	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
-}
-
-
-
-int
-css_slow_subchannels_exist(void)
-{
-	return (!list_empty(&slow_subchannels_head));
-}
-
 MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(css_bus_type);
 EXPORT_SYMBOL_GPL(css_characteristics_avail);
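
Editorial note, not part of the patch: the css.c change above replaces "return -EAGAIN up the call chain so someone queues a rescan" with "mark the subchannel in a bitmap via css_schedule_eval() and let a single work item drain the set", dropping the lock around each evaluation so new requests can arrive meanwhile. The following user-space sketch models only that drain pattern; every model_* name is invented for illustration and none of this is the kernel implementation.

/* Build with: cc -pthread model.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MODEL_IDS 8

static pthread_mutex_t model_lock = PTHREAD_MUTEX_INITIALIZER;
static bool model_pending[MODEL_IDS];	/* stand-in for the idset bitmap */

/* Producer side: mark an ID as needing evaluation (the kernel version also
 * queues the work item; that part is omitted here). */
static void model_schedule_eval(int id)
{
	pthread_mutex_lock(&model_lock);
	model_pending[id] = true;
	pthread_mutex_unlock(&model_lock);
}

/* Expensive per-ID work; runs without the lock held. */
static void model_evaluate(int id)
{
	printf("evaluating id %d\n", id);
}

/* Analogue of idset_sch_get_first(): lowest pending ID, or -1 if none. */
static int model_get_first(void)
{
	int id;

	for (id = 0; id < MODEL_IDS; id++)
		if (model_pending[id])
			return id;
	return -1;
}

/* Consumer side: drain all pending IDs, dropping the lock around each
 * evaluation so producers can keep adding IDs in the meantime. */
static void model_slow_path(void)
{
	int id;

	pthread_mutex_lock(&model_lock);
	while ((id = model_get_first()) >= 0) {
		model_pending[id] = false;
		pthread_mutex_unlock(&model_lock);
		model_evaluate(id);
		pthread_mutex_lock(&model_lock);
	}
	pthread_mutex_unlock(&model_lock);
}

int main(void)
{
	model_schedule_eval(3);
	model_schedule_eval(5);
	model_schedule_eval(5);		/* duplicate requests collapse into one bit */
	model_slow_path();
	return 0;
}
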
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index b2b1a265c602..4b3133a7bae1 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -146,7 +146,7 @@ extern void css_sch_device_unregister(struct subchannel *);
 extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
 extern int css_init_done;
 extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
-extern int css_process_crw(int, int);
+extern void css_process_crw(int, int);
 extern void css_reiterate_subchannels(void);
 
 #define __MAX_SUBCHANNEL 65535
@@ -186,16 +186,12 @@ int device_trigger_verify(struct subchannel *sch);
 void device_kill_pending_timer(struct subchannel *);
 
 /* Helper functions to build lists for the slow path. */
-extern int css_enqueue_subchannel_slow(struct subchannel_id schid);
-void css_walk_subchannel_slow_list(void (*fn)(unsigned long));
-void css_clear_subchannel_slow_list(void);
-int css_slow_subchannels_exist(void);
-extern int need_rescan;
+void css_schedule_eval(struct subchannel_id schid);
+void css_schedule_eval_all(void);
 
 int sch_is_pseudo_sch(struct subchannel *);
 
 extern struct workqueue_struct *slow_path_wq;
-extern struct work_struct slow_path_work;
 
 int subchannel_add_files (struct device *);
 extern struct attribute_group *subch_attr_groups[];
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index d6226881d0df..898ec3b2bebb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -222,10 +222,8 @@ __recover_lost_chpids(struct subchannel *sch, int old_lpm)
 		if (old_lpm & mask)
 			continue;
 		chpid.id = sch->schib.pmcw.chpid[i];
-		if (!chp_is_registered(chpid)) {
-			need_rescan = 1;
-			queue_work(slow_path_wq, &slow_path_work);
-		}
+		if (!chp_is_registered(chpid))
+			css_schedule_eval_all();
 	}
 }
 
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
new file mode 100644
index 000000000000..16ea828e99f7
--- /dev/null
+++ b/drivers/s390/cio/idset.c
@@ -0,0 +1,112 @@
+/*
+ * drivers/s390/cio/idset.c
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <asm/bitops.h>
+#include "idset.h"
+#include "css.h"
+
+struct idset {
+	int num_ssid;
+	int num_id;
+	unsigned long bitmap[0];
+};
+
+static inline unsigned long bitmap_size(int num_ssid, int num_id)
+{
+	return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
+}
+
+static struct idset *idset_new(int num_ssid, int num_id)
+{
+	struct idset *set;
+
+	set = kzalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id),
+		      GFP_KERNEL);
+	if (set) {
+		set->num_ssid = num_ssid;
+		set->num_id = num_id;
+	}
+	return set;
+}
+
+void idset_free(struct idset *set)
+{
+	kfree(set);
+}
+
+void idset_clear(struct idset *set)
+{
+	memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id));
+}
+
+void idset_fill(struct idset *set)
+{
+	memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
+}
+
+static inline void idset_add(struct idset *set, int ssid, int id)
+{
+	set_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline void idset_del(struct idset *set, int ssid, int id)
+{
+	clear_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_contains(struct idset *set, int ssid, int id)
+{
+	return test_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_get_first(struct idset *set, int *ssid, int *id)
+{
+	int bitnum;
+
+	bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
+	if (bitnum >= set->num_ssid * set->num_id)
+		return 0;
+	*ssid = bitnum / set->num_id;
+	*id = bitnum % set->num_id;
+	return 1;
+}
+
+struct idset *idset_sch_new(void)
+{
+	return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1);
+}
+
+void idset_sch_add(struct idset *set, struct subchannel_id schid)
+{
+	idset_add(set, schid.ssid, schid.sch_no);
+}
+
+void idset_sch_del(struct idset *set, struct subchannel_id schid)
+{
+	idset_del(set, schid.ssid, schid.sch_no);
+}
+
+int idset_sch_contains(struct idset *set, struct subchannel_id schid)
+{
+	return idset_contains(set, schid.ssid, schid.sch_no);
+}
+
+int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
+{
+	int ssid = 0;
+	int id = 0;
+	int rc;
+
+	rc = idset_get_first(set, &ssid, &id);
+	if (rc) {
+		init_subchannel_id(schid);
+		schid->ssid = ssid;
+		schid->sch_no = id;
+	}
+	return rc;
+}
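
Editorial note, not part of the patch: idset stores one bit per (ssid, id) pair in a flat bitmap, with bit number ssid * num_id + id, so "schedule everything" becomes a memset and "get the next pending entry" a find-first-bit. The standalone user-space sketch below illustrates only that indexing under invented toy_* names; the kernel file above uses set_bit()/clear_bit()/find_first_bit() rather than the plain C bit arithmetic shown here.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

struct toy_idset {
	int num_ssid;
	int num_id;
	unsigned long bitmap[];		/* flexible array member, like bitmap[0] above */
};

static struct toy_idset *toy_idset_new(int num_ssid, int num_id)
{
	size_t bits = (size_t)num_ssid * num_id;
	size_t words = (bits + BITS_PER_LONG - 1) / BITS_PER_LONG;
	struct toy_idset *set;

	set = calloc(1, sizeof(*set) + words * sizeof(unsigned long));
	if (set) {
		set->num_ssid = num_ssid;
		set->num_id = num_id;
	}
	return set;
}

static void toy_idset_add(struct toy_idset *set, int ssid, int id)
{
	size_t bit = (size_t)ssid * set->num_id + id;	/* flat bit index */

	set->bitmap[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int toy_idset_get_first(struct toy_idset *set, int *ssid, int *id)
{
	size_t bits = (size_t)set->num_ssid * set->num_id;
	size_t bit;

	for (bit = 0; bit < bits; bit++) {
		if (!(set->bitmap[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG))))
			continue;
		*ssid = bit / set->num_id;	/* recover the (ssid, id) pair */
		*id = bit % set->num_id;
		return 1;
	}
	return 0;
}

int main(void)
{
	/* Example dimensions: a few ssids by 64k subchannel numbers. */
	struct toy_idset *set = toy_idset_new(4, 65536);
	int ssid, id;

	toy_idset_add(set, 1, 0x0042);
	toy_idset_add(set, 1, 0x0042);	/* second add is idempotent: same bit */
	if (toy_idset_get_first(set, &ssid, &id))
		printf("first pending: ssid %d, id 0x%04x\n", ssid, id);
	free(set);
	return 0;
}
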
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
new file mode 100644
index 000000000000..144466ab8c15
--- /dev/null
+++ b/drivers/s390/cio/idset.h
@@ -0,0 +1,25 @@
+/*
+ * drivers/s390/cio/idset.h
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef S390_IDSET_H
+#define S390_IDSET_H S390_IDSET_H
+
+#include "schid.h"
+
+struct idset;
+
+void idset_free(struct idset *set);
+void idset_clear(struct idset *set);
+void idset_fill(struct idset *set);
+
+struct idset *idset_sch_new(void);
+void idset_sch_add(struct idset *set, struct subchannel_id id);
+void idset_sch_del(struct idset *set, struct subchannel_id id);
+int idset_sch_contains(struct idset *set, struct subchannel_id id);
+int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
+
+#endif /* S390_IDSET_H */
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index afd8a3c0f8d6..644a06eba828 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -45,14 +45,13 @@ static int
 s390_collect_crw_info(void *param)
 {
 	struct crw crw[2];
-	int ccode, ret, slow;
+	int ccode;
 	struct semaphore *sem;
 	unsigned int chain;
 
 	sem = (struct semaphore *)param;
 repeat:
 	down_interruptible(sem);
-	slow = 0;
 	chain = 0;
 	while (1) {
 		if (unlikely(chain > 1)) {
@@ -85,9 +84,8 @@ repeat:
 		/* Check for overflows. */
 		if (crw[chain].oflw) {
 			pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
-			css_reiterate_subchannels();
+			css_schedule_eval_all();
 			chain = 0;
-			slow = 1;
 			continue;
 		}
 		switch (crw[chain].rsc) {
@@ -95,10 +93,7 @@ repeat:
 			if (crw[0].chn && !chain)
 				break;
 			pr_debug("source is subchannel %04X\n", crw[0].rsid);
-			ret = css_process_crw (crw[0].rsid,
-					       chain ? crw[1].rsid : 0);
-			if (ret == -EAGAIN)
-				slow = 1;
+			css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0);
 			break;
 		case CRW_RSC_MONITOR:
 			pr_debug("source is monitoring facility\n");
@@ -117,28 +112,23 @@ repeat:
 			}
 			switch (crw[0].erc) {
 			case CRW_ERC_IPARM: /* Path has come. */
-				ret = chp_process_crw(crw[0].rsid, 1);
+				chp_process_crw(crw[0].rsid, 1);
 				break;
 			case CRW_ERC_PERRI: /* Path has gone. */
 			case CRW_ERC_PERRN:
-				ret = chp_process_crw(crw[0].rsid, 0);
+				chp_process_crw(crw[0].rsid, 0);
 				break;
 			default:
 				pr_debug("Don't know how to handle erc=%x\n",
					 crw[0].erc);
-				ret = 0;
 			}
-			if (ret == -EAGAIN)
-				slow = 1;
 			break;
 		case CRW_RSC_CONFIG:
 			pr_debug("source is configuration-alert facility\n");
 			break;
 		case CRW_RSC_CSS:
 			pr_debug("source is channel subsystem\n");
-			ret = chsc_process_crw();
-			if (ret == -EAGAIN)
-				slow = 1;
+			chsc_process_crw();
 			break;
 		default:
 			pr_debug("unknown source\n");
@@ -147,8 +137,6 @@ repeat:
 		/* chain is always 0 or 1 here. */
 		chain = crw[chain].chn ? chain + 1 : 0;
 	}
-	if (slow)
-		queue_work(slow_path_wq, &slow_path_work);
 	goto repeat;
 	return 0;
 }