author     Peter Oberparleiter <peter.oberparleiter@de.ibm.com>   2007-04-27 10:01:34 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>            2007-04-27 10:01:40 -0400
commit     83b3370c79b91b9be3f6540c3c914e689134b45f (patch)
tree       ad7c062b260c0259c74e45ff869208c1ad139629 /drivers/s390/cio
parent     387b734fc2b55f776b192c7afdfd892ba42347d4 (diff)
[S390] cio: replace subchannel evaluation queue with bitmap
Use a bitmap for indicating which subchannels require evaluation instead of
allocating memory for each evaluation request. This approach reduces memory
consumption during recovery in case of massive evaluation request occurrence
and removes the need for memory allocation failure handling.

Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/Makefile        2
-rw-r--r--  drivers/s390/cio/chp.c           8
-rw-r--r--  drivers/s390/cio/chp.h           2
-rw-r--r--  drivers/s390/cio/chsc.c        130
-rw-r--r--  drivers/s390/cio/chsc.h          4
-rw-r--r--  drivers/s390/cio/css.c         148
-rw-r--r--  drivers/s390/cio/css.h          10
-rw-r--r--  drivers/s390/cio/device_fsm.c    6
-rw-r--r--  drivers/s390/cio/idset.c       112
-rw-r--r--  drivers/s390/cio/idset.h        25
10 files changed, 229 insertions, 218 deletions
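
Before the diff itself, a minimal, hedged sketch of the idea the patch implements: instead of kzalloc'ing a list entry per evaluation request, a fixed-size bitmap holds one bit per (ssid, subchannel number) pair, so marking a subchannel for evaluation can never fail for lack of memory. This is plain userspace C, not the kernel code; the NUM_SSID/NUM_SCH values, the pending/sch_add/sch_get_first names, and the use of the GCC/Clang __builtin_ctzl builtin are illustrative assumptions (the kernel version added below uses set_bit/find_first_bit and splits lookup and deletion).

/*
 * Illustrative userspace sketch only -- not the kernel API.  It mimics the
 * idset approach: a statically sized bitmap indexed by (ssid, subchannel
 * number), so scheduling an evaluation never allocates memory.
 */
#include <limits.h>
#include <stdio.h>

#define NUM_SSID      4        /* assumed: __MAX_SSID + 1 */
#define NUM_SCH       65536    /* assumed: __MAX_SUBCHANNEL + 1 */
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define WORDS         ((NUM_SSID * NUM_SCH + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long pending[WORDS];    /* fixed-size set, no per-request kzalloc */

static void sch_add(int ssid, int schno)
{
        unsigned long bit = (unsigned long)ssid * NUM_SCH + schno;

        pending[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

/* Return one pending id and clear its bit; 0 when the set is empty.
 * (The kernel splits lookup and deletion; combined here for brevity.) */
static int sch_get_first(int *ssid, int *schno)
{
        for (unsigned long w = 0; w < WORDS; w++) {
                if (!pending[w])
                        continue;
                unsigned long bit = w * BITS_PER_LONG + __builtin_ctzl(pending[w]);

                pending[w] &= pending[w] - 1;   /* clear lowest set bit */
                *ssid = bit / NUM_SCH;
                *schno = bit % NUM_SCH;
                return 1;
        }
        return 0;
}

int main(void)
{
        int ssid, schno;

        sch_add(0, 0x0aff);     /* mark two subchannels for evaluation */
        sch_add(1, 0x0003);
        while (sch_get_first(&ssid, &schno))    /* drain, like the slow-path worker */
                printf("evaluate %x.%04x\n", ssid, schno);
        return 0;
}
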
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index fe7b3ffa1eaa..cfaf77b320f5 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the S/390 common i/o drivers
 #
 
-obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o
 ccw_device-objs += device.o device_fsm.o device_ops.o
 ccw_device-objs += device_id.o device_pgid.o device_status.o
 obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 0e92c8c89860..ac289e6eadfe 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -491,7 +491,7 @@ void *chp_get_chp_desc(struct chp_id chpid)
  * Handle channel-report-words indicating that the status of a channel-path
  * has changed.
  */
-int chp_process_crw(int id, int status)
+void chp_process_crw(int id, int status)
 {
         struct chp_id chpid;
 
@@ -500,11 +500,9 @@ int chp_process_crw(int id, int status)
         if (status) {
                 if (!chp_is_registered(chpid))
                         chp_new(chpid);
-                return chsc_chp_online(chpid);
-        } else {
+                chsc_chp_online(chpid);
+        } else
                 chsc_chp_offline(chpid);
-                return 0;
-        }
 }
 
 static inline int info_bit_num(struct chp_id id)
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 862af69d9707..65286563c592 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -42,7 +42,7 @@ int chp_get_status(struct chp_id chpid);
 u8 chp_get_sch_opm(struct subchannel *sch);
 int chp_is_registered(struct chp_id chpid);
 void *chp_get_chp_desc(struct chp_id chpid);
-int chp_process_crw(int id, int available);
+void chp_process_crw(int id, int available);
 void chp_remove_cmg_attr(struct channel_path *chp);
 int chp_add_cmg_attr(struct channel_path *chp);
 int chp_new(struct chp_id chpid);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 02615eb43984..89a130a62654 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -195,12 +195,8 @@ static void terminate_internal_io(struct subchannel *sch)
         if (cio_clear(sch)) {
                 /* Recheck device in case clear failed. */
                 sch->lpm = 0;
-                if (device_trigger_verify(sch) != 0) {
-                        if(css_enqueue_subchannel_slow(sch->schid)) {
-                                css_clear_subchannel_slow_list();
-                                need_rescan = 1;
-                        }
-                }
+                if (device_trigger_verify(sch) != 0)
+                        css_schedule_eval(sch->schid);
                 return;
         }
         /* Request retry of internal operation. */
@@ -262,11 +258,8 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 
 out_unreg:
         sch->lpm = 0;
-        if (css_enqueue_subchannel_slow(sch->schid)) {
-                css_clear_subchannel_slow_list();
-                need_rescan = 1;
-        }
         spin_unlock_irq(sch->lock);
+        css_schedule_eval(sch->schid);
         return 0;
 }
 
@@ -281,9 +274,6 @@ void chsc_chp_offline(struct chp_id chpid)
                 return;
         bus_for_each_dev(&css_bus_type, NULL, &chpid,
                          s390_subchannel_remove_chpid);
-
-        if (need_rescan || css_slow_subchannels_exist())
-                queue_work(slow_path_wq, &slow_path_work);
 }
 
 struct res_acc_data {
@@ -331,7 +321,6 @@ static int
 s390_process_res_acc_new_sch(struct subchannel_id schid)
 {
         struct schib schib;
-        int ret;
         /*
          * We don't know the device yet, but since a path
          * may be available now to the device we'll have
@@ -342,15 +331,10 @@ s390_process_res_acc_new_sch(struct subchannel_id schid)
          */
         if (stsch_err(schid, &schib))
                 /* We're through */
-                return need_rescan ? -EAGAIN : -ENXIO;
+                return -ENXIO;
 
         /* Put it on the slow path. */
-        ret = css_enqueue_subchannel_slow(schid);
-        if (ret) {
-                css_clear_subchannel_slow_list();
-                need_rescan = 1;
-                return -EAGAIN;
-        }
+        css_schedule_eval(schid);
         return 0;
 }
 
@@ -392,10 +376,8 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
 }
 
 
-static int
-s390_process_res_acc (struct res_acc_data *res_data)
+static void s390_process_res_acc (struct res_acc_data *res_data)
 {
-        int rc;
         char dbf_txt[15];
 
         sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
@@ -413,12 +395,7 @@ s390_process_res_acc (struct res_acc_data *res_data)
          * The more information we have (info), the less scanning
          * will we have to do.
          */
-        rc = for_each_subchannel(__s390_process_res_acc, res_data);
-        if (css_slow_subchannels_exist())
-                rc = -EAGAIN;
-        else if (rc != -EAGAIN)
-                rc = 0;
-        return rc;
+        for_each_subchannel(__s390_process_res_acc, res_data);
 }
 
 static int
@@ -470,7 +447,7 @@ struct chsc_sei_area {
         /* ccdf has to be big enough for a link-incident record */
 } __attribute__ ((packed));
 
-static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
 {
         struct chp_id chpid;
         int id;
@@ -478,7 +455,7 @@ static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
         CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                       sei_area->rs, sei_area->rsid);
         if (sei_area->rs != 4)
-                return 0;
+                return;
         id = __get_chpid_from_lir(sei_area->ccdf);
         if (id < 0)
                 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
@@ -487,21 +464,18 @@ static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
                 chpid.id = id;
                 chsc_chp_offline(chpid);
         }
-
-        return 0;
 }
 
-static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 {
         struct res_acc_data res_data;
         struct chp_id chpid;
         int status;
-        int rc;
 
         CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                       "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
         if (sei_area->rs != 4)
-                return 0;
+                return;
         chp_id_init(&chpid);
         chpid.id = sei_area->rsid;
         /* allocate a new channel path structure, if needed */
@@ -509,7 +483,7 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
         if (status < 0)
                 chp_new(chpid);
         else if (!status)
-                return 0;
+                return;
         memset(&res_data, 0, sizeof(struct res_acc_data));
         res_data.chpid = chpid;
         if ((sei_area->vf & 0xc0) != 0) {
@@ -521,9 +495,7 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
                 /* link address */
                 res_data.fla_mask = 0xff00;
         }
-        rc = s390_process_res_acc(&res_data);
-
-        return rc;
+        s390_process_res_acc(&res_data);
 }
 
 struct chp_config_data {
@@ -532,7 +504,7 @@ struct chp_config_data {
         u8 pc;
 };
 
-static int chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
 {
         struct chp_config_data *data;
         struct chp_id chpid;
@@ -540,7 +512,7 @@ static int chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
 
         CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
         if (sei_area->rs != 0)
-                return 0;
+                return;
         data = (struct chp_config_data *) &(sei_area->ccdf);
         chp_id_init(&chpid);
         for (num = 0; num <= __MAX_CHPID; num++) {
@@ -561,52 +533,44 @@ static int chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
                         break;
                 }
         }
-
-        return 0;
 }
 
-static int chsc_process_sei(struct chsc_sei_area *sei_area)
+static void chsc_process_sei(struct chsc_sei_area *sei_area)
 {
-        int rc;
-
         /* Check if we might have lost some information. */
-        if (sei_area->flags & 0x40)
+        if (sei_area->flags & 0x40) {
                 CIO_CRW_EVENT(2, "chsc: event overflow\n");
+                css_schedule_eval_all();
+        }
         /* which kind of information was stored? */
-        rc = 0;
         switch (sei_area->cc) {
         case 1: /* link incident*/
-                rc = chsc_process_sei_link_incident(sei_area);
+                chsc_process_sei_link_incident(sei_area);
                 break;
         case 2: /* i/o resource accessibiliy */
-                rc = chsc_process_sei_res_acc(sei_area);
+                chsc_process_sei_res_acc(sei_area);
                 break;
         case 8: /* channel-path-configuration notification */
-                rc = chsc_process_sei_chp_config(sei_area);
+                chsc_process_sei_chp_config(sei_area);
                 break;
         default: /* other stuff */
                 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                               sei_area->cc);
                 break;
         }
-
-        return rc;
 }
 
-int chsc_process_crw(void)
+void chsc_process_crw(void)
 {
         struct chsc_sei_area *sei_area;
-        int ret;
-        int rc;
 
         if (!sei_page)
-                return 0;
+                return;
         /* Access to sei_page is serialized through machine check handler
          * thread, so no need for locking. */
         sei_area = sei_page;
 
         CIO_TRACE_EVENT( 2, "prcss");
-        ret = 0;
         do {
                 memset(sei_area, 0, sizeof(*sei_area));
                 sei_area->request.length = 0x0010;
@@ -616,37 +580,26 @@ int chsc_process_crw(void)
 
                 if (sei_area->response.code == 0x0001) {
                         CIO_CRW_EVENT(4, "chsc: sei successful\n");
-                        rc = chsc_process_sei(sei_area);
-                        if (rc)
-                                ret = rc;
+                        chsc_process_sei(sei_area);
                 } else {
                         CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                       sei_area->response.code);
-                        ret = 0;
                         break;
                 }
         } while (sei_area->flags & 0x80);
-
-        return ret;
 }
 
 static int
 __chp_add_new_sch(struct subchannel_id schid)
 {
         struct schib schib;
-        int ret;
 
         if (stsch_err(schid, &schib))
                 /* We're through */
-                return need_rescan ? -EAGAIN : -ENXIO;
+                return -ENXIO;
 
         /* Put it on the slow path. */
-        ret = css_enqueue_subchannel_slow(schid);
-        if (ret) {
-                css_clear_subchannel_slow_list();
-                need_rescan = 1;
-                return -EAGAIN;
-        }
+        css_schedule_eval(schid);
         return 0;
 }
 
@@ -693,22 +646,15 @@ __chp_add(struct subchannel_id schid, void *data)
         return 0;
 }
 
-int chsc_chp_online(struct chp_id chpid)
+void chsc_chp_online(struct chp_id chpid)
 {
-        int rc;
         char dbf_txt[15];
 
         sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
         CIO_TRACE_EVENT(2, dbf_txt);
 
-        if (chp_get_status(chpid) == 0)
-                return 0;
-        rc = for_each_subchannel(__chp_add, &chpid);
-        if (css_slow_subchannels_exist())
-                rc = -EAGAIN;
-        if (rc != -EAGAIN)
-                rc = 0;
-        return rc;
+        if (chp_get_status(chpid) != 0)
+                for_each_subchannel(__chp_add, &chpid);
 }
 
 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
@@ -749,12 +695,8 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                         sch->driver->verify(&sch->dev);
                 }
         } else if (!sch->lpm) {
-                if (device_trigger_verify(sch) != 0) {
-                        if (css_enqueue_subchannel_slow(sch->schid)) {
-                                css_clear_subchannel_slow_list();
-                                need_rescan = 1;
-                        }
-                }
+                if (device_trigger_verify(sch) != 0)
+                        css_schedule_eval(sch->schid);
         } else if (sch->driver && sch->driver->verify)
                 sch->driver->verify(&sch->dev);
         break;
@@ -801,11 +743,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
                 /* We're through */
                 return -ENXIO;
         /* Put it on the slow path. */
-        if (css_enqueue_subchannel_slow(schid)) {
-                css_clear_subchannel_slow_list();
-                need_rescan = 1;
-                return -EAGAIN;
-        }
+        css_schedule_eval(schid);
         return 0;
 }
 
@@ -826,8 +764,6 @@ int chsc_chp_vary(struct chp_id chpid, int on)
         if (on)
                 /* Scan for new devices on varied on path. */
                 for_each_subchannel(__s390_vary_chpid_on, NULL);
-        if (need_rescan || css_slow_subchannels_exist())
-                queue_work(slow_path_wq, &slow_path_work);
         return 0;
 }
 
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 322586f27cc0..742ef57d2c58 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -36,7 +36,7 @@ struct channel_path_desc {
 struct channel_path;
 
 extern int css_get_ssd_info(struct subchannel *);
-extern int chsc_process_crw(void);
+extern void chsc_process_crw(void);
 
 struct css_general_char {
         u64 : 41;
@@ -79,7 +79,7 @@ extern int chsc_secm(struct channel_subsystem *, int);
 int chsc_chp_vary(struct chp_id chpid, int on);
 int chsc_determine_channel_path_description(struct chp_id chpid,
                                             struct channel_path_desc *desc);
-int chsc_chp_online(struct chp_id chpid);
+void chsc_chp_online(struct chp_id chpid);
 void chsc_chp_offline(struct chp_id chpid);
 int chsc_get_channel_measurement_chars(struct channel_path *chp);
 
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fe0ace7aece8..fcc641e578f4 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -20,8 +20,8 @@
20#include "ioasm.h" 20#include "ioasm.h"
21#include "chsc.h" 21#include "chsc.h"
22#include "device.h" 22#include "device.h"
23#include "idset.h"
23 24
24int need_rescan = 0;
25int css_init_done = 0; 25int css_init_done = 0;
26static int need_reprobe = 0; 26static int need_reprobe = 0;
27static int max_ssid = 0; 27static int max_ssid = 0;
@@ -306,7 +306,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
         return css_probe_device(schid);
 }
 
-static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
+static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
 {
         struct subchannel *sch;
         int ret;
@@ -317,53 +317,66 @@ static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
                 put_device(&sch->dev);
         } else
                 ret = css_evaluate_new_subchannel(schid, slow);
-
-        return ret;
+        if (ret == -EAGAIN)
+                css_schedule_eval(schid);
 }
 
-static int
-css_rescan_devices(struct subchannel_id schid, void *data)
+static struct idset *slow_subchannel_set;
+static spinlock_t slow_subchannel_lock;
+
+static int __init slow_subchannel_init(void)
 {
-        return css_evaluate_subchannel(schid, 1);
+        spin_lock_init(&slow_subchannel_lock);
+        slow_subchannel_set = idset_sch_new();
+        if (!slow_subchannel_set) {
+                printk(KERN_WARNING "cio: could not allocate slow subchannel "
+                       "set\n");
+                return -ENOMEM;
+        }
+        return 0;
 }
 
-struct slow_subchannel {
-        struct list_head slow_list;
-        struct subchannel_id schid;
-};
-
-static LIST_HEAD(slow_subchannels_head);
-static DEFINE_SPINLOCK(slow_subchannel_lock);
+subsys_initcall(slow_subchannel_init);
 
-static void
-css_trigger_slow_path(struct work_struct *unused)
+static void css_slow_path_func(struct work_struct *unused)
 {
-        CIO_TRACE_EVENT(4, "slowpath");
-
-        if (need_rescan) {
-                need_rescan = 0;
-                for_each_subchannel(css_rescan_devices, NULL);
-                return;
-        }
+        struct subchannel_id schid;
 
+        CIO_TRACE_EVENT(4, "slowpath");
         spin_lock_irq(&slow_subchannel_lock);
-        while (!list_empty(&slow_subchannels_head)) {
-                struct slow_subchannel *slow_sch =
-                        list_entry(slow_subchannels_head.next,
-                                   struct slow_subchannel, slow_list);
-
-                list_del_init(slow_subchannels_head.next);
+        init_subchannel_id(&schid);
+        while (idset_sch_get_first(slow_subchannel_set, &schid)) {
+                idset_sch_del(slow_subchannel_set, schid);
                 spin_unlock_irq(&slow_subchannel_lock);
-                css_evaluate_subchannel(slow_sch->schid, 1);
+                css_evaluate_subchannel(schid, 1);
                 spin_lock_irq(&slow_subchannel_lock);
-                kfree(slow_sch);
         }
         spin_unlock_irq(&slow_subchannel_lock);
 }
 
-DECLARE_WORK(slow_path_work, css_trigger_slow_path);
+static DECLARE_WORK(slow_path_work, css_slow_path_func);
 struct workqueue_struct *slow_path_wq;
 
+void css_schedule_eval(struct subchannel_id schid)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&slow_subchannel_lock, flags);
+        idset_sch_add(slow_subchannel_set, schid);
+        queue_work(slow_path_wq, &slow_path_work);
+        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+void css_schedule_eval_all(void)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&slow_subchannel_lock, flags);
+        idset_fill(slow_subchannel_set);
+        queue_work(slow_path_wq, &slow_path_work);
+        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
 /* Reprobe subchannel if unregistered. */
 static int reprobe_subchannel(struct subchannel_id schid, void *data)
 {
@@ -426,33 +439,14 @@ void css_schedule_reprobe(void)
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
 /*
- * Rescan for new devices. FIXME: This is slow.
- * This function is called when we have lost CRWs due to overflows and we have
- * to do subchannel housekeeping.
- */
-void
-css_reiterate_subchannels(void)
-{
-        css_clear_subchannel_slow_list();
-        need_rescan = 1;
-}
-
-/*
  * Called from the machine check handler for subchannel report words.
  */
-int
-css_process_crw(int rsid1, int rsid2)
+void css_process_crw(int rsid1, int rsid2)
 {
-        int ret;
         struct subchannel_id mchk_schid;
 
         CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
                       rsid1, rsid2);
-
-        if (need_rescan)
-                /* We need to iterate all subchannels anyway. */
-                return -EAGAIN;
-
         init_subchannel_id(&mchk_schid);
         mchk_schid.sch_no = rsid1;
         if (rsid2 != 0)
@@ -463,14 +457,7 @@ css_process_crw(int rsid1, int rsid2)
          * use stsch() to find out if the subchannel in question has come
          * or gone.
          */
-        ret = css_evaluate_subchannel(mchk_schid, 0);
-        if (ret == -EAGAIN) {
-                if (css_enqueue_subchannel_slow(mchk_schid)) {
-                        css_clear_subchannel_slow_list();
-                        need_rescan = 1;
-                }
-        }
-        return ret;
+        css_evaluate_subchannel(mchk_schid, 0);
 }
 
 static int __init
@@ -745,47 +732,6 @@ struct bus_type css_bus_type = {
 
 subsys_initcall(init_channel_subsystem);
 
-int
-css_enqueue_subchannel_slow(struct subchannel_id schid)
-{
-        struct slow_subchannel *new_slow_sch;
-        unsigned long flags;
-
-        new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
-        if (!new_slow_sch)
-                return -ENOMEM;
-        new_slow_sch->schid = schid;
-        spin_lock_irqsave(&slow_subchannel_lock, flags);
-        list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
-        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
-        return 0;
-}
-
-void
-css_clear_subchannel_slow_list(void)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&slow_subchannel_lock, flags);
-        while (!list_empty(&slow_subchannels_head)) {
-                struct slow_subchannel *slow_sch =
-                        list_entry(slow_subchannels_head.next,
-                                   struct slow_subchannel, slow_list);
-
-                list_del_init(slow_subchannels_head.next);
-                kfree(slow_sch);
-        }
-        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
-}
-
-
-
-int
-css_slow_subchannels_exist(void)
-{
-        return (!list_empty(&slow_subchannels_head));
-}
-
 MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(css_bus_type);
 EXPORT_SYMBOL_GPL(css_characteristics_avail);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index b2b1a265c602..4b3133a7bae1 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -146,7 +146,7 @@ extern void css_sch_device_unregister(struct subchannel *);
 extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
 extern int css_init_done;
 extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
-extern int css_process_crw(int, int);
+extern void css_process_crw(int, int);
 extern void css_reiterate_subchannels(void);
 
 #define __MAX_SUBCHANNEL 65535
@@ -186,16 +186,12 @@ int device_trigger_verify(struct subchannel *sch);
 void device_kill_pending_timer(struct subchannel *);
 
 /* Helper functions to build lists for the slow path. */
-extern int css_enqueue_subchannel_slow(struct subchannel_id schid);
-void css_walk_subchannel_slow_list(void (*fn)(unsigned long));
-void css_clear_subchannel_slow_list(void);
-int css_slow_subchannels_exist(void);
-extern int need_rescan;
+void css_schedule_eval(struct subchannel_id schid);
+void css_schedule_eval_all(void);
 
 int sch_is_pseudo_sch(struct subchannel *);
 
 extern struct workqueue_struct *slow_path_wq;
-extern struct work_struct slow_path_work;
 
 int subchannel_add_files (struct device *);
 extern struct attribute_group *subch_attr_groups[];
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index d6226881d0df..898ec3b2bebb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -222,10 +222,8 @@ __recover_lost_chpids(struct subchannel *sch, int old_lpm)
                 if (old_lpm & mask)
                         continue;
                 chpid.id = sch->schib.pmcw.chpid[i];
-                if (!chp_is_registered(chpid)) {
-                        need_rescan = 1;
-                        queue_work(slow_path_wq, &slow_path_work);
-                }
+                if (!chp_is_registered(chpid))
+                        css_schedule_eval_all();
         }
 }
 
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
new file mode 100644
index 000000000000..16ea828e99f7
--- /dev/null
+++ b/drivers/s390/cio/idset.c
@@ -0,0 +1,112 @@
+/*
+ * drivers/s390/cio/idset.c
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <asm/bitops.h>
+#include "idset.h"
+#include "css.h"
+
+struct idset {
+        int num_ssid;
+        int num_id;
+        unsigned long bitmap[0];
+};
+
+static inline unsigned long bitmap_size(int num_ssid, int num_id)
+{
+        return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
+}
+
+static struct idset *idset_new(int num_ssid, int num_id)
+{
+        struct idset *set;
+
+        set = kzalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id),
+                      GFP_KERNEL);
+        if (set) {
+                set->num_ssid = num_ssid;
+                set->num_id = num_id;
+        }
+        return set;
+}
+
+void idset_free(struct idset *set)
+{
+        kfree(set);
+}
+
+void idset_clear(struct idset *set)
+{
+        memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id));
+}
+
+void idset_fill(struct idset *set)
+{
+        memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
+}
+
+static inline void idset_add(struct idset *set, int ssid, int id)
+{
+        set_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline void idset_del(struct idset *set, int ssid, int id)
+{
+        clear_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_contains(struct idset *set, int ssid, int id)
+{
+        return test_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_get_first(struct idset *set, int *ssid, int *id)
+{
+        int bitnum;
+
+        bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
+        if (bitnum >= set->num_ssid * set->num_id)
+                return 0;
+        *ssid = bitnum / set->num_id;
+        *id = bitnum % set->num_id;
+        return 1;
+}
+
+struct idset *idset_sch_new(void)
+{
+        return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1);
+}
+
+void idset_sch_add(struct idset *set, struct subchannel_id schid)
+{
+        idset_add(set, schid.ssid, schid.sch_no);
+}
+
+void idset_sch_del(struct idset *set, struct subchannel_id schid)
+{
+        idset_del(set, schid.ssid, schid.sch_no);
+}
+
+int idset_sch_contains(struct idset *set, struct subchannel_id schid)
+{
+        return idset_contains(set, schid.ssid, schid.sch_no);
+}
+
+int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
+{
+        int ssid = 0;
+        int id = 0;
+        int rc;
+
+        rc = idset_get_first(set, &ssid, &id);
+        if (rc) {
+                init_subchannel_id(schid);
+                schid->ssid = ssid;
+                schid->sch_no = id;
+        }
+        return rc;
+}
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
new file mode 100644
index 000000000000..144466ab8c15
--- /dev/null
+++ b/drivers/s390/cio/idset.h
@@ -0,0 +1,25 @@
+/*
+ * drivers/s390/cio/idset.h
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef S390_IDSET_H
+#define S390_IDSET_H S390_IDSET_H
+
+#include "schid.h"
+
+struct idset;
+
+void idset_free(struct idset *set);
+void idset_clear(struct idset *set);
+void idset_fill(struct idset *set);
+
+struct idset *idset_sch_new(void);
+void idset_sch_add(struct idset *set, struct subchannel_id id);
+void idset_sch_del(struct idset *set, struct subchannel_id id);
+int idset_sch_contains(struct idset *set, struct subchannel_id id);
+int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
+
+#endif /* S390_IDSET_H */