aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/s390/cio/chsc.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/s390/cio/chsc.c')
-rw-r--r--drivers/s390/cio/chsc.c372
1 files changed, 193 insertions, 179 deletions
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index aff5d149b729..78e082311f48 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -310,9 +310,14 @@ s390_set_chpid_offline( __u8 chpid)
310 queue_work(slow_path_wq, &slow_path_work); 310 queue_work(slow_path_wq, &slow_path_work);
311} 311}
312 312
/*
 * Context for one "resource accessibility" (res_acc) CRW event:
 * which channel path was reported, plus an optional link-address
 * filter taken from the SEI area.
 */
313struct res_acc_data {
314 struct channel_path *chp;	/* channel path the event refers to (chps[rsid]) */
315 u32 fla_mask;	/* 0 = no filter, 0xff00 = link addr, 0xffff = full link addr */
316 u16 fla;	/* (full) link address from the SEI area, valid if fla_mask != 0 */
317};
318
313static int 319static int
314s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask, 320s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
315 struct subchannel *sch)
316{ 321{
317 int found; 322 int found;
318 int chp; 323 int chp;
@@ -324,8 +329,9 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
324 * check if chpid is in information updated by ssd 329 * check if chpid is in information updated by ssd
325 */ 330 */
326 if (sch->ssd_info.valid && 331 if (sch->ssd_info.valid &&
327 sch->ssd_info.chpid[chp] == chpid && 332 sch->ssd_info.chpid[chp] == res_data->chp->id &&
328 (sch->ssd_info.fla[chp] & fla_mask) == fla) { 333 (sch->ssd_info.fla[chp] & res_data->fla_mask)
334 == res_data->fla) {
329 found = 1; 335 found = 1;
330 break; 336 break;
331 } 337 }
@@ -345,18 +351,80 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
345 return 0x80 >> chp; 351 return 0x80 >> chp;
346} 352}
347 353
/*
 * Handle a subchannel-id for which we have no struct subchannel yet
 * during a res_acc scan: probe it with stsch and, if a device answers,
 * queue it for slow-path recognition.
 *
 * Returns 0 on success, -ENXIO if nothing is there (or the scan is
 * done), -EAGAIN if a full rescan is pending or had to be requested
 * because the slow-path queue was unusable.
 */
354static inline int
355s390_process_res_acc_new_sch(struct subchannel_id schid)
356{
357 struct schib schib;
358 int ret;
359 /*
360 * We don't know the device yet, but since a path
361 * may be available now to the device we'll have
362 * to do recognition again.
363 * Since we don't have any idea about which chpid
364 * that beast may be on we'll have to do a stsch
365 * on all devices, grr...
366 */
367 if (stsch(schid, &schib))
368 /* We're through */
369 return need_rescan ? -EAGAIN : -ENXIO;
370
371 /* Put it on the slow path. */
372 ret = css_enqueue_subchannel_slow(schid);
373 if (ret) {
374 /* Enqueue failed: fall back to a full rescan. */
375 css_clear_subchannel_slow_list();
376 need_rescan = 1;
377 return -EAGAIN;
378 }
379 return 0;
380}
380
348static int 381static int
349s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) 382__s390_process_res_acc(struct subchannel_id schid, void *data)
350{ 383{
384 int chp_mask, old_lpm;
385 struct res_acc_data *res_data;
351 struct subchannel *sch; 386 struct subchannel *sch;
387
388 res_data = (struct res_acc_data *)data;
389 sch = get_subchannel_by_schid(schid);
390 if (!sch)
391 /* Check if a subchannel is newly available. */
392 return s390_process_res_acc_new_sch(schid);
393
394 spin_lock_irq(&sch->lock);
395
396 chp_mask = s390_process_res_acc_sch(res_data, sch);
397
398 if (chp_mask == 0) {
399 spin_unlock_irq(&sch->lock);
400 return 0;
401 }
402 old_lpm = sch->lpm;
403 sch->lpm = ((sch->schib.pmcw.pim &
404 sch->schib.pmcw.pam &
405 sch->schib.pmcw.pom)
406 | chp_mask) & sch->opm;
407 if (!old_lpm && sch->lpm)
408 device_trigger_reprobe(sch);
409 else if (sch->driver && sch->driver->verify)
410 sch->driver->verify(&sch->dev);
411
412 spin_unlock_irq(&sch->lock);
413 put_device(&sch->dev);
414 return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
415}
416
417
418static int
419s390_process_res_acc (struct res_acc_data *res_data)
420{
352 int rc; 421 int rc;
353 struct subchannel_id schid;
354 char dbf_txt[15]; 422 char dbf_txt[15];
355 423
356 sprintf(dbf_txt, "accpr%x", chpid); 424 sprintf(dbf_txt, "accpr%x", res_data->chp->id);
357 CIO_TRACE_EVENT( 2, dbf_txt); 425 CIO_TRACE_EVENT( 2, dbf_txt);
358 if (fla != 0) { 426 if (res_data->fla != 0) {
359 sprintf(dbf_txt, "fla%x", fla); 427 sprintf(dbf_txt, "fla%x", res_data->fla);
360 CIO_TRACE_EVENT( 2, dbf_txt); 428 CIO_TRACE_EVENT( 2, dbf_txt);
361 } 429 }
362 430
@@ -367,71 +435,11 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
367 * The more information we have (info), the less scanning 435 * The more information we have (info), the less scanning
368 * will we have to do. 436 * will we have to do.
369 */ 437 */
370 438 rc = for_each_subchannel(__s390_process_res_acc, res_data);
371 if (!get_chp_status(chpid)) 439 if (css_slow_subchannels_exist())
372 return 0; /* no need to do the rest */ 440 rc = -EAGAIN;
373 441 else if (rc != -EAGAIN)
374 rc = 0; 442 rc = 0;
375 init_subchannel_id(&schid);
376 do {
377 int chp_mask, old_lpm;
378
379 sch = get_subchannel_by_schid(schid);
380 if (!sch) {
381 struct schib schib;
382 int ret;
383 /*
384 * We don't know the device yet, but since a path
385 * may be available now to the device we'll have
386 * to do recognition again.
387 * Since we don't have any idea about which chpid
388 * that beast may be on we'll have to do a stsch
389 * on all devices, grr...
390 */
391 if (stsch(schid, &schib)) {
392 /* We're through */
393 if (need_rescan)
394 rc = -EAGAIN;
395 break;
396 }
397 if (need_rescan) {
398 rc = -EAGAIN;
399 continue;
400 }
401 /* Put it on the slow path. */
402 ret = css_enqueue_subchannel_slow(schid);
403 if (ret) {
404 css_clear_subchannel_slow_list();
405 need_rescan = 1;
406 }
407 rc = -EAGAIN;
408 continue;
409 }
410
411 spin_lock_irq(&sch->lock);
412
413 chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);
414
415 if (chp_mask == 0) {
416
417 spin_unlock_irq(&sch->lock);
418 continue;
419 }
420 old_lpm = sch->lpm;
421 sch->lpm = ((sch->schib.pmcw.pim &
422 sch->schib.pmcw.pam &
423 sch->schib.pmcw.pom)
424 | chp_mask) & sch->opm;
425 if (!old_lpm && sch->lpm)
426 device_trigger_reprobe(sch);
427 else if (sch->driver && sch->driver->verify)
428 sch->driver->verify(&sch->dev);
429
430 spin_unlock_irq(&sch->lock);
431 put_device(&sch->dev);
432 if (fla_mask == 0xffff)
433 break;
434 } while (schid.sch_no++ < __MAX_SUBCHANNEL);
435 return rc; 443 return rc;
436} 444}
437 445
@@ -469,6 +477,7 @@ int
469chsc_process_crw(void) 477chsc_process_crw(void)
470{ 478{
471 int chpid, ret; 479 int chpid, ret;
480 struct res_acc_data res_data;
472 struct { 481 struct {
473 struct chsc_header request; 482 struct chsc_header request;
474 u32 reserved1; 483 u32 reserved1;
@@ -503,7 +512,7 @@ chsc_process_crw(void)
503 do { 512 do {
504 int ccode, status; 513 int ccode, status;
505 memset(sei_area, 0, sizeof(*sei_area)); 514 memset(sei_area, 0, sizeof(*sei_area));
506 515 memset(&res_data, 0, sizeof(struct res_acc_data));
507 sei_area->request = (struct chsc_header) { 516 sei_area->request = (struct chsc_header) {
508 .length = 0x0010, 517 .length = 0x0010,
509 .code = 0x000e, 518 .code = 0x000e,
@@ -576,26 +585,23 @@ chsc_process_crw(void)
576 if (status < 0) 585 if (status < 0)
577 new_channel_path(sei_area->rsid); 586 new_channel_path(sei_area->rsid);
578 else if (!status) 587 else if (!status)
579 return 0; 588 break;
580 if ((sei_area->vf & 0x80) == 0) { 589 res_data.chp = chps[sei_area->rsid];
581 pr_debug("chpid: %x\n", sei_area->rsid); 590 pr_debug("chpid: %x", sei_area->rsid);
582 ret = s390_process_res_acc(sei_area->rsid, 591 if ((sei_area->vf & 0xc0) != 0) {
583 0, 0); 592 res_data.fla = sei_area->fla;
584 } else if ((sei_area->vf & 0xc0) == 0x80) { 593 if ((sei_area->vf & 0xc0) == 0xc0) {
585 pr_debug("chpid: %x link addr: %x\n", 594 pr_debug(" full link addr: %x",
586 sei_area->rsid, sei_area->fla); 595 sei_area->fla);
587 ret = s390_process_res_acc(sei_area->rsid, 596 res_data.fla_mask = 0xffff;
588 sei_area->fla, 597 } else {
589 0xff00); 598 pr_debug(" link addr: %x",
590 } else if ((sei_area->vf & 0xc0) == 0xc0) { 599 sei_area->fla);
591 pr_debug("chpid: %x full link addr: %x\n", 600 res_data.fla_mask = 0xff00;
592 sei_area->rsid, sei_area->fla); 601 }
593 ret = s390_process_res_acc(sei_area->rsid,
594 sei_area->fla,
595 0xffff);
596 } 602 }
597 pr_debug("\n"); 603 ret = s390_process_res_acc(&res_data);
598 604 pr_debug("\n\n");
599 break; 605 break;
600 606
601 default: /* other stuff */ 607 default: /* other stuff */
@@ -607,12 +613,70 @@ chsc_process_crw(void)
607 return ret; 613 return ret;
608} 614}
609 615
/*
 * Handle a subchannel-id with no struct subchannel during a chp_add
 * scan: probe it with stsch and, if a device answers, queue it for
 * slow-path evaluation.  Mirrors s390_process_res_acc_new_sch.
 *
 * Returns 0 on success, -ENXIO if no device is present, -EAGAIN if a
 * rescan is pending or the slow-path queue had to be abandoned.
 */
616static inline int
617__chp_add_new_sch(struct subchannel_id schid)
618{
619 struct schib schib;
620 int ret;
621
622 if (stsch(schid, &schib))
623 /* We're through */
624 return need_rescan ? -EAGAIN : -ENXIO;
625
626 /* Put it on the slow path. */
627 ret = css_enqueue_subchannel_slow(schid);
628 if (ret) {
629 /* Enqueue failed: fall back to a full rescan. */
630 css_clear_subchannel_slow_list();
631 need_rescan = 1;
632 return -EAGAIN;
633 }
634 return 0;
635}
635
636
610static int 637static int
611chp_add(int chpid) 638__chp_add(struct subchannel_id schid, void *data)
612{ 639{
640 int i;
641 struct channel_path *chp;
613 struct subchannel *sch; 642 struct subchannel *sch;
614 int ret, rc; 643
615 struct subchannel_id schid; 644 chp = (struct channel_path *)data;
645 sch = get_subchannel_by_schid(schid);
646 if (!sch)
647 /* Check if the subchannel is now available. */
648 return __chp_add_new_sch(schid);
649 spin_lock(&sch->lock);
650 for (i=0; i<8; i++)
651 if (sch->schib.pmcw.chpid[i] == chp->id) {
652 if (stsch(sch->schid, &sch->schib) != 0) {
653 /* Endgame. */
654 spin_unlock(&sch->lock);
655 return -ENXIO;
656 }
657 break;
658 }
659 if (i==8) {
660 spin_unlock(&sch->lock);
661 return 0;
662 }
663 sch->lpm = ((sch->schib.pmcw.pim &
664 sch->schib.pmcw.pam &
665 sch->schib.pmcw.pom)
666 | 0x80 >> i) & sch->opm;
667
668 if (sch->driver && sch->driver->verify)
669 sch->driver->verify(&sch->dev);
670
671 spin_unlock(&sch->lock);
672 put_device(&sch->dev);
673 return 0;
674}
675
676static int
677chp_add(int chpid)
678{
679 int rc;
616 char dbf_txt[15]; 680 char dbf_txt[15];
617 681
618 if (!get_chp_status(chpid)) 682 if (!get_chp_status(chpid))
@@ -621,60 +685,11 @@ chp_add(int chpid)
621 sprintf(dbf_txt, "cadd%x", chpid); 685 sprintf(dbf_txt, "cadd%x", chpid);
622 CIO_TRACE_EVENT(2, dbf_txt); 686 CIO_TRACE_EVENT(2, dbf_txt);
623 687
624 rc = 0; 688 rc = for_each_subchannel(__chp_add, chps[chpid]);
625 init_subchannel_id(&schid); 689 if (css_slow_subchannels_exist())
626 do { 690 rc = -EAGAIN;
627 int i; 691 if (rc != -EAGAIN)
628 692 rc = 0;
629 sch = get_subchannel_by_schid(schid);
630 if (!sch) {
631 struct schib schib;
632
633 if (stsch(schid, &schib)) {
634 /* We're through */
635 if (need_rescan)
636 rc = -EAGAIN;
637 break;
638 }
639 if (need_rescan) {
640 rc = -EAGAIN;
641 continue;
642 }
643 /* Put it on the slow path. */
644 ret = css_enqueue_subchannel_slow(schid);
645 if (ret) {
646 css_clear_subchannel_slow_list();
647 need_rescan = 1;
648 }
649 rc = -EAGAIN;
650 continue;
651 }
652
653 spin_lock(&sch->lock);
654 for (i=0; i<8; i++)
655 if (sch->schib.pmcw.chpid[i] == chpid) {
656 if (stsch(sch->schid, &sch->schib) != 0) {
657 /* Endgame. */
658 spin_unlock(&sch->lock);
659 return rc;
660 }
661 break;
662 }
663 if (i==8) {
664 spin_unlock(&sch->lock);
665 return rc;
666 }
667 sch->lpm = ((sch->schib.pmcw.pim &
668 sch->schib.pmcw.pam &
669 sch->schib.pmcw.pom)
670 | 0x80 >> i) & sch->opm;
671
672 if (sch->driver && sch->driver->verify)
673 sch->driver->verify(&sch->dev);
674
675 spin_unlock(&sch->lock);
676 put_device(&sch->dev);
677 } while (schid.sch_no++ < __MAX_SUBCHANNEL);
678 return rc; 693 return rc;
679} 694}
680 695
@@ -786,6 +801,29 @@ s390_subchannel_vary_chpid_on(struct device *dev, void *data)
786 return 0; 801 return 0;
787} 802}
788 803
/*
 * for_each_subchannel() callback used after varying a chpid online:
 * subchannels we already know are skipped (just drop the reference
 * taken by get_subchannel_by_schid); a previously unknown id that now
 * answers stsch is queued for slow-path recognition.  The data
 * argument is unused.
 *
 * Returns 0 normally, -ENXIO when stsch finds nothing (ends the
 * scan), -EAGAIN when enqueueing failed and a full rescan was
 * requested instead.
 */
804static int
805__s390_vary_chpid_on(struct subchannel_id schid, void *data)
806{
807 struct schib schib;
808 struct subchannel *sch;
809
810 sch = get_subchannel_by_schid(schid);
811 if (sch) {
812 /* We already know this subchannel; nothing to do. */
813 put_device(&sch->dev);
814 return 0;
815 }
816 if (stsch(schid, &schib))
817 /* We're through */
818 return -ENXIO;
819 /* Put it on the slow path. */
820 if (css_enqueue_subchannel_slow(schid)) {
821 css_clear_subchannel_slow_list();
822 need_rescan = 1;
823 return -EAGAIN;
824 }
825 return 0;
826}
826
789/* 827/*
790 * Function: s390_vary_chpid 828 * Function: s390_vary_chpid
791 * Varies the specified chpid online or offline 829 * Varies the specified chpid online or offline
@@ -794,9 +832,7 @@ static int
794s390_vary_chpid( __u8 chpid, int on) 832s390_vary_chpid( __u8 chpid, int on)
795{ 833{
796 char dbf_text[15]; 834 char dbf_text[15];
797 int status, ret; 835 int status;
798 struct subchannel_id schid;
799 struct subchannel *sch;
800 836
801 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid); 837 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
802 CIO_TRACE_EVENT( 2, dbf_text); 838 CIO_TRACE_EVENT( 2, dbf_text);
@@ -821,31 +857,9 @@ s390_vary_chpid( __u8 chpid, int on)
821 bus_for_each_dev(&css_bus_type, NULL, &chpid, on ? 857 bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
822 s390_subchannel_vary_chpid_on : 858 s390_subchannel_vary_chpid_on :
823 s390_subchannel_vary_chpid_off); 859 s390_subchannel_vary_chpid_off);
824 if (!on) 860 if (on)
825 goto out; 861 /* Scan for new devices on varied on path. */
826 /* Scan for new devices on varied on path. */ 862 for_each_subchannel(__s390_vary_chpid_on, NULL);
827 init_subchannel_id(&schid);
828 do {
829 struct schib schib;
830
831 if (need_rescan)
832 break;
833 sch = get_subchannel_by_schid(schid);
834 if (sch) {
835 put_device(&sch->dev);
836 continue;
837 }
838 if (stsch(schid, &schib))
839 /* We're through */
840 break;
841 /* Put it on the slow path. */
842 ret = css_enqueue_subchannel_slow(schid);
843 if (ret) {
844 css_clear_subchannel_slow_list();
845 need_rescan = 1;
846 }
847 } while (schid.sch_no++ < __MAX_SUBCHANNEL);
848out:
849 if (need_rescan || css_slow_subchannels_exist()) 863 if (need_rescan || css_slow_subchannels_exist())
850 queue_work(slow_path_wq, &slow_path_work); 864 queue_work(slow_path_wq, &slow_path_work);
851 return 0; 865 return 0;