Diffstat (limited to 'drivers/s390/cio/chsc.c')
 drivers/s390/cio/chsc.c | 63 +++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 35 insertions(+), 28 deletions(-)
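This patch is one step in converting the s390 channel-subsystem code from the misnamed bare `int irq` subchannel number to a structured ID: every `sch->irq` access and every integer scan loop in chsc.c now goes through `struct subchannel_id`. The type and its helpers are defined outside this file; the sketch below is only a plausible approximation (the field layout and the `one` bit are assumptions, not taken from this diff):

```c
#include <string.h>

typedef unsigned int __u32;	/* stand-in for the kernel typedef */

/* Assumed shape of the new ID; the only field this diff relies on
 * is the 16-bit subchannel number sch_no. */
struct subchannel_id {
	__u32 reserved : 15;	/* assumed reserved/control bits */
	__u32 one      : 1;	/* assumed validity bit */
	__u32 sch_no   : 16;	/* subchannel number, 0..0xffff */
} __attribute__ ((packed, aligned(4)));

/* Assumed initializer used by the converted loops: start at sch_no 0. */
static inline void init_subchannel_id(struct subchannel_id *schid)
{
	memset(schid, 0, sizeof(*schid));
	schid->one = 1;
}
```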
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index fa3c23b80e3a..aff5d149b729 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -104,8 +104,8 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
 		.code = 0x0004,
 	};
 
-	ssd_area->f_sch = sch->irq;
-	ssd_area->l_sch = sch->irq;
+	ssd_area->f_sch = sch->schid.sch_no;
+	ssd_area->l_sch = sch->schid.sch_no;
 
 	ccode = chsc(ssd_area);
 	if (ccode > 0) {
@@ -147,7 +147,8 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
 	 */
 	if (ssd_area->st > 3) { /* uhm, that looks strange... */
 		CIO_CRW_EVENT(0, "Strange subchannel type %d"
-			      " for sch %04x\n", ssd_area->st, sch->irq);
+			      " for sch %04x\n", ssd_area->st,
+			      sch->schid.sch_no);
 		/*
 		 * There may have been a new subchannel type defined in the
 		 * time since this code was written; since we don't know which
@@ -157,7 +158,7 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
 	} else {
 		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
 		CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n",
-			      sch->irq, type[ssd_area->st]);
+			      sch->schid.sch_no, type[ssd_area->st]);
 
 		sch->ssd_info.valid = 1;
 		sch->ssd_info.type = ssd_area->st;
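Both debug messages keep the `%04x` conversion: `sch_no` is a 16-bit subchannel number, so the logged values print exactly as the old `sch->irq` did.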
@@ -232,7 +233,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 		mask = 0x80 >> j;
 		spin_lock(&sch->lock);
 
-		stsch(sch->irq, &schib);
+		stsch(sch->schid, &schib);
 		if (!schib.pmcw.dnv)
 			goto out_unreg;
 		memcpy(&sch->schib, &schib, sizeof(struct schib));
@@ -284,7 +285,7 @@ out_unlock:
 out_unreg:
 	spin_unlock(&sch->lock);
 	sch->lpm = 0;
-	if (css_enqueue_subchannel_slow(sch->irq)) {
+	if (css_enqueue_subchannel_slow(sch->schid)) {
 		css_clear_subchannel_slow_list();
 		need_rescan = 1;
 	}
@@ -337,7 +338,7 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
 	 * new path information and eventually check for logically
 	 * offline chpids.
 	 */
-	ccode = stsch(sch->irq, &sch->schib);
+	ccode = stsch(sch->schid, &sch->schib);
 	if (ccode > 0)
 		return 0;
 
@@ -348,7 +349,8 @@ static int
 s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
 {
 	struct subchannel *sch;
-	int irq, rc;
+	int rc;
+	struct subchannel_id schid;
 	char dbf_txt[15];
 
 	sprintf(dbf_txt, "accpr%x", chpid);
@@ -370,10 +372,11 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
 		return 0; /* no need to do the rest */
 
 	rc = 0;
-	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
+	init_subchannel_id(&schid);
+	do {
 		int chp_mask, old_lpm;
 
-		sch = get_subchannel_by_schid(irq);
+		sch = get_subchannel_by_schid(schid);
 		if (!sch) {
 			struct schib schib;
 			int ret;
@@ -385,7 +388,7 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
 			 * that beast may be on we'll have to do a stsch
 			 * on all devices, grr...
 			 */
-			if (stsch(irq, &schib)) {
+			if (stsch(schid, &schib)) {
 				/* We're through */
 				if (need_rescan)
 					rc = -EAGAIN;
@@ -396,7 +399,7 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
 				continue;
 			}
 			/* Put it on the slow path. */
-			ret = css_enqueue_subchannel_slow(irq);
+			ret = css_enqueue_subchannel_slow(schid);
 			if (ret) {
 				css_clear_subchannel_slow_list();
 				need_rescan = 1;
@@ -428,7 +431,7 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
 		put_device(&sch->dev);
 		if (fla_mask == 0xffff)
 			break;
-	}
+	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
 	return rc;
 }
 
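The loop conversion above is the one structural change in `s390_process_res_acc()`, and the same rewrite recurs in `chp_add()` and `s390_vary_chpid()` below. Condensed to the idiom itself, with the body elided and `__MAX_SUBCHANNEL` assumed to be the highest valid subchannel number (0xffff):

```c
/* Before: a plain integer indexed every possible subchannel. */
for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
	sch = get_subchannel_by_schid(irq);
	/* ... */
}

/* After: a structured ID walks the same range; the post-increment
 * test runs the body for sch_no 0 through __MAX_SUBCHANNEL inclusive. */
init_subchannel_id(&schid);
do {
	sch = get_subchannel_by_schid(schid);
	/* ... */
} while (schid.sch_no++ < __MAX_SUBCHANNEL);
```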
@@ -608,7 +611,8 @@ static int
 chp_add(int chpid)
 {
 	struct subchannel *sch;
-	int irq, ret, rc;
+	int ret, rc;
+	struct subchannel_id schid;
 	char dbf_txt[15];
 
 	if (!get_chp_status(chpid))
@@ -618,14 +622,15 @@ chp_add(int chpid)
 	CIO_TRACE_EVENT(2, dbf_txt);
 
 	rc = 0;
-	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
+	init_subchannel_id(&schid);
+	do {
 		int i;
 
-		sch = get_subchannel_by_schid(irq);
+		sch = get_subchannel_by_schid(schid);
 		if (!sch) {
 			struct schib schib;
 
-			if (stsch(irq, &schib)) {
+			if (stsch(schid, &schib)) {
 				/* We're through */
 				if (need_rescan)
 					rc = -EAGAIN;
@@ -636,7 +641,7 @@ chp_add(int chpid)
 				continue;
 			}
 			/* Put it on the slow path. */
-			ret = css_enqueue_subchannel_slow(irq);
+			ret = css_enqueue_subchannel_slow(schid);
 			if (ret) {
 				css_clear_subchannel_slow_list();
 				need_rescan = 1;
@@ -648,7 +653,7 @@ chp_add(int chpid)
 		spin_lock(&sch->lock);
 		for (i=0; i<8; i++)
 			if (sch->schib.pmcw.chpid[i] == chpid) {
-				if (stsch(sch->irq, &sch->schib) != 0) {
+				if (stsch(sch->schid, &sch->schib) != 0) {
 					/* Endgame. */
 					spin_unlock(&sch->lock);
 					return rc;
@@ -669,7 +674,7 @@ chp_add(int chpid)
 
 		spin_unlock(&sch->lock);
 		put_device(&sch->dev);
-	}
+	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
 	return rc;
 }
 
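`chp_add()` mirrors the `s390_process_res_acc()` conversion line for line: the integer scan becomes the `init_subchannel_id()`/do-while walk, unknown subchannels are probed with `stsch(schid, ...)` and queued via `css_enqueue_subchannel_slow(schid)`, and already-known subchannels are refreshed through `sch->schid`.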
@@ -702,7 +707,7 @@ __check_for_io_and_kill(struct subchannel *sch, int index)
 	if (!device_is_online(sch))
 		/* cio could be doing I/O. */
 		return 0;
-	cc = stsch(sch->irq, &sch->schib);
+	cc = stsch(sch->schid, &sch->schib);
 	if (cc)
 		return 0;
 	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
@@ -743,7 +748,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
 	 * just varied off path. Then kill it.
 	 */
 	if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
-		if (css_enqueue_subchannel_slow(sch->irq)) {
+		if (css_enqueue_subchannel_slow(sch->schid)) {
 			css_clear_subchannel_slow_list();
 			need_rescan = 1;
 		}
@@ -789,7 +794,8 @@ static int
 s390_vary_chpid( __u8 chpid, int on)
 {
 	char dbf_text[15];
-	int status, irq, ret;
+	int status, ret;
+	struct subchannel_id schid;
 	struct subchannel *sch;
 
 	sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
@@ -818,26 +824,27 @@ s390_vary_chpid( __u8 chpid, int on)
 	if (!on)
 		goto out;
 	/* Scan for new devices on varied on path. */
-	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
+	init_subchannel_id(&schid);
+	do {
 		struct schib schib;
 
 		if (need_rescan)
 			break;
-		sch = get_subchannel_by_schid(irq);
+		sch = get_subchannel_by_schid(schid);
 		if (sch) {
 			put_device(&sch->dev);
 			continue;
 		}
-		if (stsch(irq, &schib))
+		if (stsch(schid, &schib))
 			/* We're through */
 			break;
 		/* Put it on the slow path. */
-		ret = css_enqueue_subchannel_slow(irq);
+		ret = css_enqueue_subchannel_slow(schid);
 		if (ret) {
 			css_clear_subchannel_slow_list();
 			need_rescan = 1;
 		}
-	}
+	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
 out:
 	if (need_rescan || css_slow_subchannels_exist())
 		queue_work(slow_path_wq, &slow_path_work);
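One detail worth checking is that the new bound preserves the scan range. Assuming `__MAX_SUBCHANNELS` was 65536 (the old exclusive bound) and `__MAX_SUBCHANNEL` is 0xffff (the new inclusive one), both loop forms visit subchannel numbers 0 through 65535 exactly once. A standalone sanity check of that arithmetic (both constants here are assumptions, renamed to make that explicit):

```c
#include <assert.h>

#define OLD_MAX_SUBCHANNELS 65536U	/* assumed old exclusive bound */
#define NEW_MAX_SUBCHANNEL  0xffffU	/* assumed new inclusive bound */

int main(void)
{
	unsigned int visits = 0, sch_no = 0;

	/* Same shape as the converted loops; sch_no is a full unsigned
	 * int here so the final post-increment cannot wrap. */
	do {
		visits++;
	} while (sch_no++ < NEW_MAX_SUBCHANNEL);

	assert(visits == OLD_MAX_SUBCHANNELS);	/* 65536 iterations */
	return 0;
}
```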