path: root/drivers/s390/cio/chsc.c
Diffstat (limited to 'drivers/s390/cio/chsc.c')
-rw-r--r--  drivers/s390/cio/chsc.c  473
1 files changed, 276 insertions(+), 197 deletions(-)
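
The commit below reworks chsc.c along three lines visible in the hunks: channel paths move from the file-global chps[] array to the per-subsystem css[0]->chps[] array, subchannels are addressed by a struct subchannel_id (ssid plus sch_no) instead of a bare sch->irq number, and the open-coded `for (irq = 0; irq < __MAX_SUBCHANNELS; irq++)` scans are replaced by callbacks passed to for_each_subchannel(). The iterator itself is defined elsewhere in the series; what follows is a minimal sketch of the contract the converted call sites assume (visit every subchannel id, stop at the first non-zero return), with init_subchannel_id() and __MAX_SUBCHANNEL as assumed helpers rather than code from this commit:

/*
 * Sketch only: the iteration contract inferred from the call sites
 * below (for_each_subchannel(__s390_process_res_acc, ...) etc.).
 * init_subchannel_id() and __MAX_SUBCHANNEL are assumptions.
 */
int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);	/* assumed: start at ssid 0, sch_no 0 */
	do {
		ret = fn(schid, data);	/* e.g. __chp_add, __s390_vary_chpid_on */
		if (ret)		/* any non-zero value ends the scan */
			break;
	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
	return ret;
}

The callbacks in this diff return -ENXIO from their stsch() miss path precisely so that a scan stops once it runs past the last existing subchannel.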
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index fa3c23b80e3a..7270808c02d1 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,7 +1,7 @@
 /*
  * drivers/s390/cio/chsc.c
  *  S/390 common I/O routines -- channel subsystem call
- *  $Revision: 1.120 $
+ *  $Revision: 1.126 $
  *
  * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
  *                         IBM Corporation
@@ -24,8 +24,6 @@
 #include "ioasm.h"
 #include "chsc.h"
 
-static struct channel_path *chps[NR_CHPIDS];
-
 static void *sei_page;
 
 static int new_channel_path(int chpid);
@@ -33,13 +31,13 @@ static int new_channel_path(int chpid);
 static inline void
 set_chp_logically_online(int chp, int onoff)
 {
-	chps[chp]->state = onoff;
+	css[0]->chps[chp]->state = onoff;
 }
 
 static int
 get_chp_status(int chp)
 {
-	return (chps[chp] ? chps[chp]->state : -ENODEV);
+	return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
 }
 
 void
@@ -77,7 +75,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
 
 	struct {
 		struct chsc_header request;
-		u16 reserved1;
+		u16 reserved1a:10;
+		u16 ssid:2;
+		u16 reserved1b:4;
 		u16 f_sch;	/* first subchannel */
 		u16 reserved2;
 		u16 l_sch;	/* last subchannel */
@@ -104,8 +104,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
 		.code = 0x0004,
 	};
 
-	ssd_area->f_sch = sch->irq;
-	ssd_area->l_sch = sch->irq;
+	ssd_area->ssid = sch->schid.ssid;
+	ssd_area->f_sch = sch->schid.sch_no;
+	ssd_area->l_sch = sch->schid.sch_no;
 
 	ccode = chsc(ssd_area);
 	if (ccode > 0) {
@@ -147,7 +148,8 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
 	 */
 	if (ssd_area->st > 3) { /* uhm, that looks strange... */
 		CIO_CRW_EVENT(0, "Strange subchannel type %d"
-			      " for sch %04x\n", ssd_area->st, sch->irq);
+			      " for sch 0.%x.%04x\n", ssd_area->st,
+			      sch->schid.ssid, sch->schid.sch_no);
 		/*
 		 * There may have been a new subchannel type defined in the
 		 * time since this code was written; since we don't know which
@@ -156,8 +158,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
 		return 0;
 	} else {
 		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
-		CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n",
-			      sch->irq, type[ssd_area->st]);
+		CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
+			      sch->schid.ssid, sch->schid.sch_no,
+			      type[ssd_area->st]);
 
 		sch->ssd_info.valid = 1;
 		sch->ssd_info.type = ssd_area->st;
@@ -218,13 +221,13 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 	int j;
 	int mask;
 	struct subchannel *sch;
-	__u8 *chpid;
+	struct channel_path *chpid;
 	struct schib schib;
 
 	sch = to_subchannel(dev);
 	chpid = data;
 	for (j = 0; j < 8; j++)
-		if (sch->schib.pmcw.chpid[j] == *chpid)
+		if (sch->schib.pmcw.chpid[j] == chpid->id)
 			break;
 	if (j >= 8)
 		return 0;
@@ -232,7 +235,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 	mask = 0x80 >> j;
 	spin_lock(&sch->lock);
 
-	stsch(sch->irq, &schib);
+	stsch(sch->schid, &schib);
 	if (!schib.pmcw.dnv)
 		goto out_unreg;
 	memcpy(&sch->schib, &schib, sizeof(struct schib));
@@ -284,7 +287,7 @@ out_unlock:
 out_unreg:
 	spin_unlock(&sch->lock);
 	sch->lpm = 0;
-	if (css_enqueue_subchannel_slow(sch->irq)) {
+	if (css_enqueue_subchannel_slow(sch->schid)) {
 		css_clear_subchannel_slow_list();
 		need_rescan = 1;
 	}
@@ -295,23 +298,30 @@ static inline void
 s390_set_chpid_offline( __u8 chpid)
 {
 	char dbf_txt[15];
+	struct device *dev;
 
 	sprintf(dbf_txt, "chpr%x", chpid);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
 	if (get_chp_status(chpid) <= 0)
 		return;
-
-	bus_for_each_dev(&css_bus_type, NULL, &chpid,
+	dev = get_device(&css[0]->chps[chpid]->dev);
+	bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
 			 s390_subchannel_remove_chpid);
 
 	if (need_rescan || css_slow_subchannels_exist())
 		queue_work(slow_path_wq, &slow_path_work);
+	put_device(dev);
 }
 
+struct res_acc_data {
+	struct channel_path *chp;
+	u32 fla_mask;
+	u16 fla;
+};
+
 static int
-s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
-			 struct subchannel *sch)
+s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
 {
 	int found;
 	int chp;
@@ -323,8 +333,9 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
 		 * check if chpid is in information updated by ssd
 		 */
 		if (sch->ssd_info.valid &&
-		    sch->ssd_info.chpid[chp] == chpid &&
-		    (sch->ssd_info.fla[chp] & fla_mask) == fla) {
+		    sch->ssd_info.chpid[chp] == res_data->chp->id &&
+		    (sch->ssd_info.fla[chp] & res_data->fla_mask)
+		    == res_data->fla) {
 			found = 1;
 			break;
 		}
@@ -337,24 +348,87 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
 	 * new path information and eventually check for logically
 	 * offline chpids.
 	 */
-	ccode = stsch(sch->irq, &sch->schib);
+	ccode = stsch(sch->schid, &sch->schib);
 	if (ccode > 0)
 		return 0;
 
 	return 0x80 >> chp;
 }
 
+static inline int
+s390_process_res_acc_new_sch(struct subchannel_id schid)
+{
+	struct schib schib;
+	int ret;
+	/*
+	 * We don't know the device yet, but since a path
+	 * may be available now to the device we'll have
+	 * to do recognition again.
+	 * Since we don't have any idea about which chpid
+	 * that beast may be on we'll have to do a stsch
+	 * on all devices, grr...
+	 */
+	if (stsch_err(schid, &schib))
+		/* We're through */
+		return need_rescan ? -EAGAIN : -ENXIO;
+
+	/* Put it on the slow path. */
+	ret = css_enqueue_subchannel_slow(schid);
+	if (ret) {
+		css_clear_subchannel_slow_list();
+		need_rescan = 1;
+		return -EAGAIN;
+	}
+	return 0;
+}
+
 static int
-s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
+__s390_process_res_acc(struct subchannel_id schid, void *data)
 {
+	int chp_mask, old_lpm;
+	struct res_acc_data *res_data;
 	struct subchannel *sch;
-	int irq, rc;
+
+	res_data = (struct res_acc_data *)data;
+	sch = get_subchannel_by_schid(schid);
+	if (!sch)
+		/* Check if a subchannel is newly available. */
+		return s390_process_res_acc_new_sch(schid);
+
+	spin_lock_irq(&sch->lock);
+
+	chp_mask = s390_process_res_acc_sch(res_data, sch);
+
+	if (chp_mask == 0) {
+		spin_unlock_irq(&sch->lock);
+		return 0;
+	}
+	old_lpm = sch->lpm;
+	sch->lpm = ((sch->schib.pmcw.pim &
+		     sch->schib.pmcw.pam &
+		     sch->schib.pmcw.pom)
+		    | chp_mask) & sch->opm;
+	if (!old_lpm && sch->lpm)
+		device_trigger_reprobe(sch);
+	else if (sch->driver && sch->driver->verify)
+		sch->driver->verify(&sch->dev);
+
+	spin_unlock_irq(&sch->lock);
+	put_device(&sch->dev);
+	return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
+}
+
+
+static int
+s390_process_res_acc (struct res_acc_data *res_data)
+{
+	int rc;
 	char dbf_txt[15];
 
-	sprintf(dbf_txt, "accpr%x", chpid);
+	sprintf(dbf_txt, "accpr%x", res_data->chp->id);
 	CIO_TRACE_EVENT( 2, dbf_txt);
-	if (fla != 0) {
-		sprintf(dbf_txt, "fla%x", fla);
+	if (res_data->fla != 0) {
+		sprintf(dbf_txt, "fla%x", res_data->fla);
 		CIO_TRACE_EVENT( 2, dbf_txt);
 	}
 
@@ -365,70 +439,11 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
 	 * The more information we have (info), the less scanning
 	 * will we have to do.
 	 */
-
-	if (!get_chp_status(chpid))
-		return 0; /* no need to do the rest */
-
-	rc = 0;
-	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
-		int chp_mask, old_lpm;
-
-		sch = get_subchannel_by_schid(irq);
-		if (!sch) {
-			struct schib schib;
-			int ret;
-			/*
-			 * We don't know the device yet, but since a path
-			 * may be available now to the device we'll have
-			 * to do recognition again.
-			 * Since we don't have any idea about which chpid
-			 * that beast may be on we'll have to do a stsch
-			 * on all devices, grr...
-			 */
-			if (stsch(irq, &schib)) {
-				/* We're through */
-				if (need_rescan)
-					rc = -EAGAIN;
-				break;
-			}
-			if (need_rescan) {
-				rc = -EAGAIN;
-				continue;
-			}
-			/* Put it on the slow path. */
-			ret = css_enqueue_subchannel_slow(irq);
-			if (ret) {
-				css_clear_subchannel_slow_list();
-				need_rescan = 1;
-			}
-			rc = -EAGAIN;
-			continue;
-		}
-
-		spin_lock_irq(&sch->lock);
-
-		chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);
-
-		if (chp_mask == 0) {
-
-			spin_unlock_irq(&sch->lock);
-			continue;
-		}
-		old_lpm = sch->lpm;
-		sch->lpm = ((sch->schib.pmcw.pim &
-			     sch->schib.pmcw.pam &
-			     sch->schib.pmcw.pom)
-			    | chp_mask) & sch->opm;
-		if (!old_lpm && sch->lpm)
-			device_trigger_reprobe(sch);
-		else if (sch->driver && sch->driver->verify)
-			sch->driver->verify(&sch->dev);
-
-		spin_unlock_irq(&sch->lock);
-		put_device(&sch->dev);
-		if (fla_mask == 0xffff)
-			break;
-	}
+	rc = for_each_subchannel(__s390_process_res_acc, res_data);
+	if (css_slow_subchannels_exist())
+		rc = -EAGAIN;
+	else if (rc != -EAGAIN)
+		rc = 0;
 	return rc;
 }
 
@@ -466,6 +481,7 @@ int
 chsc_process_crw(void)
 {
 	int chpid, ret;
+	struct res_acc_data res_data;
 	struct {
 		struct chsc_header request;
 		u32 reserved1;
@@ -499,8 +515,9 @@ chsc_process_crw(void)
 	ret = 0;
 	do {
 		int ccode, status;
+		struct device *dev;
 		memset(sei_area, 0, sizeof(*sei_area));
-
+		memset(&res_data, 0, sizeof(struct res_acc_data));
 		sei_area->request = (struct chsc_header) {
 			.length = 0x0010,
 			.code = 0x000e,
@@ -573,26 +590,25 @@ chsc_process_crw(void)
 		if (status < 0)
 			new_channel_path(sei_area->rsid);
 		else if (!status)
-			return 0;
-		if ((sei_area->vf & 0x80) == 0) {
-			pr_debug("chpid: %x\n", sei_area->rsid);
-			ret = s390_process_res_acc(sei_area->rsid,
-						   0, 0);
-		} else if ((sei_area->vf & 0xc0) == 0x80) {
-			pr_debug("chpid: %x link addr: %x\n",
-				 sei_area->rsid, sei_area->fla);
-			ret = s390_process_res_acc(sei_area->rsid,
-						   sei_area->fla,
-						   0xff00);
-		} else if ((sei_area->vf & 0xc0) == 0xc0) {
-			pr_debug("chpid: %x full link addr: %x\n",
-				 sei_area->rsid, sei_area->fla);
-			ret = s390_process_res_acc(sei_area->rsid,
-						   sei_area->fla,
-						   0xffff);
+			break;
+		dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
+		res_data.chp = to_channelpath(dev);
+		pr_debug("chpid: %x", sei_area->rsid);
+		if ((sei_area->vf & 0xc0) != 0) {
+			res_data.fla = sei_area->fla;
+			if ((sei_area->vf & 0xc0) == 0xc0) {
+				pr_debug(" full link addr: %x",
+					 sei_area->fla);
+				res_data.fla_mask = 0xffff;
+			} else {
+				pr_debug(" link addr: %x",
+					 sei_area->fla);
+				res_data.fla_mask = 0xff00;
+			}
 		}
-		pr_debug("\n");
-
+		ret = s390_process_res_acc(&res_data);
+		pr_debug("\n\n");
+		put_device(dev);
 		break;
 
 	default: /* other stuff */
@@ -604,12 +620,72 @@ chsc_process_crw(void)
 	return ret;
 }
 
+static inline int
+__chp_add_new_sch(struct subchannel_id schid)
+{
+	struct schib schib;
+	int ret;
+
+	if (stsch(schid, &schib))
+		/* We're through */
+		return need_rescan ? -EAGAIN : -ENXIO;
+
+	/* Put it on the slow path. */
+	ret = css_enqueue_subchannel_slow(schid);
+	if (ret) {
+		css_clear_subchannel_slow_list();
+		need_rescan = 1;
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+
 static int
-chp_add(int chpid)
+__chp_add(struct subchannel_id schid, void *data)
 {
+	int i;
+	struct channel_path *chp;
 	struct subchannel *sch;
-	int irq, ret, rc;
+
+	chp = (struct channel_path *)data;
+	sch = get_subchannel_by_schid(schid);
+	if (!sch)
+		/* Check if the subchannel is now available. */
+		return __chp_add_new_sch(schid);
+	spin_lock(&sch->lock);
+	for (i=0; i<8; i++)
+		if (sch->schib.pmcw.chpid[i] == chp->id) {
+			if (stsch(sch->schid, &sch->schib) != 0) {
+				/* Endgame. */
+				spin_unlock(&sch->lock);
+				return -ENXIO;
+			}
+			break;
+		}
+	if (i==8) {
+		spin_unlock(&sch->lock);
+		return 0;
+	}
+	sch->lpm = ((sch->schib.pmcw.pim &
+		     sch->schib.pmcw.pam &
+		     sch->schib.pmcw.pom)
+		    | 0x80 >> i) & sch->opm;
+
+	if (sch->driver && sch->driver->verify)
+		sch->driver->verify(&sch->dev);
+
+	spin_unlock(&sch->lock);
+	put_device(&sch->dev);
+	return 0;
+}
+
+static int
+chp_add(int chpid)
+{
+	int rc;
 	char dbf_txt[15];
+	struct device *dev;
 
 	if (!get_chp_status(chpid))
 		return 0; /* no need to do the rest */
@@ -617,59 +693,13 @@ chp_add(int chpid)
 	sprintf(dbf_txt, "cadd%x", chpid);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
-	rc = 0;
-	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
-		int i;
-
-		sch = get_subchannel_by_schid(irq);
-		if (!sch) {
-			struct schib schib;
-
-			if (stsch(irq, &schib)) {
-				/* We're through */
-				if (need_rescan)
-					rc = -EAGAIN;
-				break;
-			}
-			if (need_rescan) {
-				rc = -EAGAIN;
-				continue;
-			}
-			/* Put it on the slow path. */
-			ret = css_enqueue_subchannel_slow(irq);
-			if (ret) {
-				css_clear_subchannel_slow_list();
-				need_rescan = 1;
-			}
-			rc = -EAGAIN;
-			continue;
-		}
-
-		spin_lock(&sch->lock);
-		for (i=0; i<8; i++)
-			if (sch->schib.pmcw.chpid[i] == chpid) {
-				if (stsch(sch->irq, &sch->schib) != 0) {
-					/* Endgame. */
-					spin_unlock(&sch->lock);
-					return rc;
-				}
-				break;
-			}
-		if (i==8) {
-			spin_unlock(&sch->lock);
-			return rc;
-		}
-		sch->lpm = ((sch->schib.pmcw.pim &
-			     sch->schib.pmcw.pam &
-			     sch->schib.pmcw.pom)
-			    | 0x80 >> i) & sch->opm;
-
-		if (sch->driver && sch->driver->verify)
-			sch->driver->verify(&sch->dev);
-
-		spin_unlock(&sch->lock);
-		put_device(&sch->dev);
-	}
+	dev = get_device(&css[0]->chps[chpid]->dev);
+	rc = for_each_subchannel(__chp_add, to_channelpath(dev));
+	if (css_slow_subchannels_exist())
+		rc = -EAGAIN;
+	if (rc != -EAGAIN)
+		rc = 0;
+	put_device(dev);
 	return rc;
 }
 
@@ -702,7 +732,7 @@ __check_for_io_and_kill(struct subchannel *sch, int index)
 	if (!device_is_online(sch))
 		/* cio could be doing I/O. */
 		return 0;
-	cc = stsch(sch->irq, &sch->schib);
+	cc = stsch(sch->schid, &sch->schib);
 	if (cc)
 		return 0;
 	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
@@ -743,7 +773,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
 	 * just varied off path. Then kill it.
 	 */
 	if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
-		if (css_enqueue_subchannel_slow(sch->irq)) {
+		if (css_enqueue_subchannel_slow(sch->schid)) {
 			css_clear_subchannel_slow_list();
 			need_rescan = 1;
 		}
@@ -781,6 +811,29 @@ s390_subchannel_vary_chpid_on(struct device *dev, void *data)
 	return 0;
 }
 
+static int
+__s390_vary_chpid_on(struct subchannel_id schid, void *data)
+{
+	struct schib schib;
+	struct subchannel *sch;
+
+	sch = get_subchannel_by_schid(schid);
+	if (sch) {
+		put_device(&sch->dev);
+		return 0;
+	}
+	if (stsch_err(schid, &schib))
+		/* We're through */
+		return -ENXIO;
+	/* Put it on the slow path. */
+	if (css_enqueue_subchannel_slow(schid)) {
+		css_clear_subchannel_slow_list();
+		need_rescan = 1;
+		return -EAGAIN;
+	}
+	return 0;
+}
+
 /*
  * Function: s390_vary_chpid
  * Varies the specified chpid online or offline
@@ -789,8 +842,7 @@ static int
 s390_vary_chpid( __u8 chpid, int on)
 {
 	char dbf_text[15];
-	int status, irq, ret;
-	struct subchannel *sch;
+	int status;
 
 	sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
 	CIO_TRACE_EVENT( 2, dbf_text);
@@ -815,30 +867,9 @@ s390_vary_chpid( __u8 chpid, int on)
 	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
 			 s390_subchannel_vary_chpid_on :
 			 s390_subchannel_vary_chpid_off);
-	if (!on)
-		goto out;
-	/* Scan for new devices on varied on path. */
-	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
-		struct schib schib;
-
-		if (need_rescan)
-			break;
-		sch = get_subchannel_by_schid(irq);
-		if (sch) {
-			put_device(&sch->dev);
-			continue;
-		}
-		if (stsch(irq, &schib))
-			/* We're through */
-			break;
-		/* Put it on the slow path. */
-		ret = css_enqueue_subchannel_slow(irq);
-		if (ret) {
-			css_clear_subchannel_slow_list();
-			need_rescan = 1;
-		}
-	}
-out:
+	if (on)
+		/* Scan for new devices on varied on path. */
+		for_each_subchannel(__s390_vary_chpid_on, NULL);
 	if (need_rescan || css_slow_subchannels_exist())
 		queue_work(slow_path_wq, &slow_path_work);
 	return 0;
@@ -995,7 +1026,7 @@ new_channel_path(int chpid)
 	chp->id = chpid;
 	chp->state = 1;
 	chp->dev = (struct device) {
-		.parent  = &css_bus_device,
+		.parent  = &css[0]->device,
 		.release = chp_release,
 	};
 	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
@@ -1017,7 +1048,7 @@ new_channel_path(int chpid)
 		device_unregister(&chp->dev);
 		goto out_free;
 	} else
-		chps[chpid] = chp;
+		css[0]->chps[chpid] = chp;
 	return ret;
 out_free:
 	kfree(chp);
@@ -1030,7 +1061,7 @@ chsc_get_chp_desc(struct subchannel *sch, int chp_no)
 	struct channel_path *chp;
 	struct channel_path_desc *desc;
 
-	chp = chps[sch->schib.pmcw.chpid[chp_no]];
+	chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
 	if (!chp)
 		return NULL;
 	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
@@ -1051,6 +1082,54 @@ chsc_alloc_sei_area(void)
 	return (sei_page ? 0 : -ENOMEM);
 }
 
+int __init
+chsc_enable_facility(int operation_code)
+{
+	int ret;
+	struct {
+		struct chsc_header request;
+		u8 reserved1:4;
+		u8 format:4;
+		u8 reserved2;
+		u16 operation_code;
+		u32 reserved3;
+		u32 reserved4;
+		u32 operation_data_area[252];
+		struct chsc_header response;
+		u32 reserved5:4;
+		u32 format2:4;
+		u32 reserved6:24;
+	} *sda_area;
+
+	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
+	if (!sda_area)
+		return -ENOMEM;
+	sda_area->request = (struct chsc_header) {
+		.length = 0x0400,
+		.code = 0x0031,
+	};
+	sda_area->operation_code = operation_code;
+
+	ret = chsc(sda_area);
+	if (ret > 0) {
+		ret = (ret == 3) ? -ENODEV : -EBUSY;
+		goto out;
+	}
+	switch (sda_area->response.code) {
+	case 0x0003: /* invalid request block */
+	case 0x0007:
+		ret = -EINVAL;
+		break;
+	case 0x0004: /* command not provided */
+	case 0x0101: /* facility not provided */
+		ret = -EOPNOTSUPP;
+		break;
+	}
+ out:
+	free_page((unsigned long)sda_area);
+	return ret;
+}
+
 subsys_initcall(chsc_alloc_sei_area);
 
 struct css_general_char css_general_characteristics;