Diffstat (limited to 'drivers/s390')
85 files changed, 4640 insertions, 2061 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1a4025683362..1b6c52ef7339 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -995,14 +995,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	now = get_clock();
 
 	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
-		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
-		  (unsigned int) intparm);
+		  cdev->dev.bus_id, ((irb->scsw.cmd.cstat << 8) |
+		  irb->scsw.cmd.dstat), (unsigned int) intparm);
 
 	/* check for unsolicited interrupts */
 	cqr = (struct dasd_ccw_req *) intparm;
-	if (!cqr || ((irb->scsw.cc == 1) &&
-		     (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
-		     (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) {
+	if (!cqr || ((irb->scsw.cmd.cc == 1) &&
+		     (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
+		     (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND))) {
 		if (cqr && cqr->status == DASD_CQR_IN_IO)
 			cqr->status = DASD_CQR_QUEUED;
 		device = dasd_device_from_cdev_locked(cdev);
@@ -1025,7 +1025,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 
 	/* Check for clear pending */
 	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
-	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
+	    irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
 		cqr->status = DASD_CQR_CLEARED;
 		dasd_device_clear_timer(device);
 		wake_up(&dasd_flush_wq);
@@ -1041,11 +1041,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		return;
 	}
 	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
-		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
+		      ((irb->scsw.cmd.cstat << 8) | irb->scsw.cmd.dstat), cqr);
 	next = NULL;
 	expires = 0;
-	if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
-	    irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) {
+	if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+	    irb->scsw.cmd.cstat == 0 && !irb->esw.esw0.erw.cons) {
 		/* request was completed successfully */
 		cqr->status = DASD_CQR_SUCCESS;
 		cqr->stopclk = now;
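Note: the hunks above are representative of the whole series. The channel-status fields of the IRB are now reached through the cmd member of the SCSW instead of directly, since the SCSW carries both a command-mode and a transport-mode layout. As a hedged illustration only (the helper name is invented, not part of the patch), the success test used in dasd_int_handler can be read as:

	/* Sketch, assuming the irb/scsw layout shown in the hunks above;
	 * cqr_completed_ok() is an illustrative name, not kernel code. */
	static int cqr_completed_ok(struct irb *irb)
	{
		return irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
		       irb->scsw.cmd.cstat == 0 &&
		       !irb->esw.esw0.erw.cons;
	}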
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index e6700df52df4..5c6e6f331cb0 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1572,7 +1572,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
 
 	/* determine the address of the CCW to be restarted */
 	/* Imprecise ending is not set -> addr from IRB-SCSW */
-	cpa = default_erp->refers->irb.scsw.cpa;
+	cpa = default_erp->refers->irb.scsw.cmd.cpa;
 
 	if (cpa == 0) {
 
@@ -1725,7 +1725,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
 
 	/* determine the address of the CCW to be restarted */
 	/* Imprecise ending is not set -> addr from IRB-SCSW */
-	cpa = previous_erp->irb.scsw.cpa;
+	cpa = previous_erp->irb.scsw.cmd.cpa;
 
 	if (cpa == 0) {
 
@@ -2171,7 +2171,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
 {
 	struct dasd_device *device = erp->startdev;
 
-	if (erp->refers->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK
+	if (erp->refers->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK
 					   | SCHN_STAT_CHN_CTRL_CHK)) {
 		DEV_MESSAGE(KERN_DEBUG, device, "%s",
 			    "channel or interface control check");
@@ -2352,9 +2352,9 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
 
 	if ((cqr1->irb.esw.esw0.erw.cons == 0) &&
 	    (cqr2->irb.esw.esw0.erw.cons == 0)) {
-		if ((cqr1->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK |
+		if ((cqr1->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
 					     SCHN_STAT_CHN_CTRL_CHK)) ==
-		    (cqr2->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK |
+		    (cqr2->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
 					     SCHN_STAT_CHN_CTRL_CHK)))
 			return 1; /* match with ifcc*/
 	}
@@ -2622,8 +2622,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
 	}
 
 	/* double-check if current erp/cqr was successfull */
-	if ((cqr->irb.scsw.cstat == 0x00) &&
-	    (cqr->irb.scsw.dstat == (DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
+	if ((cqr->irb.scsw.cmd.cstat == 0x00) &&
+	    (cqr->irb.scsw.cmd.dstat ==
+	     (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
 
 		DEV_MESSAGE(KERN_DEBUG, device,
 			    "ERP called for successful request %p"
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index a0edae091b5e..e0b77210d37a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1404,13 +1404,14 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
 
 	/* first of all check for state change pending interrupt */
 	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
-	if ((irb->scsw.dstat & mask) == mask) {
+	if ((irb->scsw.cmd.dstat & mask) == mask) {
 		dasd_generic_handle_state_change(device);
 		return;
 	}
 
 	/* summary unit check */
-	if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && irb->ecw[7] == 0x0D) {
+	if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+	    (irb->ecw[7] == 0x0D)) {
 		dasd_alias_handle_summary_unit_check(device, irb);
 		return;
 	}
@@ -2068,11 +2069,11 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
 		       device->cdev->dev.bus_id);
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
-		       irb->scsw.cstat, irb->scsw.dstat);
+		       irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 		       " device %s: Failing CCW: %p\n",
 		       device->cdev->dev.bus_id,
-		       (void *) (addr_t) irb->scsw.cpa);
+		       (void *) (addr_t) irb->scsw.cmd.cpa);
 	if (irb->esw.esw0.erw.cons) {
 		for (sl = 0; sl < 4; sl++) {
 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
@@ -2122,7 +2123,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
 		/* scsw->cda is either valid or zero */
 		len = 0;
 		from = ++to;
-		fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */
+		fail = (struct ccw1 *)(addr_t)
+				irb->scsw.cmd.cpa; /* failing CCW */
 		if (from < fail - 2) {
 			from = fail - 2; /* there is a gap - print header */
 			len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 6e53ab606e97..29da4413ad43 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -15,6 +15,7 @@
 #include <linux/device.h>
 #include <linux/poll.h>
 #include <linux/mutex.h>
+#include <linux/smp_lock.h>
 
 #include <asm/uaccess.h>
 #include <asm/atomic.h>
@@ -525,6 +526,7 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
 	eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
 	if (!eerb)
 		return -ENOMEM;
+	lock_kernel();
 	eerb->buffer_page_count = eer_pages;
 	if (eerb->buffer_page_count < 1 ||
 	    eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
@@ -532,6 +534,7 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
 		MESSAGE(KERN_WARNING, "can't open device since module "
 			"parameter eer_pages is smaller then 1 or"
 			" bigger then %d", (int)(INT_MAX / PAGE_SIZE));
+		unlock_kernel();
 		return -EINVAL;
 	}
 	eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
@@ -539,12 +542,14 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
 			       GFP_KERNEL);
 	if (!eerb->buffer) {
 		kfree(eerb);
+		unlock_kernel();
 		return -ENOMEM;
 	}
 	if (dasd_eer_allocate_buffer_pages(eerb->buffer,
 					   eerb->buffer_page_count)) {
 		kfree(eerb->buffer);
 		kfree(eerb);
+		unlock_kernel();
 		return -ENOMEM;
 	}
 	filp->private_data = eerb;
@@ -552,6 +557,7 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
 	list_add(&eerb->list, &bufferlist);
 	spin_unlock_irqrestore(&bufferlock, flags);
 
+	unlock_kernel();
 	return nonseekable_open(inp,filp);
 }
 
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 116611583df8..aee4656127f7 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -222,7 +222,7 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
 
 	/* first of all check for state change pending interrupt */
 	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
-	if ((irb->scsw.dstat & mask) == mask) {
+	if ((irb->scsw.cmd.dstat & mask) == mask) {
 		dasd_generic_handle_state_change(device);
 		return;
 	}
@@ -449,11 +449,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
 		       device->cdev->dev.bus_id);
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
-		       irb->scsw.cstat, irb->scsw.dstat);
+		       irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 		       " device %s: Failing CCW: %p\n",
 		       device->cdev->dev.bus_id,
-		       (void *) (addr_t) irb->scsw.cpa);
+		       (void *) (addr_t) irb->scsw.cmd.cpa);
 	if (irb->esw.esw0.erw.cons) {
 		for (sl = 0; sl < 4; sl++) {
 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
@@ -498,11 +498,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
 
 	/* print failing CCW area */
 	len = 0;
-	if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) {
-		act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2;
+	if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
+		act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
 	}
-	end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last);
+	end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
 	while (act <= end) {
 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 			       " CCW %p: %08X %08X DAT:",
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index bb52d2fbac18..01fcdd91b846 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -167,10 +167,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
 	struct dcssblk_dev_info *dev_info;
 	int rc;
 
-	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
-		PRINT_WARN("Invalid value, must be 0 or 1\n");
+	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
 		return -EINVAL;
-	}
 	down_write(&dcssblk_devices_sem);
 	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
 	if (atomic_read(&dev_info->use_count)) {
@@ -215,7 +213,6 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
 			set_disk_ro(dev_info->gd, 0);
 		}
 	} else {
-		PRINT_WARN("Invalid value, must be 0 or 1\n");
 		rc = -EINVAL;
 		goto out;
 	}
@@ -258,10 +255,8 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
 {
 	struct dcssblk_dev_info *dev_info;
 
-	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
-		PRINT_WARN("Invalid value, must be 0 or 1\n");
+	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
 		return -EINVAL;
-	}
 	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
 
 	down_write(&dcssblk_devices_sem);
@@ -289,7 +284,6 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
 		}
 	} else {
 		up_write(&dcssblk_devices_sem);
-		PRINT_WARN("Invalid value, must be 0 or 1\n");
 		return -EINVAL;
 	}
 	up_write(&dcssblk_devices_sem);
@@ -441,7 +435,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 		goto out;
 
 unregister_dev:
-	PRINT_ERR("device_create_file() failed!\n");
 	list_del(&dev_info->lh);
 	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
@@ -702,10 +695,8 @@ dcssblk_check_params(void)
 static void __exit
 dcssblk_exit(void)
 {
-	PRINT_DEBUG("DCSSBLOCK EXIT...\n");
 	s390_root_dev_unregister(dcssblk_root_dev);
 	unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
-	PRINT_DEBUG("...finished!\n");
 }
 
 static int __init
@@ -713,27 +704,21 @@ dcssblk_init(void)
 {
 	int rc;
 
-	PRINT_DEBUG("DCSSBLOCK INIT...\n");
 	dcssblk_root_dev = s390_root_dev_register("dcssblk");
-	if (IS_ERR(dcssblk_root_dev)) {
-		PRINT_ERR("device_register() failed!\n");
+	if (IS_ERR(dcssblk_root_dev))
 		return PTR_ERR(dcssblk_root_dev);
-	}
 	rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
 	if (rc) {
-		PRINT_ERR("device_create_file(add) failed!\n");
 		s390_root_dev_unregister(dcssblk_root_dev);
 		return rc;
 	}
 	rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
 	if (rc) {
-		PRINT_ERR("device_create_file(remove) failed!\n");
 		s390_root_dev_unregister(dcssblk_root_dev);
 		return rc;
 	}
 	rc = register_blkdev(0, DCSSBLK_NAME);
 	if (rc < 0) {
-		PRINT_ERR("Can't get dynamic major!\n");
 		s390_root_dev_unregister(dcssblk_root_dev);
 		return rc;
 	}
@@ -742,7 +727,6 @@ dcssblk_init(void)
 
 	dcssblk_check_params();
 
-	PRINT_DEBUG("...finished!\n");
 	return 0;
 }
 
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index f231bc21b1ca..dd9b986389a2 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -100,15 +100,10 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
 		: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
 	if (cc == 3)
 		return -ENXIO;
-	if (cc == 2) {
-		PRINT_ERR("expanded storage lost!\n");
+	if (cc == 2)
 		return -ENXIO;
-	}
-	if (cc == 1) {
-		PRINT_ERR("page in failed for page index %u.\n",
-			  xpage_index);
+	if (cc == 1)
 		return -EIO;
-	}
 	return 0;
 }
 
@@ -135,15 +130,10 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
 		: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
 	if (cc == 3)
 		return -ENXIO;
-	if (cc == 2) {
-		PRINT_ERR("expanded storage lost!\n");
+	if (cc == 2)
 		return -ENXIO;
-	}
-	if (cc == 1) {
-		PRINT_ERR("page out failed for page index %u.\n",
-			  xpage_index);
+	if (cc == 1)
 		return -EIO;
-	}
 	return 0;
 }
 
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 3e5653c92f4b..d3ec9b55ab35 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -93,9 +93,6 @@ struct raw3215_info {
 	struct raw3215_req *queued_write;/* pointer to queued write requests */
 	wait_queue_head_t empty_wait; /* wait queue for flushing */
 	struct timer_list timer; /* timer for delayed output */
-	char *message; /* pending message from raw3215_irq */
-	int msg_dstat; /* dstat for pending message */
-	int msg_cstat; /* cstat for pending message */
 	int line_pos; /* position on the line (for tabs) */
 	char ubuffer[80]; /* copy_from_user buffer */
 };
@@ -359,11 +356,6 @@ raw3215_tasklet(void *data)
 	raw3215_mk_write_req(raw);
 	raw3215_try_io(raw);
 	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
-	/* Check for pending message from raw3215_irq */
-	if (raw->message != NULL) {
-		printk(raw->message, raw->msg_dstat, raw->msg_cstat);
-		raw->message = NULL;
-	}
 	tty = raw->tty;
 	if (tty != NULL &&
 	    RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
@@ -381,20 +373,14 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 	struct raw3215_req *req;
 	struct tty_struct *tty;
 	int cstat, dstat;
-	int count, slen;
+	int count;
 
 	raw = cdev->dev.driver_data;
 	req = (struct raw3215_req *) intparm;
-	cstat = irb->scsw.cstat;
-	dstat = irb->scsw.dstat;
-	if (cstat != 0) {
-		raw->message = KERN_WARNING
-			"Got nonzero channel status in raw3215_irq "
-			"(dev sts 0x%2x, sch sts 0x%2x)";
-		raw->msg_dstat = dstat;
-		raw->msg_cstat = cstat;
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+	if (cstat != 0)
 		tasklet_schedule(&raw->tasklet);
-	}
 	if (dstat & 0x01) { /* we got a unit exception */
 		dstat &= ~0x01; /* we can ignore it */
 	}
@@ -404,8 +390,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 			break;
 		/* Attention interrupt, someone hit the enter key */
 		raw3215_mk_read_req(raw);
-		if (MACHINE_IS_P390)
-			memset(raw->inbuf, 0, RAW3215_INBUF_SIZE);
 		tasklet_schedule(&raw->tasklet);
 		break;
 	case 0x08:
@@ -415,7 +399,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 			return; /* That shouldn't happen ... */
 		if (req->type == RAW3215_READ) {
 			/* store residual count, then wait for device end */
-			req->residual = irb->scsw.count;
+			req->residual = irb->scsw.cmd.count;
 		}
 		if (dstat == 0x08)
 			break;
@@ -428,11 +412,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 
 		tty = raw->tty;
 		count = 160 - req->residual;
-		if (MACHINE_IS_P390) {
-			slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE);
-			if (count > slen)
-				count = slen;
-		} else
 		EBCASC(raw->inbuf, count);
 		cchar = ctrlchar_handle(raw->inbuf, count, tty);
 		switch (cchar & CTRLCHAR_MASK) {
@@ -481,11 +460,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 			raw->flags &= ~RAW3215_WORKING;
 			raw3215_free_req(req);
 		}
-		raw->message = KERN_WARNING
-			"Spurious interrupt in in raw3215_irq "
-			"(dev sts 0x%2x, sch sts 0x%2x)";
-		raw->msg_dstat = dstat;
-		raw->msg_cstat = cstat;
 		tasklet_schedule(&raw->tasklet);
 	}
 	return;
@@ -883,7 +857,6 @@ con3215_init(void)
 		free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
 		free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
 		raw3215[0] = NULL;
-		printk("Couldn't find a 3215 console device\n");
 		return -ENODEV;
 	}
 	register_console(&con3215);
@@ -1157,7 +1130,6 @@ tty3215_init(void)
 	tty_set_operations(driver, &tty3215_ops);
 	ret = tty_register_driver(driver);
 	if (ret) {
-		printk("Couldn't register tty3215 driver\n");
 		put_tty_driver(driver);
 		return ret;
 	}
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 0b040557db02..3c07974886ed 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -411,15 +411,15 @@ static int
 con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
 {
 	/* Handle ATTN. Schedule tasklet to read aid. */
-	if (irb->scsw.dstat & DEV_STAT_ATTENTION)
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
 		con3270_issue_read(cp);
 
 	if (rq) {
-		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
 			rq->rc = -EIO;
 		else
 			/* Normal end. Copy residual count. */
-			rq->rescnt = irb->scsw.count;
+			rq->rescnt = irb->scsw.cmd.count;
 	}
 	return RAW3270_IO_DONE;
 }
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index ef36f2132aa4..d18e6d2e0b49 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -14,6 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/types.h>
+#include <linux/smp_lock.h>
 
 #include <asm/ccwdev.h>
 #include <asm/cio.h>
@@ -216,17 +217,17 @@ static int
 fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
 {
 	/* Handle ATTN. Set indication and wake waiters for attention. */
-	if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
 		fp->attention = 1;
 		wake_up(&fp->wait);
 	}
 
 	if (rq) {
-		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
 			rq->rc = -EIO;
 		else
 			/* Normal end. Copy residual count. */
-			rq->rescnt = irb->scsw.count;
+			rq->rescnt = irb->scsw.cmd.count;
 	}
 	return RAW3270_IO_DONE;
 }
@@ -421,6 +422,7 @@ fs3270_open(struct inode *inode, struct file *filp)
 
 	if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR)
 		return -ENODEV;
+	lock_kernel();
 	minor = iminor(filp->f_path.dentry->d_inode);
 	/* Check for minor 0 multiplexer. */
 	if (minor == 0) {
@@ -429,7 +431,8 @@ fs3270_open(struct inode *inode, struct file *filp)
 		tty = get_current_tty();
 		if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) {
 			mutex_unlock(&tty_mutex);
-			return -ENODEV;
+			rc = -ENODEV;
+			goto out;
 		}
 		minor = tty->index + RAW3270_FIRSTMINOR;
 		mutex_unlock(&tty_mutex);
@@ -438,19 +441,22 @@ fs3270_open(struct inode *inode, struct file *filp)
 	fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
 	if (!IS_ERR(fp)) {
 		raw3270_put_view(&fp->view);
-		return -EBUSY;
+		rc = -EBUSY;
+		goto out;
 	}
 	/* Allocate fullscreen view structure. */
 	fp = fs3270_alloc_view();
-	if (IS_ERR(fp))
-		return PTR_ERR(fp);
+	if (IS_ERR(fp)) {
+		rc = PTR_ERR(fp);
+		goto out;
+	}
 
 	init_waitqueue_head(&fp->wait);
 	fp->fs_pid = get_pid(task_pid(current));
 	rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
 	if (rc) {
 		fs3270_free_view(&fp->view);
-		return rc;
+		goto out;
 	}
 
 	/* Allocate idal-buffer. */
@@ -458,7 +464,8 @@ fs3270_open(struct inode *inode, struct file *filp)
 	if (IS_ERR(ib)) {
 		raw3270_put_view(&fp->view);
 		raw3270_del_view(&fp->view);
-		return PTR_ERR(fp);
+		rc = PTR_ERR(fp);
+		goto out;
 	}
 	fp->rdbuf = ib;
 
@@ -466,9 +473,11 @@ fs3270_open(struct inode *inode, struct file *filp)
 	if (rc) {
 		raw3270_put_view(&fp->view);
 		raw3270_del_view(&fp->view);
-		return rc;
+		goto out;
 	}
 	filp->private_data = fp;
+out:
+	unlock_kernel();
 	return 0;
 }
 
@@ -512,11 +521,8 @@ fs3270_init(void)
 	int rc;
 
 	rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
-	if (rc) {
-		printk(KERN_ERR "fs3270 can't get major number %d: errno %d\n",
-		       IBM_FS3270_MAJOR, rc);
+	if (rc)
 		return rc;
-	}
 	return 0;
 }
 
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 1e1f50655bbf..35fd8dfcaaa6 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -3,14 +3,14 @@
  *
  * Character device driver for reading z/VM *MONITOR service records.
  *
- * Copyright 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH.
- *
- * Author: Gerald Schaefer <geraldsc@de.ibm.com>
+ * Copyright IBM Corp. 2004, 2008
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
+#include <linux/smp_lock.h>
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -18,12 +18,11 @@
 #include <linux/ctype.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <net/iucv/iucv.h>
 #include <asm/uaccess.h>
 #include <asm/ebcdic.h>
 #include <asm/extmem.h>
-#include <linux/poll.h>
-#include <net/iucv/iucv.h>
-
 
 //#define MON_DEBUG /* Debug messages on/off */
 
@@ -152,10 +151,7 @@ static int mon_check_mca(struct mon_msg *monmsg)
 	    (mon_mca_end(monmsg) > mon_dcss_end) ||
 	    (mon_mca_start(monmsg) < mon_dcss_start) ||
 	    ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
-	{
-		P_DEBUG("READ, IGNORED INVALID MCA\n\n");
 		return -EINVAL;
-	}
 	return 0;
 }
 
@@ -164,10 +160,6 @@ static int mon_send_reply(struct mon_msg *monmsg,
 {
 	int rc;
 
-	P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = "
-		"0x%08X\n\n",
-		monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
-
 	rc = iucv_message_reply(monpriv->path, &monmsg->msg,
 				IUCV_IPRMDATA, NULL, 0);
 	atomic_dec(&monpriv->msglim_count);
@@ -202,15 +194,12 @@ static struct mon_private *mon_alloc_mem(void)
 	struct mon_private *monpriv;
 
 	monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
-	if (!monpriv) {
-		P_ERROR("no memory for monpriv\n");
+	if (!monpriv)
 		return NULL;
-	}
 	for (i = 0; i < MON_MSGLIM; i++) {
 		monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg),
 						GFP_KERNEL);
 		if (!monpriv->msg_array[i]) {
-			P_ERROR("open, no memory for msg_array\n");
 			mon_free_mem(monpriv);
 			return NULL;
 		}
@@ -218,41 +207,10 @@ static struct mon_private *mon_alloc_mem(void)
 	return monpriv;
 }
 
-static inline void mon_read_debug(struct mon_msg *monmsg,
-				  struct mon_private *monpriv)
-{
-#ifdef MON_DEBUG
-	u8 msg_type[2], mca_type;
-	unsigned long records_len;
-
-	records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1;
-
-	memcpy(msg_type, &monmsg->msg.class, 2);
-	EBCASC(msg_type, 2);
-	mca_type = mon_mca_type(monmsg, 0);
-	EBCASC(&mca_type, 1);
-
-	P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n",
-		monpriv->read_index, monpriv->write_index);
-	P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n",
-		monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
-	P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n",
-		msg_type[0], msg_type[1], mca_type ? mca_type : 'X',
-		mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2));
-	P_DEBUG("read, MCA: start = 0x%lX, end = 0x%lX\n",
-		mon_mca_start(monmsg), mon_mca_end(monmsg));
-	P_DEBUG("read, REC: start = 0x%X, end = 0x%X, len = %lu\n\n",
-		mon_rec_start(monmsg), mon_rec_end(monmsg), records_len);
-	if (mon_mca_size(monmsg) > 12)
-		P_DEBUG("READ, MORE THAN ONE MCA\n\n");
-#endif
-}
-
 static inline void mon_next_mca(struct mon_msg *monmsg)
 {
 	if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
 		return;
-	P_DEBUG("READ, NEXT MCA\n\n");
 	monmsg->mca_offset += 12;
 	monmsg->pos = 0;
 }
@@ -269,7 +227,6 @@ static struct mon_msg *mon_next_message(struct mon_private *monpriv)
 		monmsg->msglim_reached = 0;
 		monmsg->pos = 0;
 		monmsg->mca_offset = 0;
-		P_WARNING("read, message limit reached\n");
 		monpriv->read_index = (monpriv->read_index + 1) %
 				      MON_MSGLIM;
 		atomic_dec(&monpriv->read_ready);
@@ -286,10 +243,6 @@ static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
 {
 	struct mon_private *monpriv = path->private;
 
-	P_DEBUG("IUCV connection completed\n");
-	P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = "
-		"0x%02X, Sample = 0x%02X\n",
-		ipuser[0], ipuser[1], ipuser[2]);
 	atomic_set(&monpriv->iucv_connected, 1);
 	wake_up(&mon_conn_wait_queue);
 }
@@ -310,7 +263,6 @@ static void mon_iucv_message_pending(struct iucv_path *path,
 {
 	struct mon_private *monpriv = path->private;
 
-	P_DEBUG("IUCV message pending\n");
 	memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
 	       msg, sizeof(*msg));
 	if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
@@ -340,6 +292,7 @@ static int mon_open(struct inode *inode, struct file *filp)
 	/*
 	 * only one user allowed
 	 */
+	lock_kernel();
 	rc = -EBUSY;
 	if (test_and_set_bit(MON_IN_USE, &mon_in_use))
 		goto out;
@@ -375,8 +328,8 @@ static int mon_open(struct inode *inode, struct file *filp)
 		rc = -EIO;
 		goto out_path;
 	}
-	P_INFO("open, established connection to *MONITOR service\n\n");
 	filp->private_data = monpriv;
+	unlock_kernel();
 	return nonseekable_open(inode, filp);
 
 out_path:
@@ -386,6 +339,7 @@ out_priv:
 out_use:
 	clear_bit(MON_IN_USE, &mon_in_use);
 out:
+	unlock_kernel();
 	return rc;
 }
 
@@ -400,8 +354,6 @@ static int mon_close(struct inode *inode, struct file *filp)
 	rc = iucv_path_sever(monpriv->path, user_data_sever);
 	if (rc)
 		P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
-	else
-		P_INFO("close, terminated connection to *MONITOR service\n");
 
 	atomic_set(&monpriv->iucv_severed, 0);
 	atomic_set(&monpriv->iucv_connected, 0);
@@ -442,10 +394,8 @@ static ssize_t mon_read(struct file *filp, char __user *data,
 		monmsg = monpriv->msg_array[monpriv->read_index];
 	}
 
-	if (!monmsg->pos) {
+	if (!monmsg->pos)
 		monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
-		mon_read_debug(monmsg, monpriv);
-	}
 	if (mon_check_mca(monmsg))
 		goto reply;
 
@@ -531,7 +481,6 @@ static int __init mon_init(void)
 		P_ERROR("failed to register with iucv driver\n");
 		return rc;
 	}
-	P_INFO("open, registered with IUCV\n");
 
 	rc = segment_type(mon_dcss_name);
 	if (rc < 0) {
@@ -555,13 +504,8 @@ static int __init mon_init(void)
 	dcss_mkname(mon_dcss_name, &user_data_connect[8]);
 
 	rc = misc_register(&mon_dev);
-	if (rc < 0 ) {
-		P_ERROR("misc_register failed, rc = %i\n", rc);
+	if (rc < 0 )
 		goto out;
-	}
-	P_INFO("Loaded segment %s from %p to %p, size = %lu Byte\n",
-	       mon_dcss_name, (void *) mon_dcss_start, (void *) mon_dcss_end,
-	       mon_dcss_end - mon_dcss_start + 1);
 	return 0;
 
 out:
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index a86c0534cd49..4d71aa8c1a79 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -12,6 +12,7 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/errno.h>
+#include <linux/smp_lock.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
@@ -179,10 +180,12 @@ static int monwrite_open(struct inode *inode, struct file *filp)
 	monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
 	if (!monpriv)
 		return -ENOMEM;
+	lock_kernel();
 	INIT_LIST_HEAD(&monpriv->list);
 	monpriv->hdr_to_read = sizeof(monpriv->hdr);
 	mutex_init(&monpriv->thread_mutex);
 	filp->private_data = monpriv;
+	unlock_kernel();
 	return nonseekable_open(inode, filp);
 }
 
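Note: dasd_eer_open(), fs3270_open(), mon_open() and monwrite_open() above all gain lock_kernel()/unlock_kernel() pairs, reflecting the pushdown of the big kernel lock from the VFS open path into the individual drivers. A minimal sketch of the resulting shape, with invented names (example_private, example_open) used purely for illustration:

	#include <linux/fs.h>
	#include <linux/slab.h>
	#include <linux/smp_lock.h>

	struct example_private { int dummy; };

	static int example_open(struct inode *inode, struct file *filp)
	{
		struct example_private *priv;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL); /* allocate before taking the BKL */
		if (!priv)
			return -ENOMEM;
		lock_kernel();		/* the driver now serializes its own open() */
		filp->private_data = priv;
		unlock_kernel();	/* every exit path after lock_kernel() must drop it */
		return nonseekable_open(inode, filp);
	}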
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 848ef7e8523f..81a96e019080 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -153,19 +153,10 @@ struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
 	struct raw3270_request *rq;
 
 	rq = alloc_bootmem_low(sizeof(struct raw3270));
-	if (!rq)
-		return ERR_PTR(-ENOMEM);
-	memset(rq, 0, sizeof(struct raw3270_request));
 
 	/* alloc output buffer. */
-	if (size > 0) {
+	if (size > 0)
 		rq->buffer = alloc_bootmem_low(size);
-		if (!rq->buffer) {
-			free_bootmem((unsigned long) rq,
-				     sizeof(struct raw3270));
-			return ERR_PTR(-ENOMEM);
-		}
-	}
 	rq->size = size;
 	INIT_LIST_HEAD(&rq->list);
 
@@ -372,17 +363,17 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 
 	if (IS_ERR(irb))
 		rc = RAW3270_IO_RETRY;
-	else if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) {
+	else if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
 		rq->rc = -EIO;
 		rc = RAW3270_IO_DONE;
-	} else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
+	} else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
 				       DEV_STAT_UNIT_EXCEP)) {
 		/* Handle CE-DE-UE and subsequent UDE */
 		set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
 		rc = RAW3270_IO_BUSY;
 	} else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
 		/* Wait for UDE if busy flag is set. */
-		if (irb->scsw.dstat & DEV_STAT_DEV_END) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
 			clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
 			/* Got it, now retry. */
 			rc = RAW3270_IO_RETRY;
@@ -497,7 +488,7 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
 	 * Unit-Check Processing:
 	 * Expect Command Reject or Intervention Required.
 	 */
-	if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
+	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
 		/* Request finished abnormally. */
 		if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
 			set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
@@ -505,16 +496,16 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
 		}
 	}
 	if (rq) {
-		if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
 			if (irb->ecw[0] & SNS0_CMD_REJECT)
 				rq->rc = -EOPNOTSUPP;
 			else
 				rq->rc = -EIO;
 		} else
 			/* Request finished normally. Copy residual count. */
-			rq->rescnt = irb->scsw.count;
+			rq->rescnt = irb->scsw.cmd.count;
 	}
-	if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
+	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
 		set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
 		wake_up(&raw3270_wait_queue);
 	}
@@ -619,7 +610,6 @@ __raw3270_size_device_vm(struct raw3270 *rp)
 		rp->cols = 132;
 		break;
 	default:
-		printk(KERN_WARNING "vrdccrmd is 0x%.8x\n", model);
 		rc = -EOPNOTSUPP;
 		break;
 	}
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 2c7a1ee6b041..3c8b25e6c345 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -506,6 +506,8 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
 	if (scbuf->validity_sclp_send_mask)
 		sclp_send_mask = scbuf->sclp_send_mask;
 	spin_unlock_irqrestore(&sclp_lock, flags);
+	if (scbuf->validity_sclp_active_facility_mask)
+		sclp_facilities = scbuf->sclp_active_facility_mask;
 	sclp_dispatch_state_change();
 }
 
@@ -782,11 +784,9 @@ sclp_check_handler(__u16 code)
 	/* Is this the interrupt we are waiting for? */
 	if (finished_sccb == 0)
 		return;
-	if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
-		printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
-		       "for buffer at 0x%x\n", finished_sccb);
-		return;
-	}
+	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
+		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
+		      finished_sccb);
 	spin_lock(&sclp_lock);
 	if (sclp_running_state == sclp_running_state_running) {
 		sclp_init_req.status = SCLP_REQ_DONE;
@@ -883,8 +883,6 @@ sclp_init(void)
 	unsigned long flags;
 	int rc;
 
-	if (!MACHINE_HAS_SCLP)
-		return -ENODEV;
 	spin_lock_irqsave(&sclp_lock, flags);
 	/* Check for previous or running initialization */
 	if (sclp_init_state != sclp_init_state_uninitialized) {
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index b5c23396f8fe..0c2b77493db4 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -11,6 +11,9 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/memory.h>
 #include <asm/chpid.h>
 #include <asm/sclp.h>
 #include "sclp.h"
@@ -43,6 +46,8 @@ static int __initdata early_read_info_sccb_valid;
 
 u64 sclp_facilities;
 static u8 sclp_fac84;
+static unsigned long long rzm;
+static unsigned long long rnmax;
 
 static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
 {
@@ -62,7 +67,7 @@ out:
 	return rc;
 }
 
-void __init sclp_read_info_early(void)
+static void __init sclp_read_info_early(void)
 {
 	int rc;
 	int i;
@@ -92,34 +97,33 @@ void __init sclp_read_info_early(void)
 
 void __init sclp_facilities_detect(void)
 {
+	struct read_info_sccb *sccb;
+
+	sclp_read_info_early();
 	if (!early_read_info_sccb_valid)
 		return;
-	sclp_facilities = early_read_info_sccb.facilities;
-	sclp_fac84 = early_read_info_sccb.fac84;
+
+	sccb = &early_read_info_sccb;
+	sclp_facilities = sccb->facilities;
+	sclp_fac84 = sccb->fac84;
+	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
+	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
+	rzm <<= 20;
 }
 
-unsigned long long __init sclp_memory_detect(void)
+unsigned long long sclp_get_rnmax(void)
 {
-	unsigned long long memsize;
-	struct read_info_sccb *sccb;
+	return rnmax;
+}
 
-	if (!early_read_info_sccb_valid)
-		return 0;
-	sccb = &early_read_info_sccb;
-	if (sccb->rnsize)
-		memsize = sccb->rnsize << 20;
-	else
-		memsize = sccb->rnsize2 << 20;
-	if (sccb->rnmax)
-		memsize *= sccb->rnmax;
-	else
-		memsize *= sccb->rnmax2;
-	return memsize;
+unsigned long long sclp_get_rzm(void)
+{
+	return rzm;
 }
 
 /*
- * This function will be called after sclp_memory_detect(), which gets called
- * early from early.c code. Therefore the sccb should have valid contents.
+ * This function will be called after sclp_facilities_detect(), which gets
+ * called from early.c code. Therefore the sccb should have valid contents.
  */
 void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
 {
@@ -278,6 +282,305 @@ int sclp_cpu_deconfigure(u8 cpu) | |||
278 | return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8); | 282 | return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8); |
279 | } | 283 | } |
280 | 284 | ||
285 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
286 | |||
287 | static DEFINE_MUTEX(sclp_mem_mutex); | ||
288 | static LIST_HEAD(sclp_mem_list); | ||
289 | static u8 sclp_max_storage_id; | ||
290 | static unsigned long sclp_storage_ids[256 / BITS_PER_LONG]; | ||
291 | |||
292 | struct memory_increment { | ||
293 | struct list_head list; | ||
294 | u16 rn; | ||
295 | int standby; | ||
296 | int usecount; | ||
297 | }; | ||
298 | |||
299 | struct assign_storage_sccb { | ||
300 | struct sccb_header header; | ||
301 | u16 rn; | ||
302 | } __packed; | ||
303 | |||
304 | static unsigned long long rn2addr(u16 rn) | ||
305 | { | ||
306 | return (unsigned long long) (rn - 1) * rzm; | ||
307 | } | ||
308 | |||
309 | static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) | ||
310 | { | ||
311 | struct assign_storage_sccb *sccb; | ||
312 | int rc; | ||
313 | |||
314 | sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
315 | if (!sccb) | ||
316 | return -ENOMEM; | ||
317 | sccb->header.length = PAGE_SIZE; | ||
318 | sccb->rn = rn; | ||
319 | rc = do_sync_request(cmd, sccb); | ||
320 | if (rc) | ||
321 | goto out; | ||
322 | switch (sccb->header.response_code) { | ||
323 | case 0x0020: | ||
324 | case 0x0120: | ||
325 | break; | ||
326 | default: | ||
327 | rc = -EIO; | ||
328 | break; | ||
329 | } | ||
330 | out: | ||
331 | free_page((unsigned long) sccb); | ||
332 | return rc; | ||
333 | } | ||
334 | |||
335 | static int sclp_assign_storage(u16 rn) | ||
336 | { | ||
337 | return do_assign_storage(0x000d0001, rn); | ||
338 | } | ||
339 | |||
340 | static int sclp_unassign_storage(u16 rn) | ||
341 | { | ||
342 | return do_assign_storage(0x000c0001, rn); | ||
343 | } | ||
344 | |||
345 | struct attach_storage_sccb { | ||
346 | struct sccb_header header; | ||
347 | u16 :16; | ||
348 | u16 assigned; | ||
349 | u32 :32; | ||
350 | u32 entries[0]; | ||
351 | } __packed; | ||
352 | |||
353 | static int sclp_attach_storage(u8 id) | ||
354 | { | ||
355 | struct attach_storage_sccb *sccb; | ||
356 | int rc; | ||
357 | int i; | ||
358 | |||
359 | sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
360 | if (!sccb) | ||
361 | return -ENOMEM; | ||
362 | sccb->header.length = PAGE_SIZE; | ||
363 | rc = do_sync_request(0x00080001 | id << 8, sccb); | ||
364 | if (rc) | ||
365 | goto out; | ||
366 | switch (sccb->header.response_code) { | ||
367 | case 0x0020: | ||
368 | set_bit(id, sclp_storage_ids); | ||
369 | for (i = 0; i < sccb->assigned; i++) | ||
370 | sclp_unassign_storage(sccb->entries[i] >> 16); | ||
371 | break; | ||
372 | default: | ||
373 | rc = -EIO; | ||
374 | break; | ||
375 | } | ||
376 | out: | ||
377 | free_page((unsigned long) sccb); | ||
378 | return rc; | ||
379 | } | ||
380 | |||
381 | static int sclp_mem_change_state(unsigned long start, unsigned long size, | ||
382 | int online) | ||
383 | { | ||
384 | struct memory_increment *incr; | ||
385 | unsigned long long istart; | ||
386 | int rc = 0; | ||
387 | |||
388 | list_for_each_entry(incr, &sclp_mem_list, list) { | ||
389 | istart = rn2addr(incr->rn); | ||
390 | if (start + size - 1 < istart) | ||
391 | break; | ||
392 | if (start > istart + rzm - 1) | ||
393 | continue; | ||
394 | if (online) { | ||
395 | if (incr->usecount++) | ||
396 | continue; | ||
397 | /* | ||
398 | * Don't break the loop if one assign fails. Loop may | ||
399 | * be walked again on CANCEL and we can't save | ||
400 | * information if state changed before or not. | ||
401 | * So continue and increase usecount for all increments. | ||
402 | */ | ||
403 | rc |= sclp_assign_storage(incr->rn); | ||
404 | } else { | ||
405 | if (--incr->usecount) | ||
406 | continue; | ||
407 | sclp_unassign_storage(incr->rn); | ||
408 | } | ||
409 | } | ||
410 | return rc ? -EIO : 0; | ||
411 | } | ||
412 | |||
413 | static int sclp_mem_notifier(struct notifier_block *nb, | ||
414 | unsigned long action, void *data) | ||
415 | { | ||
416 | unsigned long start, size; | ||
417 | struct memory_notify *arg; | ||
418 | unsigned char id; | ||
419 | int rc = 0; | ||
420 | |||
421 | arg = data; | ||
422 | start = arg->start_pfn << PAGE_SHIFT; | ||
423 | size = arg->nr_pages << PAGE_SHIFT; | ||
424 | mutex_lock(&sclp_mem_mutex); | ||
425 | for (id = 0; id <= sclp_max_storage_id; id++) | ||
426 | if (!test_bit(id, sclp_storage_ids)) | ||
427 | sclp_attach_storage(id); | ||
428 | switch (action) { | ||
429 | case MEM_ONLINE: | ||
430 | break; | ||
431 | case MEM_GOING_ONLINE: | ||
432 | rc = sclp_mem_change_state(start, size, 1); | ||
433 | break; | ||
434 | case MEM_CANCEL_ONLINE: | ||
435 | sclp_mem_change_state(start, size, 0); | ||
436 | break; | ||
437 | default: | ||
438 | rc = -EINVAL; | ||
439 | break; | ||
440 | } | ||
441 | mutex_unlock(&sclp_mem_mutex); | ||
442 | return rc ? NOTIFY_BAD : NOTIFY_OK; | ||
443 | } | ||
444 | |||
445 | static struct notifier_block sclp_mem_nb = { | ||
446 | .notifier_call = sclp_mem_notifier, | ||
447 | }; | ||
448 | |||
449 | static void __init add_memory_merged(u16 rn) | ||
450 | { | ||
451 | static u16 first_rn, num; | ||
452 | unsigned long long start, size; | ||
453 | |||
454 | if (rn && first_rn && (first_rn + num == rn)) { | ||
455 | num++; | ||
456 | return; | ||
457 | } | ||
458 | if (!first_rn) | ||
459 | goto skip_add; | ||
460 | start = rn2addr(first_rn); | ||
461 | size = (unsigned long long ) num * rzm; | ||
462 | if (start >= VMEM_MAX_PHYS) | ||
463 | goto skip_add; | ||
464 | if (start + size > VMEM_MAX_PHYS) | ||
465 | size = VMEM_MAX_PHYS - start; | ||
466 | add_memory(0, start, size); | ||
467 | skip_add: | ||
468 | first_rn = rn; | ||
469 | num = 1; | ||
470 | } | ||
471 | |||
472 | static void __init sclp_add_standby_memory(void) | ||
473 | { | ||
474 | struct memory_increment *incr; | ||
475 | |||
476 | list_for_each_entry(incr, &sclp_mem_list, list) | ||
477 | if (incr->standby) | ||
478 | add_memory_merged(incr->rn); | ||
479 | add_memory_merged(0); | ||
480 | } | ||
481 | |||
482 | static void __init insert_increment(u16 rn, int standby, int assigned) | ||
483 | { | ||
484 | struct memory_increment *incr, *new_incr; | ||
485 | struct list_head *prev; | ||
486 | u16 last_rn; | ||
487 | |||
488 | new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL); | ||
489 | if (!new_incr) | ||
490 | return; | ||
491 | new_incr->rn = rn; | ||
492 | new_incr->standby = standby; | ||
493 | last_rn = 0; | ||
494 | prev = &sclp_mem_list; | ||
495 | list_for_each_entry(incr, &sclp_mem_list, list) { | ||
496 | if (assigned && incr->rn > rn) | ||
497 | break; | ||
498 | if (!assigned && incr->rn - last_rn > 1) | ||
499 | break; | ||
500 | last_rn = incr->rn; | ||
501 | prev = &incr->list; | ||
502 | } | ||
503 | if (!assigned) | ||
504 | new_incr->rn = last_rn + 1; | ||
505 | if (new_incr->rn > rnmax) { | ||
506 | kfree(new_incr); | ||
507 | return; | ||
508 | } | ||
509 | list_add(&new_incr->list, prev); | ||
510 | } | ||
511 | |||
512 | struct read_storage_sccb { | ||
513 | struct sccb_header header; | ||
514 | u16 max_id; | ||
515 | u16 assigned; | ||
516 | u16 standby; | ||
517 | u16 :16; | ||
518 | u32 entries[0]; | ||
519 | } __packed; | ||
520 | |||
521 | static int __init sclp_detect_standby_memory(void) | ||
522 | { | ||
523 | struct read_storage_sccb *sccb; | ||
524 | int i, id, assigned, rc; | ||
525 | |||
526 | if (!early_read_info_sccb_valid) | ||
527 | return 0; | ||
528 | if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) | ||
529 | return 0; | ||
530 | rc = -ENOMEM; | ||
531 | sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA); | ||
532 | if (!sccb) | ||
533 | goto out; | ||
534 | assigned = 0; | ||
535 | for (id = 0; id <= sclp_max_storage_id; id++) { | ||
536 | memset(sccb, 0, PAGE_SIZE); | ||
537 | sccb->header.length = PAGE_SIZE; | ||
538 | rc = do_sync_request(0x00040001 | id << 8, sccb); | ||
539 | if (rc) | ||
540 | goto out; | ||
541 | switch (sccb->header.response_code) { | ||
542 | case 0x0010: | ||
543 | set_bit(id, sclp_storage_ids); | ||
544 | for (i = 0; i < sccb->assigned; i++) { | ||
545 | if (!sccb->entries[i]) | ||
546 | continue; | ||
547 | assigned++; | ||
548 | insert_increment(sccb->entries[i] >> 16, 0, 1); | ||
549 | } | ||
550 | break; | ||
551 | case 0x0310: | ||
552 | break; | ||
553 | case 0x0410: | ||
554 | for (i = 0; i < sccb->assigned; i++) { | ||
555 | if (!sccb->entries[i]) | ||
556 | continue; | ||
557 | assigned++; | ||
558 | insert_increment(sccb->entries[i] >> 16, 1, 1); | ||
559 | } | ||
560 | break; | ||
561 | default: | ||
562 | rc = -EIO; | ||
563 | break; | ||
564 | } | ||
565 | if (!rc) | ||
566 | sclp_max_storage_id = sccb->max_id; | ||
567 | } | ||
568 | if (rc || list_empty(&sclp_mem_list)) | ||
569 | goto out; | ||
570 | for (i = 1; i <= rnmax - assigned; i++) | ||
571 | insert_increment(0, 1, 0); | ||
572 | rc = register_memory_notifier(&sclp_mem_nb); | ||
573 | if (rc) | ||
574 | goto out; | ||
575 | sclp_add_standby_memory(); | ||
576 | out: | ||
577 | free_page((unsigned long) sccb); | ||
578 | return rc; | ||
579 | } | ||
580 | __initcall(sclp_detect_standby_memory); | ||
581 | |||
582 | #endif /* CONFIG_MEMORY_HOTPLUG */ | ||
583 | |||
281 | /* | 584 | /* |
282 | * Channel path configuration related functions. | 585 | * Channel path configuration related functions. |
283 | */ | 586 | */ |
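The CONFIG_MEMORY_HOTPLUG block added above is built around storage increments: rnmax is the highest increment number, rzm the increment size in bytes (rnsize is reported in MB, hence the shift by 20), and rn2addr() maps increment number rn, counted from 1, to the start address (rn - 1) * rzm. add_memory_merged() then coalesces runs of consecutive standby increments into a single add_memory() call and is flushed by a final call with rn == 0. The standalone sketch below replays that arithmetic; the 256 MB increment size and the increment numbers are made-up example values, and the VMEM_MAX_PHYS clipping is omitted:

#include <stdio.h>

static unsigned long long rzm = 256ULL << 20;	/* example: 256 MB per increment */

static unsigned long long rn2addr(unsigned short rn)
{
	return (unsigned long long)(rn - 1) * rzm;	/* same formula as rn2addr() above */
}

int main(void)
{
	unsigned short standby[] = { 5, 6, 7, 9 };	/* example standby increment numbers */
	size_t n = sizeof(standby) / sizeof(standby[0]);
	unsigned short first = 0, num = 0;
	size_t i;

	/* mirror add_memory_merged(): merge consecutive rns, flush with rn == 0 */
	for (i = 0; i <= n; i++) {
		unsigned short rn = (i < n) ? standby[i] : 0;

		if (rn && first && first + num == rn) {
			num++;
			continue;
		}
		if (first)
			printf("add_memory(start=%#llx, size=%#llx)\n",
			       rn2addr(first), (unsigned long long)num * rzm);
		first = rn;
		num = 1;
	}
	return 0;
}

With these example values it prints one merged region of 0x30000000 bytes at 0x40000000 and a single increment of 0x10000000 bytes at 0x80000000. Once registered, the standby regions show up as ordinary memory blocks and are brought online through the normal memory-hotplug interface, which is what reaches sclp_mem_notifier() above as MEM_GOING_ONLINE / MEM_CANCEL_ONLINE.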
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index ead1043d788e..7e619c534bf4 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c | |||
@@ -14,14 +14,13 @@ | |||
14 | #include <linux/timer.h> | 14 | #include <linux/timer.h> |
15 | #include <linux/jiffies.h> | 15 | #include <linux/jiffies.h> |
16 | #include <linux/bootmem.h> | 16 | #include <linux/bootmem.h> |
17 | #include <linux/termios.h> | ||
17 | #include <linux/err.h> | 18 | #include <linux/err.h> |
18 | 19 | ||
19 | #include "sclp.h" | 20 | #include "sclp.h" |
20 | #include "sclp_rw.h" | 21 | #include "sclp_rw.h" |
21 | #include "sclp_tty.h" | 22 | #include "sclp_tty.h" |
22 | 23 | ||
23 | #define SCLP_CON_PRINT_HEADER "sclp console driver: " | ||
24 | |||
25 | #define sclp_console_major 4 /* TTYAUX_MAJOR */ | 24 | #define sclp_console_major 4 /* TTYAUX_MAJOR */ |
26 | #define sclp_console_minor 64 | 25 | #define sclp_console_minor 64 |
27 | #define sclp_console_name "ttyS" | 26 | #define sclp_console_name "ttyS" |
@@ -222,8 +221,6 @@ sclp_console_init(void) | |||
222 | INIT_LIST_HEAD(&sclp_con_pages); | 221 | INIT_LIST_HEAD(&sclp_con_pages); |
223 | for (i = 0; i < MAX_CONSOLE_PAGES; i++) { | 222 | for (i = 0; i < MAX_CONSOLE_PAGES; i++) { |
224 | page = alloc_bootmem_low_pages(PAGE_SIZE); | 223 | page = alloc_bootmem_low_pages(PAGE_SIZE); |
225 | if (page == NULL) | ||
226 | return -ENOMEM; | ||
227 | list_add_tail((struct list_head *) page, &sclp_con_pages); | 224 | list_add_tail((struct list_head *) page, &sclp_con_pages); |
228 | } | 225 | } |
229 | INIT_LIST_HEAD(&sclp_con_outqueue); | 226 | INIT_LIST_HEAD(&sclp_con_outqueue); |
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index ad05a87bc480..fff4ff485d9b 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
10 | #include <linux/cpu.h> | 10 | #include <linux/cpu.h> |
11 | #include <linux/kthread.h> | ||
11 | #include <linux/sysdev.h> | 12 | #include <linux/sysdev.h> |
12 | #include <linux/workqueue.h> | 13 | #include <linux/workqueue.h> |
13 | #include <asm/smp.h> | 14 | #include <asm/smp.h> |
@@ -40,9 +41,19 @@ static void sclp_cpu_capability_notify(struct work_struct *work) | |||
40 | put_online_cpus(); | 41 | put_online_cpus(); |
41 | } | 42 | } |
42 | 43 | ||
43 | static void __ref sclp_cpu_change_notify(struct work_struct *work) | 44 | static int sclp_cpu_kthread(void *data) |
44 | { | 45 | { |
45 | smp_rescan_cpus(); | 46 | smp_rescan_cpus(); |
47 | return 0; | ||
48 | } | ||
49 | |||
50 | static void __ref sclp_cpu_change_notify(struct work_struct *work) | ||
51 | { | ||
52 | /* Can't call smp_rescan_cpus() from workqueue context since it may | ||
53 | * deadlock in case of cpu hotplug. So we have to create a kernel | ||
54 | * thread in order to call it. | ||
55 | */ | ||
56 | kthread_run(sclp_cpu_kthread, NULL, "cpu_rescan"); | ||
46 | } | 57 | } |
47 | 58 | ||
48 | static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) | 59 | static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) |
@@ -74,10 +85,8 @@ static int __init sclp_conf_init(void) | |||
74 | INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify); | 85 | INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify); |
75 | 86 | ||
76 | rc = sclp_register(&sclp_conf_register); | 87 | rc = sclp_register(&sclp_conf_register); |
77 | if (rc) { | 88 | if (rc) |
78 | printk(KERN_ERR TAG "failed to register (%d).\n", rc); | ||
79 | return rc; | 89 | return rc; |
80 | } | ||
81 | 90 | ||
82 | if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { | 91 | if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { |
83 | printk(KERN_WARNING TAG "no configuration management.\n"); | 92 | printk(KERN_WARNING TAG "no configuration management.\n"); |
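The rework above moves the CPU rescan out of workqueue context because, as the new comment explains, calling it there may deadlock against cpu hotplug. Below is a minimal fragment of that bounce-to-kthread pattern; do_rescan() and rescan_work_fn() are hypothetical stand-ins, not the driver's functions, and error handling for kthread_run() is omitted just as it is in the driver:

#include <linux/kthread.h>
#include <linux/workqueue.h>

static int do_rescan(void *data)
{
	/* the work that may block on cpu hotplug would go here */
	return 0;
}

static void rescan_work_fn(struct work_struct *work)
{
	/* workqueue context: only spawn a short-lived, detached kernel thread */
	kthread_run(do_rescan, NULL, "cpu_rescan");
}

static DECLARE_WORK(rescan_work, rescan_work_fn);

The event callback itself only needs schedule_work(&rescan_work); the potentially blocking call then runs in the detached thread, never on a workqueue worker.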
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c index 9f37456222e9..d887bd261d28 100644 --- a/drivers/s390/char/sclp_cpi_sys.c +++ b/drivers/s390/char/sclp_cpi_sys.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #define CPI_LENGTH_NAME 8 | 27 | #define CPI_LENGTH_NAME 8 |
28 | #define CPI_LENGTH_LEVEL 16 | 28 | #define CPI_LENGTH_LEVEL 16 |
29 | 29 | ||
30 | static DEFINE_MUTEX(sclp_cpi_mutex); | ||
31 | |||
30 | struct cpi_evbuf { | 32 | struct cpi_evbuf { |
31 | struct evbuf_header header; | 33 | struct evbuf_header header; |
32 | u8 id_format; | 34 | u8 id_format; |
@@ -124,21 +126,15 @@ static int cpi_req(void) | |||
124 | int response; | 126 | int response; |
125 | 127 | ||
126 | rc = sclp_register(&sclp_cpi_event); | 128 | rc = sclp_register(&sclp_cpi_event); |
127 | if (rc) { | 129 | if (rc) |
128 | printk(KERN_WARNING "cpi: could not register " | ||
129 | "to hardware console.\n"); | ||
130 | goto out; | 130 | goto out; |
131 | } | ||
132 | if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) { | 131 | if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) { |
133 | printk(KERN_WARNING "cpi: no control program " | ||
134 | "identification support\n"); | ||
135 | rc = -EOPNOTSUPP; | 132 | rc = -EOPNOTSUPP; |
136 | goto out_unregister; | 133 | goto out_unregister; |
137 | } | 134 | } |
138 | 135 | ||
139 | req = cpi_prepare_req(); | 136 | req = cpi_prepare_req(); |
140 | if (IS_ERR(req)) { | 137 | if (IS_ERR(req)) { |
141 | printk(KERN_WARNING "cpi: could not allocate request\n"); | ||
142 | rc = PTR_ERR(req); | 138 | rc = PTR_ERR(req); |
143 | goto out_unregister; | 139 | goto out_unregister; |
144 | } | 140 | } |
@@ -148,10 +144,8 @@ static int cpi_req(void) | |||
148 | 144 | ||
149 | /* Add request to sclp queue */ | 145 | /* Add request to sclp queue */ |
150 | rc = sclp_add_request(req); | 146 | rc = sclp_add_request(req); |
151 | if (rc) { | 147 | if (rc) |
152 | printk(KERN_WARNING "cpi: could not start request\n"); | ||
153 | goto out_free_req; | 148 | goto out_free_req; |
154 | } | ||
155 | 149 | ||
156 | wait_for_completion(&completion); | 150 | wait_for_completion(&completion); |
157 | 151 | ||
@@ -223,7 +217,12 @@ static void set_string(char *attr, const char *value) | |||
223 | static ssize_t system_name_show(struct kobject *kobj, | 217 | static ssize_t system_name_show(struct kobject *kobj, |
224 | struct kobj_attribute *attr, char *page) | 218 | struct kobj_attribute *attr, char *page) |
225 | { | 219 | { |
226 | return snprintf(page, PAGE_SIZE, "%s\n", system_name); | 220 | int rc; |
221 | |||
222 | mutex_lock(&sclp_cpi_mutex); | ||
223 | rc = snprintf(page, PAGE_SIZE, "%s\n", system_name); | ||
224 | mutex_unlock(&sclp_cpi_mutex); | ||
225 | return rc; | ||
227 | } | 226 | } |
228 | 227 | ||
229 | static ssize_t system_name_store(struct kobject *kobj, | 228 | static ssize_t system_name_store(struct kobject *kobj, |
@@ -237,7 +236,9 @@ static ssize_t system_name_store(struct kobject *kobj, | |||
237 | if (rc) | 236 | if (rc) |
238 | return rc; | 237 | return rc; |
239 | 238 | ||
239 | mutex_lock(&sclp_cpi_mutex); | ||
240 | set_string(system_name, buf); | 240 | set_string(system_name, buf); |
241 | mutex_unlock(&sclp_cpi_mutex); | ||
241 | 242 | ||
242 | return len; | 243 | return len; |
243 | } | 244 | } |
@@ -248,7 +249,12 @@ static struct kobj_attribute system_name_attr = | |||
248 | static ssize_t sysplex_name_show(struct kobject *kobj, | 249 | static ssize_t sysplex_name_show(struct kobject *kobj, |
249 | struct kobj_attribute *attr, char *page) | 250 | struct kobj_attribute *attr, char *page) |
250 | { | 251 | { |
251 | return snprintf(page, PAGE_SIZE, "%s\n", sysplex_name); | 252 | int rc; |
253 | |||
254 | mutex_lock(&sclp_cpi_mutex); | ||
255 | rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name); | ||
256 | mutex_unlock(&sclp_cpi_mutex); | ||
257 | return rc; | ||
252 | } | 258 | } |
253 | 259 | ||
254 | static ssize_t sysplex_name_store(struct kobject *kobj, | 260 | static ssize_t sysplex_name_store(struct kobject *kobj, |
@@ -262,7 +268,9 @@ static ssize_t sysplex_name_store(struct kobject *kobj, | |||
262 | if (rc) | 268 | if (rc) |
263 | return rc; | 269 | return rc; |
264 | 270 | ||
271 | mutex_lock(&sclp_cpi_mutex); | ||
265 | set_string(sysplex_name, buf); | 272 | set_string(sysplex_name, buf); |
273 | mutex_unlock(&sclp_cpi_mutex); | ||
266 | 274 | ||
267 | return len; | 275 | return len; |
268 | } | 276 | } |
@@ -273,7 +281,12 @@ static struct kobj_attribute sysplex_name_attr = | |||
273 | static ssize_t system_type_show(struct kobject *kobj, | 281 | static ssize_t system_type_show(struct kobject *kobj, |
274 | struct kobj_attribute *attr, char *page) | 282 | struct kobj_attribute *attr, char *page) |
275 | { | 283 | { |
276 | return snprintf(page, PAGE_SIZE, "%s\n", system_type); | 284 | int rc; |
285 | |||
286 | mutex_lock(&sclp_cpi_mutex); | ||
287 | rc = snprintf(page, PAGE_SIZE, "%s\n", system_type); | ||
288 | mutex_unlock(&sclp_cpi_mutex); | ||
289 | return rc; | ||
277 | } | 290 | } |
278 | 291 | ||
279 | static ssize_t system_type_store(struct kobject *kobj, | 292 | static ssize_t system_type_store(struct kobject *kobj, |
@@ -287,7 +300,9 @@ static ssize_t system_type_store(struct kobject *kobj, | |||
287 | if (rc) | 300 | if (rc) |
288 | return rc; | 301 | return rc; |
289 | 302 | ||
303 | mutex_lock(&sclp_cpi_mutex); | ||
290 | set_string(system_type, buf); | 304 | set_string(system_type, buf); |
305 | mutex_unlock(&sclp_cpi_mutex); | ||
291 | 306 | ||
292 | return len; | 307 | return len; |
293 | } | 308 | } |
@@ -298,8 +313,11 @@ static struct kobj_attribute system_type_attr = | |||
298 | static ssize_t system_level_show(struct kobject *kobj, | 313 | static ssize_t system_level_show(struct kobject *kobj, |
299 | struct kobj_attribute *attr, char *page) | 314 | struct kobj_attribute *attr, char *page) |
300 | { | 315 | { |
301 | unsigned long long level = system_level; | 316 | unsigned long long level; |
302 | 317 | ||
318 | mutex_lock(&sclp_cpi_mutex); | ||
319 | level = system_level; | ||
320 | mutex_unlock(&sclp_cpi_mutex); | ||
303 | return snprintf(page, PAGE_SIZE, "%#018llx\n", level); | 321 | return snprintf(page, PAGE_SIZE, "%#018llx\n", level); |
304 | } | 322 | } |
305 | 323 | ||
@@ -320,8 +338,9 @@ static ssize_t system_level_store(struct kobject *kobj, | |||
320 | if (*endp) | 338 | if (*endp) |
321 | return -EINVAL; | 339 | return -EINVAL; |
322 | 340 | ||
341 | mutex_lock(&sclp_cpi_mutex); | ||
323 | system_level = level; | 342 | system_level = level; |
324 | 343 | mutex_unlock(&sclp_cpi_mutex); | |
325 | return len; | 344 | return len; |
326 | } | 345 | } |
327 | 346 | ||
@@ -334,7 +353,9 @@ static ssize_t set_store(struct kobject *kobj, | |||
334 | { | 353 | { |
335 | int rc; | 354 | int rc; |
336 | 355 | ||
356 | mutex_lock(&sclp_cpi_mutex); | ||
337 | rc = cpi_req(); | 357 | rc = cpi_req(); |
358 | mutex_unlock(&sclp_cpi_mutex); | ||
338 | if (rc) | 359 | if (rc) |
339 | return rc; | 360 | return rc; |
340 | 361 | ||
@@ -373,12 +394,16 @@ int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type, | |||
373 | if (rc) | 394 | if (rc) |
374 | return rc; | 395 | return rc; |
375 | 396 | ||
397 | mutex_lock(&sclp_cpi_mutex); | ||
376 | set_string(system_name, system); | 398 | set_string(system_name, system); |
377 | set_string(sysplex_name, sysplex); | 399 | set_string(sysplex_name, sysplex); |
378 | set_string(system_type, type); | 400 | set_string(system_type, type); |
379 | system_level = level; | 401 | system_level = level; |
380 | 402 | ||
381 | return cpi_req(); | 403 | rc = cpi_req(); |
404 | mutex_unlock(&sclp_cpi_mutex); | ||
405 | |||
406 | return rc; | ||
382 | } | 407 | } |
383 | EXPORT_SYMBOL(sclp_cpi_set_data); | 408 | EXPORT_SYMBOL(sclp_cpi_set_data); |
384 | 409 | ||
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c index 45ff25e787cb..84c191c1cd62 100644 --- a/drivers/s390/char/sclp_quiesce.c +++ b/drivers/s390/char/sclp_quiesce.c | |||
@@ -51,13 +51,7 @@ static struct sclp_register sclp_quiesce_event = { | |||
51 | static int __init | 51 | static int __init |
52 | sclp_quiesce_init(void) | 52 | sclp_quiesce_init(void) |
53 | { | 53 | { |
54 | int rc; | 54 | return sclp_register(&sclp_quiesce_event); |
55 | |||
56 | rc = sclp_register(&sclp_quiesce_event); | ||
57 | if (rc) | ||
58 | printk(KERN_WARNING "sclp: could not register quiesce handler " | ||
59 | "(rc=%d)\n", rc); | ||
60 | return rc; | ||
61 | } | 55 | } |
62 | 56 | ||
63 | module_init(sclp_quiesce_init); | 57 | module_init(sclp_quiesce_init); |
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index da09781b32f7..710af42603f8 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c | |||
@@ -19,8 +19,6 @@ | |||
19 | #include "sclp.h" | 19 | #include "sclp.h" |
20 | #include "sclp_rw.h" | 20 | #include "sclp_rw.h" |
21 | 21 | ||
22 | #define SCLP_RW_PRINT_HEADER "sclp low level driver: " | ||
23 | |||
24 | /* | 22 | /* |
25 | * The room for the SCCB (only for writing) is not equal to a pages size | 23 | * The room for the SCCB (only for writing) is not equal to a pages size |
26 | * (as it is specified as the maximum size in the SCLP documentation) | 24 | * (as it is specified as the maximum size in the SCLP documentation) |
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index 1c064976b32b..8b854857ba07 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c | |||
@@ -239,10 +239,8 @@ int __init sclp_sdias_init(void) | |||
239 | debug_register_view(sdias_dbf, &debug_sprintf_view); | 239 | debug_register_view(sdias_dbf, &debug_sprintf_view); |
240 | debug_set_level(sdias_dbf, 6); | 240 | debug_set_level(sdias_dbf, 6); |
241 | rc = sclp_register(&sclp_sdias_register); | 241 | rc = sclp_register(&sclp_sdias_register); |
242 | if (rc) { | 242 | if (rc) |
243 | ERROR_MSG("sclp register failed\n"); | ||
244 | return rc; | 243 | return rc; |
245 | } | ||
246 | init_waitqueue_head(&sdias_wq); | 244 | init_waitqueue_head(&sdias_wq); |
247 | TRACE("init done\n"); | 245 | TRACE("init done\n"); |
248 | return 0; | 246 | return 0; |
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 40b11521cd20..434ba04b1309 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/tty.h> | 13 | #include <linux/tty.h> |
14 | #include <linux/tty_driver.h> | 14 | #include <linux/tty_driver.h> |
15 | #include <linux/tty_flip.h> | 15 | #include <linux/tty_flip.h> |
16 | #include <linux/wait.h> | ||
17 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
18 | #include <linux/err.h> | 17 | #include <linux/err.h> |
19 | #include <linux/init.h> | 18 | #include <linux/init.h> |
@@ -25,8 +24,6 @@ | |||
25 | #include "sclp_rw.h" | 24 | #include "sclp_rw.h" |
26 | #include "sclp_tty.h" | 25 | #include "sclp_tty.h" |
27 | 26 | ||
28 | #define SCLP_TTY_PRINT_HEADER "sclp tty driver: " | ||
29 | |||
30 | /* | 27 | /* |
31 | * size of a buffer that collects single characters coming in | 28 | * size of a buffer that collects single characters coming in |
32 | * via sclp_tty_put_char() | 29 | * via sclp_tty_put_char() |
@@ -50,8 +47,6 @@ static int sclp_tty_buffer_count; | |||
50 | static struct sclp_buffer *sclp_ttybuf; | 47 | static struct sclp_buffer *sclp_ttybuf; |
51 | /* Timer for delayed output of console messages. */ | 48 | /* Timer for delayed output of console messages. */ |
52 | static struct timer_list sclp_tty_timer; | 49 | static struct timer_list sclp_tty_timer; |
53 | /* Waitqueue to wait for buffers to get empty. */ | ||
54 | static wait_queue_head_t sclp_tty_waitq; | ||
55 | 50 | ||
56 | static struct tty_struct *sclp_tty; | 51 | static struct tty_struct *sclp_tty; |
57 | static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE]; | 52 | static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE]; |
@@ -59,19 +54,11 @@ static unsigned short int sclp_tty_chars_count; | |||
59 | 54 | ||
60 | struct tty_driver *sclp_tty_driver; | 55 | struct tty_driver *sclp_tty_driver; |
61 | 56 | ||
62 | static struct sclp_ioctls sclp_ioctls; | 57 | static int sclp_tty_tolower; |
63 | static struct sclp_ioctls sclp_ioctls_init = | 58 | static int sclp_tty_columns = 80; |
64 | { | 59 | |
65 | 8, /* 1 hor. tab. = 8 spaces */ | 60 | #define SPACES_PER_TAB 8 |
66 | 0, /* no echo of input by this driver */ | 61 | #define CASE_DELIMITER 0x6c /* to separate upper and lower case (% in EBCDIC) */ |
67 | 80, /* 80 characters/line */ | ||
68 | 1, /* write after 1/10 s without final new line */ | ||
69 | MAX_KMEM_PAGES, /* quick fix: avoid __alloc_pages */ | ||
70 | MAX_KMEM_PAGES, /* take 32/64 pages from kernel memory, */ | ||
71 | 0, /* do not convert to lower case */ | ||
72 | 0x6c /* to seprate upper and lower case */ | ||
73 | /* ('%' in EBCDIC) */ | ||
74 | }; | ||
75 | 62 | ||
76 | /* This routine is called whenever we try to open a SCLP terminal. */ | 63 | /* This routine is called whenever we try to open a SCLP terminal. */ |
77 | static int | 64 | static int |
@@ -92,136 +79,6 @@ sclp_tty_close(struct tty_struct *tty, struct file *filp) | |||
92 | sclp_tty = NULL; | 79 | sclp_tty = NULL; |
93 | } | 80 | } |
94 | 81 | ||
95 | /* execute commands to control the i/o behaviour of the SCLP tty at runtime */ | ||
96 | static int | ||
97 | sclp_tty_ioctl(struct tty_struct *tty, struct file * file, | ||
98 | unsigned int cmd, unsigned long arg) | ||
99 | { | ||
100 | unsigned long flags; | ||
101 | unsigned int obuf; | ||
102 | int check; | ||
103 | int rc; | ||
104 | |||
105 | if (tty->flags & (1 << TTY_IO_ERROR)) | ||
106 | return -EIO; | ||
107 | rc = 0; | ||
108 | check = 0; | ||
109 | switch (cmd) { | ||
110 | case TIOCSCLPSHTAB: | ||
111 | /* set width of horizontal tab */ | ||
112 | if (get_user(sclp_ioctls.htab, (unsigned short __user *) arg)) | ||
113 | rc = -EFAULT; | ||
114 | else | ||
115 | check = 1; | ||
116 | break; | ||
117 | case TIOCSCLPGHTAB: | ||
118 | /* get width of horizontal tab */ | ||
119 | if (put_user(sclp_ioctls.htab, (unsigned short __user *) arg)) | ||
120 | rc = -EFAULT; | ||
121 | break; | ||
122 | case TIOCSCLPSECHO: | ||
123 | /* enable/disable echo of input */ | ||
124 | if (get_user(sclp_ioctls.echo, (unsigned char __user *) arg)) | ||
125 | rc = -EFAULT; | ||
126 | break; | ||
127 | case TIOCSCLPGECHO: | ||
128 | /* Is echo of input enabled ? */ | ||
129 | if (put_user(sclp_ioctls.echo, (unsigned char __user *) arg)) | ||
130 | rc = -EFAULT; | ||
131 | break; | ||
132 | case TIOCSCLPSCOLS: | ||
133 | /* set number of columns for output */ | ||
134 | if (get_user(sclp_ioctls.columns, (unsigned short __user *) arg)) | ||
135 | rc = -EFAULT; | ||
136 | else | ||
137 | check = 1; | ||
138 | break; | ||
139 | case TIOCSCLPGCOLS: | ||
140 | /* get number of columns for output */ | ||
141 | if (put_user(sclp_ioctls.columns, (unsigned short __user *) arg)) | ||
142 | rc = -EFAULT; | ||
143 | break; | ||
144 | case TIOCSCLPSNL: | ||
145 | /* enable/disable writing without final new line character */ | ||
146 | if (get_user(sclp_ioctls.final_nl, (signed char __user *) arg)) | ||
147 | rc = -EFAULT; | ||
148 | break; | ||
149 | case TIOCSCLPGNL: | ||
150 | /* Is writing without final new line character enabled ? */ | ||
151 | if (put_user(sclp_ioctls.final_nl, (signed char __user *) arg)) | ||
152 | rc = -EFAULT; | ||
153 | break; | ||
154 | case TIOCSCLPSOBUF: | ||
155 | /* | ||
156 | * set the maximum buffers size for output, will be rounded | ||
157 | * up to next 4kB boundary and stored as number of SCCBs | ||
158 | * (4kB Buffers) limitation: 256 x 4kB | ||
159 | */ | ||
160 | if (get_user(obuf, (unsigned int __user *) arg) == 0) { | ||
161 | if (obuf & 0xFFF) | ||
162 | sclp_ioctls.max_sccb = (obuf >> 12) + 1; | ||
163 | else | ||
164 | sclp_ioctls.max_sccb = (obuf >> 12); | ||
165 | } else | ||
166 | rc = -EFAULT; | ||
167 | break; | ||
168 | case TIOCSCLPGOBUF: | ||
169 | /* get the maximum buffers size for output */ | ||
170 | obuf = sclp_ioctls.max_sccb << 12; | ||
171 | if (put_user(obuf, (unsigned int __user *) arg)) | ||
172 | rc = -EFAULT; | ||
173 | break; | ||
174 | case TIOCSCLPGKBUF: | ||
175 | /* get the number of buffers got from kernel at startup */ | ||
176 | if (put_user(sclp_ioctls.kmem_sccb, (unsigned short __user *) arg)) | ||
177 | rc = -EFAULT; | ||
178 | break; | ||
179 | case TIOCSCLPSCASE: | ||
180 | /* enable/disable conversion from upper to lower case */ | ||
181 | if (get_user(sclp_ioctls.tolower, (unsigned char __user *) arg)) | ||
182 | rc = -EFAULT; | ||
183 | break; | ||
184 | case TIOCSCLPGCASE: | ||
185 | /* Is conversion from upper to lower case of input enabled? */ | ||
186 | if (put_user(sclp_ioctls.tolower, (unsigned char __user *) arg)) | ||
187 | rc = -EFAULT; | ||
188 | break; | ||
189 | case TIOCSCLPSDELIM: | ||
190 | /* | ||
191 | * set special character used for separating upper and | ||
192 | * lower case, 0x00 disables this feature | ||
193 | */ | ||
194 | if (get_user(sclp_ioctls.delim, (unsigned char __user *) arg)) | ||
195 | rc = -EFAULT; | ||
196 | break; | ||
197 | case TIOCSCLPGDELIM: | ||
198 | /* | ||
199 | * get special character used for separating upper and | ||
200 | * lower case, 0x00 disables this feature | ||
201 | */ | ||
202 | if (put_user(sclp_ioctls.delim, (unsigned char __user *) arg)) | ||
203 | rc = -EFAULT; | ||
204 | break; | ||
205 | case TIOCSCLPSINIT: | ||
206 | /* set initial (default) sclp ioctls */ | ||
207 | sclp_ioctls = sclp_ioctls_init; | ||
208 | check = 1; | ||
209 | break; | ||
210 | default: | ||
211 | rc = -ENOIOCTLCMD; | ||
212 | break; | ||
213 | } | ||
214 | if (check) { | ||
215 | spin_lock_irqsave(&sclp_tty_lock, flags); | ||
216 | if (sclp_ttybuf != NULL) { | ||
217 | sclp_set_htab(sclp_ttybuf, sclp_ioctls.htab); | ||
218 | sclp_set_columns(sclp_ttybuf, sclp_ioctls.columns); | ||
219 | } | ||
220 | spin_unlock_irqrestore(&sclp_tty_lock, flags); | ||
221 | } | ||
222 | return rc; | ||
223 | } | ||
224 | |||
225 | /* | 82 | /* |
226 | * This routine returns the numbers of characters the tty driver | 83 | * This routine returns the numbers of characters the tty driver |
227 | * will accept for queuing to be written. This number is subject | 84 | * will accept for queuing to be written. This number is subject |
@@ -268,7 +125,6 @@ sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc) | |||
268 | struct sclp_buffer, list); | 125 | struct sclp_buffer, list); |
269 | spin_unlock_irqrestore(&sclp_tty_lock, flags); | 126 | spin_unlock_irqrestore(&sclp_tty_lock, flags); |
270 | } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback)); | 127 | } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback)); |
271 | wake_up(&sclp_tty_waitq); | ||
272 | /* check if the tty needs a wake up call */ | 128 | /* check if the tty needs a wake up call */ |
273 | if (sclp_tty != NULL) { | 129 | if (sclp_tty != NULL) { |
274 | tty_wakeup(sclp_tty); | 130 | tty_wakeup(sclp_tty); |
@@ -316,37 +172,37 @@ sclp_tty_timeout(unsigned long data) | |||
316 | /* | 172 | /* |
317 | * Write a string to the sclp tty. | 173 | * Write a string to the sclp tty. |
318 | */ | 174 | */ |
319 | static void | 175 | static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail) |
320 | sclp_tty_write_string(const unsigned char *str, int count) | ||
321 | { | 176 | { |
322 | unsigned long flags; | 177 | unsigned long flags; |
323 | void *page; | 178 | void *page; |
324 | int written; | 179 | int written; |
180 | int overall_written; | ||
325 | struct sclp_buffer *buf; | 181 | struct sclp_buffer *buf; |
326 | 182 | ||
327 | if (count <= 0) | 183 | if (count <= 0) |
328 | return; | 184 | return 0; |
185 | overall_written = 0; | ||
329 | spin_lock_irqsave(&sclp_tty_lock, flags); | 186 | spin_lock_irqsave(&sclp_tty_lock, flags); |
330 | do { | 187 | do { |
331 | /* Create a sclp output buffer if none exists yet */ | 188 | /* Create a sclp output buffer if none exists yet */ |
332 | if (sclp_ttybuf == NULL) { | 189 | if (sclp_ttybuf == NULL) { |
333 | while (list_empty(&sclp_tty_pages)) { | 190 | while (list_empty(&sclp_tty_pages)) { |
334 | spin_unlock_irqrestore(&sclp_tty_lock, flags); | 191 | spin_unlock_irqrestore(&sclp_tty_lock, flags); |
335 | if (in_interrupt()) | 192 | if (may_fail) |
336 | sclp_sync_wait(); | 193 | goto out; |
337 | else | 194 | else |
338 | wait_event(sclp_tty_waitq, | 195 | sclp_sync_wait(); |
339 | !list_empty(&sclp_tty_pages)); | ||
340 | spin_lock_irqsave(&sclp_tty_lock, flags); | 196 | spin_lock_irqsave(&sclp_tty_lock, flags); |
341 | } | 197 | } |
342 | page = sclp_tty_pages.next; | 198 | page = sclp_tty_pages.next; |
343 | list_del((struct list_head *) page); | 199 | list_del((struct list_head *) page); |
344 | sclp_ttybuf = sclp_make_buffer(page, | 200 | sclp_ttybuf = sclp_make_buffer(page, sclp_tty_columns, |
345 | sclp_ioctls.columns, | 201 | SPACES_PER_TAB); |
346 | sclp_ioctls.htab); | ||
347 | } | 202 | } |
348 | /* try to write the string to the current output buffer */ | 203 | /* try to write the string to the current output buffer */ |
349 | written = sclp_write(sclp_ttybuf, str, count); | 204 | written = sclp_write(sclp_ttybuf, str, count); |
205 | overall_written += written; | ||
350 | if (written == count) | 206 | if (written == count) |
351 | break; | 207 | break; |
352 | /* | 208 | /* |
@@ -363,27 +219,17 @@ sclp_tty_write_string(const unsigned char *str, int count) | |||
363 | count -= written; | 219 | count -= written; |
364 | } while (count > 0); | 220 | } while (count > 0); |
365 | /* Setup timer to output current console buffer after 1/10 second */ | 221 | /* Setup timer to output current console buffer after 1/10 second */ |
366 | if (sclp_ioctls.final_nl) { | 222 | if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) && |
367 | if (sclp_ttybuf != NULL && | 223 | !timer_pending(&sclp_tty_timer)) { |
368 | sclp_chars_in_buffer(sclp_ttybuf) != 0 && | 224 | init_timer(&sclp_tty_timer); |
369 | !timer_pending(&sclp_tty_timer)) { | 225 | sclp_tty_timer.function = sclp_tty_timeout; |
370 | init_timer(&sclp_tty_timer); | 226 | sclp_tty_timer.data = 0UL; |
371 | sclp_tty_timer.function = sclp_tty_timeout; | 227 | sclp_tty_timer.expires = jiffies + HZ/10; |
372 | sclp_tty_timer.data = 0UL; | 228 | add_timer(&sclp_tty_timer); |
373 | sclp_tty_timer.expires = jiffies + HZ/10; | ||
374 | add_timer(&sclp_tty_timer); | ||
375 | } | ||
376 | } else { | ||
377 | if (sclp_ttybuf != NULL && | ||
378 | sclp_chars_in_buffer(sclp_ttybuf) != 0) { | ||
379 | buf = sclp_ttybuf; | ||
380 | sclp_ttybuf = NULL; | ||
381 | spin_unlock_irqrestore(&sclp_tty_lock, flags); | ||
382 | __sclp_ttybuf_emit(buf); | ||
383 | spin_lock_irqsave(&sclp_tty_lock, flags); | ||
384 | } | ||
385 | } | 229 | } |
386 | spin_unlock_irqrestore(&sclp_tty_lock, flags); | 230 | spin_unlock_irqrestore(&sclp_tty_lock, flags); |
231 | out: | ||
232 | return overall_written; | ||
387 | } | 233 | } |
388 | 234 | ||
389 | /* | 235 | /* |
@@ -395,11 +241,10 @@ static int | |||
395 | sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) | 241 | sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) |
396 | { | 242 | { |
397 | if (sclp_tty_chars_count > 0) { | 243 | if (sclp_tty_chars_count > 0) { |
398 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); | 244 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); |
399 | sclp_tty_chars_count = 0; | 245 | sclp_tty_chars_count = 0; |
400 | } | 246 | } |
401 | sclp_tty_write_string(buf, count); | 247 | return sclp_tty_write_string(buf, count, 1); |
402 | return count; | ||
403 | } | 248 | } |
404 | 249 | ||
405 | /* | 250 | /* |
@@ -417,9 +262,10 @@ sclp_tty_put_char(struct tty_struct *tty, unsigned char ch) | |||
417 | { | 262 | { |
418 | sclp_tty_chars[sclp_tty_chars_count++] = ch; | 263 | sclp_tty_chars[sclp_tty_chars_count++] = ch; |
419 | if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) { | 264 | if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) { |
420 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); | 265 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); |
421 | sclp_tty_chars_count = 0; | 266 | sclp_tty_chars_count = 0; |
422 | } return 1; | 267 | } |
268 | return 1; | ||
423 | } | 269 | } |
424 | 270 | ||
425 | /* | 271 | /* |
@@ -430,7 +276,7 @@ static void | |||
430 | sclp_tty_flush_chars(struct tty_struct *tty) | 276 | sclp_tty_flush_chars(struct tty_struct *tty) |
431 | { | 277 | { |
432 | if (sclp_tty_chars_count > 0) { | 278 | if (sclp_tty_chars_count > 0) { |
433 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); | 279 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); |
434 | sclp_tty_chars_count = 0; | 280 | sclp_tty_chars_count = 0; |
435 | } | 281 | } |
436 | } | 282 | } |
@@ -469,7 +315,7 @@ static void | |||
469 | sclp_tty_flush_buffer(struct tty_struct *tty) | 315 | sclp_tty_flush_buffer(struct tty_struct *tty) |
470 | { | 316 | { |
471 | if (sclp_tty_chars_count > 0) { | 317 | if (sclp_tty_chars_count > 0) { |
472 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); | 318 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); |
473 | sclp_tty_chars_count = 0; | 319 | sclp_tty_chars_count = 0; |
474 | } | 320 | } |
475 | } | 321 | } |
@@ -517,9 +363,7 @@ sclp_tty_input(unsigned char* buf, unsigned int count) | |||
517 | * modify original string, | 363 | * modify original string, |
518 | * returns length of resulting string | 364 | * returns length of resulting string |
519 | */ | 365 | */ |
520 | static int | 366 | static int sclp_switch_cases(unsigned char *buf, int count) |
521 | sclp_switch_cases(unsigned char *buf, int count, | ||
522 | unsigned char delim, int tolower) | ||
523 | { | 367 | { |
524 | unsigned char *ip, *op; | 368 | unsigned char *ip, *op; |
525 | int toggle; | 369 | int toggle; |
@@ -529,9 +373,9 @@ sclp_switch_cases(unsigned char *buf, int count, | |||
529 | ip = op = buf; | 373 | ip = op = buf; |
530 | while (count-- > 0) { | 374 | while (count-- > 0) { |
531 | /* compare with special character */ | 375 | /* compare with special character */ |
532 | if (*ip == delim) { | 376 | if (*ip == CASE_DELIMITER) { |
533 | /* followed by another special character? */ | 377 | /* followed by another special character? */ |
534 | if (count && ip[1] == delim) { | 378 | if (count && ip[1] == CASE_DELIMITER) { |
535 | /* | 379 | /* |
536 | * ... then put a single copy of the special | 380 | * ... then put a single copy of the special |
537 | * character to the output string | 381 | * character to the output string |
@@ -550,7 +394,7 @@ sclp_switch_cases(unsigned char *buf, int count, | |||
550 | /* not the special character */ | 394 | /* not the special character */ |
551 | if (toggle) | 395 | if (toggle) |
552 | /* but case switching is on */ | 396 | /* but case switching is on */ |
553 | if (tolower) | 397 | if (sclp_tty_tolower) |
554 | /* switch to uppercase */ | 398 | /* switch to uppercase */ |
555 | *op++ = _ebc_toupper[(int) *ip++]; | 399 | *op++ = _ebc_toupper[(int) *ip++]; |
556 | else | 400 | else |
@@ -570,30 +414,12 @@ sclp_get_input(unsigned char *start, unsigned char *end) | |||
570 | int count; | 414 | int count; |
571 | 415 | ||
572 | count = end - start; | 416 | count = end - start; |
573 | /* | 417 | if (sclp_tty_tolower) |
574 | * if set in ioctl convert EBCDIC to lower case | ||
575 | * (modify original input in SCCB) | ||
576 | */ | ||
577 | if (sclp_ioctls.tolower) | ||
578 | EBC_TOLOWER(start, count); | 418 | EBC_TOLOWER(start, count); |
579 | 419 | count = sclp_switch_cases(start, count); | |
580 | /* | ||
581 | * if set in ioctl find out characters in lower or upper case | ||
582 | * (depends on current case) separated by a special character, | ||
583 | * works on EBCDIC | ||
584 | */ | ||
585 | if (sclp_ioctls.delim) | ||
586 | count = sclp_switch_cases(start, count, | ||
587 | sclp_ioctls.delim, | ||
588 | sclp_ioctls.tolower); | ||
589 | |||
590 | /* convert EBCDIC to ASCII (modify original input in SCCB) */ | 420 | /* convert EBCDIC to ASCII (modify original input in SCCB) */ |
591 | sclp_ebcasc_str(start, count); | 421 | sclp_ebcasc_str(start, count); |
592 | 422 | ||
593 | /* if set in ioctl write operators input to console */ | ||
594 | if (sclp_ioctls.echo) | ||
595 | sclp_tty_write(sclp_tty, start, count); | ||
596 | |||
597 | /* transfer input to high level driver */ | 423 | /* transfer input to high level driver */ |
598 | sclp_tty_input(start, count); | 424 | sclp_tty_input(start, count); |
599 | } | 425 | } |
@@ -717,7 +543,6 @@ static const struct tty_operations sclp_ops = { | |||
717 | .write_room = sclp_tty_write_room, | 543 | .write_room = sclp_tty_write_room, |
718 | .chars_in_buffer = sclp_tty_chars_in_buffer, | 544 | .chars_in_buffer = sclp_tty_chars_in_buffer, |
719 | .flush_buffer = sclp_tty_flush_buffer, | 545 | .flush_buffer = sclp_tty_flush_buffer, |
720 | .ioctl = sclp_tty_ioctl, | ||
721 | }; | 546 | }; |
722 | 547 | ||
723 | static int __init | 548 | static int __init |
@@ -736,9 +561,6 @@ sclp_tty_init(void) | |||
736 | 561 | ||
737 | rc = sclp_rw_init(); | 562 | rc = sclp_rw_init(); |
738 | if (rc) { | 563 | if (rc) { |
739 | printk(KERN_ERR SCLP_TTY_PRINT_HEADER | ||
740 | "could not register tty - " | ||
741 | "sclp_rw_init returned %d\n", rc); | ||
742 | put_tty_driver(driver); | 564 | put_tty_driver(driver); |
743 | return rc; | 565 | return rc; |
744 | } | 566 | } |
@@ -754,7 +576,6 @@ sclp_tty_init(void) | |||
754 | } | 576 | } |
755 | INIT_LIST_HEAD(&sclp_tty_outqueue); | 577 | INIT_LIST_HEAD(&sclp_tty_outqueue); |
756 | spin_lock_init(&sclp_tty_lock); | 578 | spin_lock_init(&sclp_tty_lock); |
757 | init_waitqueue_head(&sclp_tty_waitq); | ||
758 | init_timer(&sclp_tty_timer); | 579 | init_timer(&sclp_tty_timer); |
759 | sclp_ttybuf = NULL; | 580 | sclp_ttybuf = NULL; |
760 | sclp_tty_buffer_count = 0; | 581 | sclp_tty_buffer_count = 0; |
@@ -763,11 +584,10 @@ sclp_tty_init(void) | |||
763 | * save 4 characters for the CPU number | 584 | * save 4 characters for the CPU number |
764 | * written at start of each line by VM/CP | 585 | * written at start of each line by VM/CP |
765 | */ | 586 | */ |
766 | sclp_ioctls_init.columns = 76; | 587 | sclp_tty_columns = 76; |
767 | /* case input lines to lowercase */ | 588 | /* case input lines to lowercase */ |
768 | sclp_ioctls_init.tolower = 1; | 589 | sclp_tty_tolower = 1; |
769 | } | 590 | } |
770 | sclp_ioctls = sclp_ioctls_init; | ||
771 | sclp_tty_chars_count = 0; | 591 | sclp_tty_chars_count = 0; |
772 | sclp_tty = NULL; | 592 | sclp_tty = NULL; |
773 | 593 | ||
@@ -792,9 +612,6 @@ sclp_tty_init(void) | |||
792 | tty_set_operations(driver, &sclp_ops); | 612 | tty_set_operations(driver, &sclp_ops); |
793 | rc = tty_register_driver(driver); | 613 | rc = tty_register_driver(driver); |
794 | if (rc) { | 614 | if (rc) { |
795 | printk(KERN_ERR SCLP_TTY_PRINT_HEADER | ||
796 | "could not register tty - " | ||
797 | "tty_register_driver returned %d\n", rc); | ||
798 | put_tty_driver(driver); | 615 | put_tty_driver(driver); |
799 | return rc; | 616 | return rc; |
800 | } | 617 | } |
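The write path above changes its contract: sclp_tty_write_string() now returns how many bytes it actually buffered, and sclp_tty_write() passes may_fail = 1 so that it reports a short count instead of blocking when no output page is free, while the put_char/flush paths keep passing 0 and wait via sclp_sync_wait(). This is the usual partial-write contract in which the caller — here essentially the tty layer — retries the remainder later. A tiny illustration, with demo_write() as a made-up sink that accepts at most 8 bytes per call:

#include <stdio.h>
#include <string.h>

/* made-up sink: accepts at most 8 bytes per call and reports the count */
static int demo_write(const char *buf, int count)
{
	int accepted = count < 8 ? count : 8;

	printf("accepted %d of %d: %.*s\n", accepted, count, accepted, buf);
	return accepted;
}

int main(void)
{
	const char *msg = "partial writes are retried by the caller";
	int len = (int)strlen(msg);
	int off = 0;

	while (off < len)
		off += demo_write(msg + off, len - off);
	return 0;
}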
diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h index 0ce2c1fc5340..4b965b22fecd 100644 --- a/drivers/s390/char/sclp_tty.h +++ b/drivers/s390/char/sclp_tty.h | |||
@@ -11,61 +11,8 @@ | |||
11 | #ifndef __SCLP_TTY_H__ | 11 | #ifndef __SCLP_TTY_H__ |
12 | #define __SCLP_TTY_H__ | 12 | #define __SCLP_TTY_H__ |
13 | 13 | ||
14 | #include <linux/ioctl.h> | ||
15 | #include <linux/termios.h> | ||
16 | #include <linux/tty_driver.h> | 14 | #include <linux/tty_driver.h> |
17 | 15 | ||
18 | /* This is the type of data structures storing sclp ioctl setting. */ | ||
19 | struct sclp_ioctls { | ||
20 | unsigned short htab; | ||
21 | unsigned char echo; | ||
22 | unsigned short columns; | ||
23 | unsigned char final_nl; | ||
24 | unsigned short max_sccb; | ||
25 | unsigned short kmem_sccb; /* can't be modified at run time */ | ||
26 | unsigned char tolower; | ||
27 | unsigned char delim; | ||
28 | }; | ||
29 | |||
30 | /* must be unique, FIXME: must be added in Documentation/ioctl_number.txt */ | ||
31 | #define SCLP_IOCTL_LETTER 'B' | ||
32 | |||
33 | /* set width of horizontal tabulator */ | ||
34 | #define TIOCSCLPSHTAB _IOW(SCLP_IOCTL_LETTER, 0, unsigned short) | ||
35 | /* enable/disable echo of input (independent from line discipline) */ | ||
36 | #define TIOCSCLPSECHO _IOW(SCLP_IOCTL_LETTER, 1, unsigned char) | ||
37 | /* set number of colums for output */ | ||
38 | #define TIOCSCLPSCOLS _IOW(SCLP_IOCTL_LETTER, 2, unsigned short) | ||
39 | /* enable/disable writing without final new line character */ | ||
40 | #define TIOCSCLPSNL _IOW(SCLP_IOCTL_LETTER, 4, signed char) | ||
41 | /* set the maximum buffers size for output, rounded up to next 4kB boundary */ | ||
42 | #define TIOCSCLPSOBUF _IOW(SCLP_IOCTL_LETTER, 5, unsigned short) | ||
43 | /* set initial (default) sclp ioctls */ | ||
44 | #define TIOCSCLPSINIT _IO(SCLP_IOCTL_LETTER, 6) | ||
45 | /* enable/disable conversion from upper to lower case of input */ | ||
46 | #define TIOCSCLPSCASE _IOW(SCLP_IOCTL_LETTER, 7, unsigned char) | ||
47 | /* set special character used for separating upper and lower case, */ | ||
48 | /* 0x00 disables this feature */ | ||
49 | #define TIOCSCLPSDELIM _IOW(SCLP_IOCTL_LETTER, 9, unsigned char) | ||
50 | |||
51 | /* get width of horizontal tabulator */ | ||
52 | #define TIOCSCLPGHTAB _IOR(SCLP_IOCTL_LETTER, 10, unsigned short) | ||
53 | /* Is echo of input enabled ? (independent from line discipline) */ | ||
54 | #define TIOCSCLPGECHO _IOR(SCLP_IOCTL_LETTER, 11, unsigned char) | ||
55 | /* get number of colums for output */ | ||
56 | #define TIOCSCLPGCOLS _IOR(SCLP_IOCTL_LETTER, 12, unsigned short) | ||
57 | /* Is writing without final new line character enabled ? */ | ||
58 | #define TIOCSCLPGNL _IOR(SCLP_IOCTL_LETTER, 14, signed char) | ||
59 | /* get the maximum buffers size for output */ | ||
60 | #define TIOCSCLPGOBUF _IOR(SCLP_IOCTL_LETTER, 15, unsigned short) | ||
61 | /* Is conversion from upper to lower case of input enabled ? */ | ||
62 | #define TIOCSCLPGCASE _IOR(SCLP_IOCTL_LETTER, 17, unsigned char) | ||
63 | /* get special character used for separating upper and lower case, */ | ||
64 | /* 0x00 disables this feature */ | ||
65 | #define TIOCSCLPGDELIM _IOR(SCLP_IOCTL_LETTER, 19, unsigned char) | ||
66 | /* get the number of buffers/pages got from kernel at startup */ | ||
67 | #define TIOCSCLPGKBUF _IOR(SCLP_IOCTL_LETTER, 20, unsigned short) | ||
68 | |||
69 | extern struct tty_driver *sclp_tty_driver; | 16 | extern struct tty_driver *sclp_tty_driver; |
70 | 17 | ||
71 | #endif /* __SCLP_TTY_H__ */ | 18 | #endif /* __SCLP_TTY_H__ */ |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 3e577f655b18..ad51738c4261 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
28 | #include "sclp.h" | 28 | #include "sclp.h" |
29 | 29 | ||
30 | #define SCLP_VT220_PRINT_HEADER "sclp vt220 tty driver: " | ||
31 | #define SCLP_VT220_MAJOR TTY_MAJOR | 30 | #define SCLP_VT220_MAJOR TTY_MAJOR |
32 | #define SCLP_VT220_MINOR 65 | 31 | #define SCLP_VT220_MINOR 65 |
33 | #define SCLP_VT220_DRIVER_NAME "sclp_vt220" | 32 | #define SCLP_VT220_DRIVER_NAME "sclp_vt220" |
@@ -82,8 +81,8 @@ static struct sclp_vt220_request *sclp_vt220_current_request; | |||
82 | /* Number of characters in current request buffer */ | 81 | /* Number of characters in current request buffer */ |
83 | static int sclp_vt220_buffered_chars; | 82 | static int sclp_vt220_buffered_chars; |
84 | 83 | ||
85 | /* Flag indicating whether this driver has already been initialized */ | 84 | /* Counter controlling core driver initialization. */ |
86 | static int sclp_vt220_initialized = 0; | 85 | static int __initdata sclp_vt220_init_count; |
87 | 86 | ||
88 | /* Flag indicating that sclp_vt220_current_request should really | 87 | /* Flag indicating that sclp_vt220_current_request should really |
89 | * have been already queued but wasn't because the SCLP was processing | 88 | * have been already queued but wasn't because the SCLP was processing |
@@ -609,10 +608,8 @@ sclp_vt220_flush_buffer(struct tty_struct *tty) | |||
609 | sclp_vt220_emit_current(); | 608 | sclp_vt220_emit_current(); |
610 | } | 609 | } |
611 | 610 | ||
612 | /* | 611 | /* Release allocated pages. */ |
613 | * Initialize all relevant components and register driver with system. | 612 | static void __init __sclp_vt220_free_pages(void) |
614 | */ | ||
615 | static void __init __sclp_vt220_cleanup(void) | ||
616 | { | 613 | { |
617 | struct list_head *page, *p; | 614 | struct list_head *page, *p; |
618 | 615 | ||
@@ -623,21 +620,30 @@ static void __init __sclp_vt220_cleanup(void) | |||
623 | else | 620 | else |
624 | free_bootmem((unsigned long) page, PAGE_SIZE); | 621 | free_bootmem((unsigned long) page, PAGE_SIZE); |
625 | } | 622 | } |
626 | if (!list_empty(&sclp_vt220_register.list)) | ||
627 | sclp_unregister(&sclp_vt220_register); | ||
628 | sclp_vt220_initialized = 0; | ||
629 | } | 623 | } |
630 | 624 | ||
631 | static int __init __sclp_vt220_init(void) | 625 | /* Release memory and unregister from sclp core. Controlled by init counting - |
626 | * only the last invoker will actually perform these actions. */ | ||
627 | static void __init __sclp_vt220_cleanup(void) | ||
628 | { | ||
629 | sclp_vt220_init_count--; | ||
630 | if (sclp_vt220_init_count != 0) | ||
631 | return; | ||
632 | sclp_unregister(&sclp_vt220_register); | ||
633 | __sclp_vt220_free_pages(); | ||
634 | } | ||
635 | |||
636 | /* Allocate buffer pages and register with sclp core. Controlled by init | ||
637 | * counting - only the first invoker will actually perform these actions. */ | ||
638 | static int __init __sclp_vt220_init(int num_pages) | ||
632 | { | 639 | { |
633 | void *page; | 640 | void *page; |
634 | int i; | 641 | int i; |
635 | int num_pages; | ||
636 | int rc; | 642 | int rc; |
637 | 643 | ||
638 | if (sclp_vt220_initialized) | 644 | sclp_vt220_init_count++; |
645 | if (sclp_vt220_init_count != 1) | ||
639 | return 0; | 646 | return 0; |
640 | sclp_vt220_initialized = 1; | ||
641 | spin_lock_init(&sclp_vt220_lock); | 647 | spin_lock_init(&sclp_vt220_lock); |
642 | INIT_LIST_HEAD(&sclp_vt220_empty); | 648 | INIT_LIST_HEAD(&sclp_vt220_empty); |
643 | INIT_LIST_HEAD(&sclp_vt220_outqueue); | 649 | INIT_LIST_HEAD(&sclp_vt220_outqueue); |
@@ -649,24 +655,22 @@ static int __init __sclp_vt220_init(void) | |||
649 | sclp_vt220_flush_later = 0; | 655 | sclp_vt220_flush_later = 0; |
650 | 656 | ||
651 | /* Allocate pages for output buffering */ | 657 | /* Allocate pages for output buffering */ |
652 | num_pages = slab_is_available() ? MAX_KMEM_PAGES : MAX_CONSOLE_PAGES; | ||
653 | for (i = 0; i < num_pages; i++) { | 658 | for (i = 0; i < num_pages; i++) { |
654 | if (slab_is_available()) | 659 | if (slab_is_available()) |
655 | page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | 660 | page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
656 | else | 661 | else |
657 | page = alloc_bootmem_low_pages(PAGE_SIZE); | 662 | page = alloc_bootmem_low_pages(PAGE_SIZE); |
658 | if (!page) { | 663 | if (!page) { |
659 | __sclp_vt220_cleanup(); | 664 | rc = -ENOMEM; |
660 | return -ENOMEM; | 665 | goto out; |
661 | } | 666 | } |
662 | list_add_tail((struct list_head *) page, &sclp_vt220_empty); | 667 | list_add_tail((struct list_head *) page, &sclp_vt220_empty); |
663 | } | 668 | } |
664 | rc = sclp_register(&sclp_vt220_register); | 669 | rc = sclp_register(&sclp_vt220_register); |
670 | out: | ||
665 | if (rc) { | 671 | if (rc) { |
666 | printk(KERN_ERR SCLP_VT220_PRINT_HEADER | 672 | __sclp_vt220_free_pages(); |
667 | "could not register vt220 - " | 673 | sclp_vt220_init_count--; |
668 | "sclp_register returned %d\n", rc); | ||
669 | __sclp_vt220_cleanup(); | ||
670 | } | 674 | } |
671 | return rc; | 675 | return rc; |
672 | } | 676 | } |
@@ -689,15 +693,13 @@ static int __init sclp_vt220_tty_init(void) | |||
689 | { | 693 | { |
690 | struct tty_driver *driver; | 694 | struct tty_driver *driver; |
691 | int rc; | 695 | int rc; |
692 | int cleanup; | ||
693 | 696 | ||
694 | /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve | 697 | /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve |
695 | * symmetry between VM and LPAR systems regarding ttyS1. */ | 698 | * symmetry between VM and LPAR systems regarding ttyS1. */ |
696 | driver = alloc_tty_driver(1); | 699 | driver = alloc_tty_driver(1); |
697 | if (!driver) | 700 | if (!driver) |
698 | return -ENOMEM; | 701 | return -ENOMEM; |
699 | cleanup = !sclp_vt220_initialized; | 702 | rc = __sclp_vt220_init(MAX_KMEM_PAGES); |
700 | rc = __sclp_vt220_init(); | ||
701 | if (rc) | 703 | if (rc) |
702 | goto out_driver; | 704 | goto out_driver; |
703 | 705 | ||
@@ -713,18 +715,13 @@ static int __init sclp_vt220_tty_init(void) | |||
713 | tty_set_operations(driver, &sclp_vt220_ops); | 715 | tty_set_operations(driver, &sclp_vt220_ops); |
714 | 716 | ||
715 | rc = tty_register_driver(driver); | 717 | rc = tty_register_driver(driver); |
716 | if (rc) { | 718 | if (rc) |
717 | printk(KERN_ERR SCLP_VT220_PRINT_HEADER | ||
718 | "could not register tty - " | ||
719 | "tty_register_driver returned %d\n", rc); | ||
720 | goto out_init; | 719 | goto out_init; |
721 | } | ||
722 | sclp_vt220_driver = driver; | 720 | sclp_vt220_driver = driver; |
723 | return 0; | 721 | return 0; |
724 | 722 | ||
725 | out_init: | 723 | out_init: |
726 | if (cleanup) | 724 | __sclp_vt220_cleanup(); |
727 | __sclp_vt220_cleanup(); | ||
728 | out_driver: | 725 | out_driver: |
729 | put_tty_driver(driver); | 726 | put_tty_driver(driver); |
730 | return rc; | 727 | return rc; |
@@ -773,10 +770,9 @@ sclp_vt220_con_init(void) | |||
773 | { | 770 | { |
774 | int rc; | 771 | int rc; |
775 | 772 | ||
776 | INIT_LIST_HEAD(&sclp_vt220_register.list); | ||
777 | if (!CONSOLE_IS_SCLP) | 773 | if (!CONSOLE_IS_SCLP) |
778 | return 0; | 774 | return 0; |
779 | rc = __sclp_vt220_init(); | 775 | rc = __sclp_vt220_init(MAX_CONSOLE_PAGES); |
780 | if (rc) | 776 | if (rc) |
781 | return rc; | 777 | return rc; |
782 | /* Attach linux console */ | 778 | /* Attach linux console */ |
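The sclp_vt220 hunks above replace the single sclp_vt220_initialized flag with init counting: the first caller of __sclp_vt220_init() allocates the output pages and registers with the sclp core, later callers only increment the counter, and only the last call to __sclp_vt220_cleanup() unregisters and frees. A minimal sketch of that idiom follows; all foo_* names are placeholders invented for illustration, not identifiers from the patch, and the counter is left unlocked on the assumption (as in the patch) that all callers run during early boot.

/* Illustrative sketch of the init-counting idiom used above.
 * foo_* names are placeholders, not part of the patch. */
static int foo_init_count;

static int foo_setup(void)     { return 0; }   /* stand-in: allocate + register */
static void foo_teardown(void) { }             /* stand-in: unregister + free   */

static int foo_init(void)
{
	int rc;

	foo_init_count++;
	if (foo_init_count != 1)
		return 0;		/* an earlier caller already set things up */
	rc = foo_setup();
	if (rc)
		foo_init_count--;	/* roll the count back on failure */
	return rc;
}

static void foo_cleanup(void)
{
	foo_init_count--;
	if (foo_init_count != 0)
		return;			/* other users still hold a reference */
	foo_teardown();
}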
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index 874adf365e46..22ca34361ed7 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c | |||
@@ -196,7 +196,7 @@ tape_34xx_erp_retry(struct tape_request *request) | |||
196 | static int | 196 | static int |
197 | tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb) | 197 | tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb) |
198 | { | 198 | { |
199 | if (irb->scsw.dstat == 0x85 /* READY */) { | 199 | if (irb->scsw.cmd.dstat == 0x85) { /* READY */ |
200 | /* A medium was inserted in the drive. */ | 200 | /* A medium was inserted in the drive. */ |
201 | DBF_EVENT(6, "xuud med\n"); | 201 | DBF_EVENT(6, "xuud med\n"); |
202 | tape_34xx_delete_sbid_from(device, 0); | 202 | tape_34xx_delete_sbid_from(device, 0); |
@@ -844,22 +844,22 @@ tape_34xx_irq(struct tape_device *device, struct tape_request *request, | |||
844 | if (request == NULL) | 844 | if (request == NULL) |
845 | return tape_34xx_unsolicited_irq(device, irb); | 845 | return tape_34xx_unsolicited_irq(device, irb); |
846 | 846 | ||
847 | if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) && | 847 | if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) && |
848 | (irb->scsw.dstat & DEV_STAT_DEV_END) && | 848 | (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) && |
849 | (request->op == TO_WRI)) { | 849 | (request->op == TO_WRI)) { |
850 | /* Write at end of volume */ | 850 | /* Write at end of volume */ |
851 | PRINT_INFO("End of volume\n"); /* XXX */ | 851 | PRINT_INFO("End of volume\n"); /* XXX */ |
852 | return tape_34xx_erp_failed(request, -ENOSPC); | 852 | return tape_34xx_erp_failed(request, -ENOSPC); |
853 | } | 853 | } |
854 | 854 | ||
855 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) | 855 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) |
856 | return tape_34xx_unit_check(device, request, irb); | 856 | return tape_34xx_unit_check(device, request, irb); |
857 | 857 | ||
858 | if (irb->scsw.dstat & DEV_STAT_DEV_END) { | 858 | if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) { |
859 | /* | 859 | /* |
860 | * A unit exception occurs on skipping over a tapemark block. | 860 | * A unit exception occurs on skipping over a tapemark block. |
861 | */ | 861 | */ |
862 | if (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) { | 862 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { |
863 | if (request->op == TO_BSB || request->op == TO_FSB) | 863 | if (request->op == TO_BSB || request->op == TO_FSB) |
864 | request->rescnt++; | 864 | request->rescnt++; |
865 | else | 865 | else |
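The pattern running through the tape, tty3270, vmur and dasd hunks is the same: every bare irb->scsw.<field> access becomes irb->scsw.cmd.<field>, because with the FCX/transport-mode support added elsewhere in this series the SCSW carried in the irb is a union of a command-mode and a transport-mode variant. Below is a small sketch of a command-mode interrupt handler under the new layout; the handler and its driver are hypothetical, the headers assumed are the 2.6.27-era s390 cio headers, and only the scsw.cmd accesses mirror the hunks above.

#include <asm/cio.h>		/* struct irb (2.6.27-era layout assumed) */
#include <asm/ccwdev.h>		/* struct ccw_device, DEV_STAT_* (assumed location) */

/* Hypothetical command-mode interrupt handler; the scsw.cmd accesses
 * follow the conversion done in the hunks above. */
static void sample_int_handler(struct ccw_device *cdev, unsigned long intparm,
			       struct irb *irb)
{
	/* Command-mode status is reached through the cmd member of the union. */
	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
		/* sense data in irb->ecw would be evaluated here */
		return;
	}
	if (irb->scsw.cmd.dstat & (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
		/* normal ending status; residual byte count: */
		unsigned int residual = irb->scsw.cmd.count;
		(void) residual;
	}
}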
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 42ce7915fc5d..839987618ffd 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c | |||
@@ -837,13 +837,13 @@ tape_3590_erp_retry(struct tape_device *device, struct tape_request *request, | |||
837 | static int | 837 | static int |
838 | tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb) | 838 | tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb) |
839 | { | 839 | { |
840 | if (irb->scsw.dstat == DEV_STAT_CHN_END) | 840 | if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END) |
841 | /* Probably result of halt ssch */ | 841 | /* Probably result of halt ssch */ |
842 | return TAPE_IO_PENDING; | 842 | return TAPE_IO_PENDING; |
843 | else if (irb->scsw.dstat == 0x85) | 843 | else if (irb->scsw.cmd.dstat == 0x85) |
844 | /* Device Ready */ | 844 | /* Device Ready */ |
845 | DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id); | 845 | DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id); |
846 | else if (irb->scsw.dstat & DEV_STAT_ATTENTION) { | 846 | else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { |
847 | tape_3590_schedule_work(device, TO_READ_ATTMSG); | 847 | tape_3590_schedule_work(device, TO_READ_ATTMSG); |
848 | } else { | 848 | } else { |
849 | DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id); | 849 | DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id); |
@@ -1515,18 +1515,19 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request, | |||
1515 | if (request == NULL) | 1515 | if (request == NULL) |
1516 | return tape_3590_unsolicited_irq(device, irb); | 1516 | return tape_3590_unsolicited_irq(device, irb); |
1517 | 1517 | ||
1518 | if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) && | 1518 | if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) && |
1519 | (irb->scsw.dstat & DEV_STAT_DEV_END) && (request->op == TO_WRI)) { | 1519 | (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) && |
1520 | (request->op == TO_WRI)) { | ||
1520 | /* Write at end of volume */ | 1521 | /* Write at end of volume */ |
1521 | DBF_EVENT(2, "End of volume\n"); | 1522 | DBF_EVENT(2, "End of volume\n"); |
1522 | return tape_3590_erp_failed(device, request, irb, -ENOSPC); | 1523 | return tape_3590_erp_failed(device, request, irb, -ENOSPC); |
1523 | } | 1524 | } |
1524 | 1525 | ||
1525 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) | 1526 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) |
1526 | return tape_3590_unit_check(device, request, irb); | 1527 | return tape_3590_unit_check(device, request, irb); |
1527 | 1528 | ||
1528 | if (irb->scsw.dstat & DEV_STAT_DEV_END) { | 1529 | if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) { |
1529 | if (irb->scsw.dstat == DEV_STAT_UNIT_EXCEP) { | 1530 | if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) { |
1530 | if (request->op == TO_FSB || request->op == TO_BSB) | 1531 | if (request->op == TO_FSB || request->op == TO_BSB) |
1531 | request->rescnt++; | 1532 | request->rescnt++; |
1532 | else | 1533 | else |
@@ -1536,12 +1537,12 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request, | |||
1536 | return tape_3590_done(device, request); | 1537 | return tape_3590_done(device, request); |
1537 | } | 1538 | } |
1538 | 1539 | ||
1539 | if (irb->scsw.dstat & DEV_STAT_CHN_END) { | 1540 | if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) { |
1540 | DBF_EVENT(2, "cannel end\n"); | 1541 | DBF_EVENT(2, "cannel end\n"); |
1541 | return TAPE_IO_PENDING; | 1542 | return TAPE_IO_PENDING; |
1542 | } | 1543 | } |
1543 | 1544 | ||
1544 | if (irb->scsw.dstat & DEV_STAT_ATTENTION) { | 1545 | if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { |
1545 | DBF_EVENT(2, "Unit Attention when busy..\n"); | 1546 | DBF_EVENT(2, "Unit Attention when busy..\n"); |
1546 | return TAPE_IO_PENDING; | 1547 | return TAPE_IO_PENDING; |
1547 | } | 1548 | } |
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index ebe84067bae9..687720b552d1 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/proc_fs.h> | 15 | #include <linux/proc_fs.h> |
16 | #include <linux/mtio.h> | 16 | #include <linux/mtio.h> |
17 | #include <linux/smp_lock.h> | ||
17 | 18 | ||
18 | #include <asm/uaccess.h> | 19 | #include <asm/uaccess.h> |
19 | 20 | ||
@@ -289,21 +290,26 @@ tapechar_open (struct inode *inode, struct file *filp) | |||
289 | if (imajor(filp->f_path.dentry->d_inode) != tapechar_major) | 290 | if (imajor(filp->f_path.dentry->d_inode) != tapechar_major) |
290 | return -ENODEV; | 291 | return -ENODEV; |
291 | 292 | ||
293 | lock_kernel(); | ||
292 | minor = iminor(filp->f_path.dentry->d_inode); | 294 | minor = iminor(filp->f_path.dentry->d_inode); |
293 | device = tape_get_device(minor / TAPE_MINORS_PER_DEV); | 295 | device = tape_get_device(minor / TAPE_MINORS_PER_DEV); |
294 | if (IS_ERR(device)) { | 296 | if (IS_ERR(device)) { |
295 | DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n"); | 297 | DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n"); |
296 | return PTR_ERR(device); | 298 | rc = PTR_ERR(device); |
299 | goto out; | ||
297 | } | 300 | } |
298 | 301 | ||
299 | 302 | ||
300 | rc = tape_open(device); | 303 | rc = tape_open(device); |
301 | if (rc == 0) { | 304 | if (rc == 0) { |
302 | filp->private_data = device; | 305 | filp->private_data = device; |
303 | return nonseekable_open(inode, filp); | 306 | rc = nonseekable_open(inode, filp); |
304 | } | 307 | } |
305 | tape_put_device(device); | 308 | else |
309 | tape_put_device(device); | ||
306 | 310 | ||
311 | out: | ||
312 | unlock_kernel(); | ||
307 | return rc; | 313 | return rc; |
308 | } | 314 | } |
309 | 315 | ||
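tape_char.c (and, further down, vmlogrdr, vmur and vmwatchdog) pick up an explicit lock_kernel()/unlock_kernel() pair in their open() methods: the VFS no longer takes the big kernel lock around chardev open, so drivers that relied on that serialization now take it themselves and must release it on every exit path. A sketch of the shape, with hypothetical sample_* names and the real lock_kernel()/nonseekable_open() helpers of pre-2.6.39 kernels:

#include <linux/fs.h>
#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */

/* Placeholder for the driver-specific part of open(). */
static int sample_setup(struct inode *inode, struct file *filp) { return 0; }

static int sample_open(struct inode *inode, struct file *filp)
{
	int rc;

	lock_kernel();			/* serialization formerly provided by the VFS */
	rc = sample_setup(inode, filp);
	if (rc == 0)
		rc = nonseekable_open(inode, filp);
	unlock_kernel();		/* every return path must drop the BKL */
	return rc;
}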
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index c20e3c548343..181a5441af16 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c | |||
@@ -839,7 +839,7 @@ tape_dump_sense(struct tape_device* device, struct tape_request *request, | |||
839 | 839 | ||
840 | PRINT_INFO("-------------------------------------------------\n"); | 840 | PRINT_INFO("-------------------------------------------------\n"); |
841 | PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n", | 841 | PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n", |
842 | irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa); | 842 | irb->scsw.cmd.dstat, irb->scsw.cmd.cstat, irb->scsw.cmd.cpa); |
843 | PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id); | 843 | PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id); |
844 | if (request != NULL) | 844 | if (request != NULL) |
845 | PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]); | 845 | PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]); |
@@ -867,7 +867,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request, | |||
867 | else | 867 | else |
868 | op = "---"; | 868 | op = "---"; |
869 | DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n", | 869 | DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n", |
870 | irb->scsw.dstat,irb->scsw.cstat); | 870 | irb->scsw.cmd.dstat, irb->scsw.cmd.cstat); |
871 | DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op); | 871 | DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op); |
872 | sptr = (unsigned int *) irb->ecw; | 872 | sptr = (unsigned int *) irb->ecw; |
873 | DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]); | 873 | DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]); |
@@ -1083,10 +1083,11 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1083 | * error might still apply. So we just schedule the request to be | 1083 | * error might still apply. So we just schedule the request to be |
1084 | * started later. | 1084 | * started later. |
1085 | */ | 1085 | */ |
1086 | if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) && | 1086 | if (irb->scsw.cmd.cc != 0 && |
1087 | (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && | ||
1087 | (request->status == TAPE_REQUEST_IN_IO)) { | 1088 | (request->status == TAPE_REQUEST_IN_IO)) { |
1088 | DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n", | 1089 | DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n", |
1089 | device->cdev_id, irb->scsw.cc, irb->scsw.fctl); | 1090 | device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl); |
1090 | request->status = TAPE_REQUEST_QUEUED; | 1091 | request->status = TAPE_REQUEST_QUEUED; |
1091 | schedule_delayed_work(&device->tape_dnr, HZ); | 1092 | schedule_delayed_work(&device->tape_dnr, HZ); |
1092 | return; | 1093 | return; |
@@ -1094,8 +1095,8 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1094 | 1095 | ||
1095 | /* May be an unsolicited irq */ | 1096 | /* May be an unsolicited irq */ |
1096 | if(request != NULL) | 1097 | if(request != NULL) |
1097 | request->rescnt = irb->scsw.count; | 1098 | request->rescnt = irb->scsw.cmd.count; |
1098 | else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) && | 1099 | else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) && |
1099 | !list_empty(&device->req_queue)) { | 1100 | !list_empty(&device->req_queue)) { |
1100 | /* Not Ready to Ready after long busy ? */ | 1101 | /* Not Ready to Ready after long busy ? */ |
1101 | struct tape_request *req; | 1102 | struct tape_request *req; |
@@ -1111,7 +1112,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1111 | return; | 1112 | return; |
1112 | } | 1113 | } |
1113 | } | 1114 | } |
1114 | if (irb->scsw.dstat != 0x0c) { | 1115 | if (irb->scsw.cmd.dstat != 0x0c) { |
1115 | /* Set the 'ONLINE' flag depending on sense byte 1 */ | 1116 | /* Set the 'ONLINE' flag depending on sense byte 1 */ |
1116 | if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) | 1117 | if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) |
1117 | device->tape_generic_status |= GMT_ONLINE(~0); | 1118 | device->tape_generic_status |= GMT_ONLINE(~0); |
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 5043150019ac..a7fe6302c982 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
@@ -663,7 +663,7 @@ static int | |||
663 | tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) | 663 | tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) |
664 | { | 664 | { |
665 | /* Handle ATTN. Schedule tasklet to read aid. */ | 665 | /* Handle ATTN. Schedule tasklet to read aid. */ |
666 | if (irb->scsw.dstat & DEV_STAT_ATTENTION) { | 666 | if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { |
667 | if (!tp->throttle) | 667 | if (!tp->throttle) |
668 | tty3270_issue_read(tp, 0); | 668 | tty3270_issue_read(tp, 0); |
669 | else | 669 | else |
@@ -671,11 +671,11 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) | |||
671 | } | 671 | } |
672 | 672 | ||
673 | if (rq) { | 673 | if (rq) { |
674 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) | 674 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) |
675 | rq->rc = -EIO; | 675 | rq->rc = -EIO; |
676 | else | 676 | else |
677 | /* Normal end. Copy residual count. */ | 677 | /* Normal end. Copy residual count. */ |
678 | rq->rescnt = irb->scsw.count; | 678 | rq->rescnt = irb->scsw.cmd.count; |
679 | } | 679 | } |
680 | return RAW3270_IO_DONE; | 680 | return RAW3270_IO_DONE; |
681 | } | 681 | } |
@@ -1792,15 +1792,12 @@ static int __init tty3270_init(void) | |||
1792 | tty_set_operations(driver, &tty3270_ops); | 1792 | tty_set_operations(driver, &tty3270_ops); |
1793 | ret = tty_register_driver(driver); | 1793 | ret = tty_register_driver(driver); |
1794 | if (ret) { | 1794 | if (ret) { |
1795 | printk(KERN_ERR "tty3270 registration failed with %d\n", ret); | ||
1796 | put_tty_driver(driver); | 1795 | put_tty_driver(driver); |
1797 | return ret; | 1796 | return ret; |
1798 | } | 1797 | } |
1799 | tty3270_driver = driver; | 1798 | tty3270_driver = driver; |
1800 | ret = raw3270_register_notifier(tty3270_notifier); | 1799 | ret = raw3270_register_notifier(tty3270_notifier); |
1801 | if (ret) { | 1800 | if (ret) { |
1802 | printk(KERN_ERR "tty3270 notifier registration failed " | ||
1803 | "with %d\n", ret); | ||
1804 | put_tty_driver(driver); | 1801 | put_tty_driver(driver); |
1805 | return ret; | 1802 | return ret; |
1806 | 1803 | ||
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 2f419b0ea628..09e7d9bf438b 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/miscdevice.h> | 17 | #include <linux/miscdevice.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/smp_lock.h> | ||
19 | #include <asm/cpcmd.h> | 20 | #include <asm/cpcmd.h> |
20 | #include <asm/debug.h> | 21 | #include <asm/debug.h> |
21 | #include <asm/uaccess.h> | 22 | #include <asm/uaccess.h> |
@@ -39,11 +40,14 @@ static int vmcp_open(struct inode *inode, struct file *file) | |||
39 | session = kmalloc(sizeof(*session), GFP_KERNEL); | 40 | session = kmalloc(sizeof(*session), GFP_KERNEL); |
40 | if (!session) | 41 | if (!session) |
41 | return -ENOMEM; | 42 | return -ENOMEM; |
43 | |||
44 | lock_kernel(); | ||
42 | session->bufsize = PAGE_SIZE; | 45 | session->bufsize = PAGE_SIZE; |
43 | session->response = NULL; | 46 | session->response = NULL; |
44 | session->resp_size = 0; | 47 | session->resp_size = 0; |
45 | mutex_init(&session->mutex); | 48 | mutex_init(&session->mutex); |
46 | file->private_data = session; | 49 | file->private_data = session; |
50 | unlock_kernel(); | ||
47 | return nonseekable_open(inode, file); | 51 | return nonseekable_open(inode, file); |
48 | } | 52 | } |
49 | 53 | ||
@@ -61,30 +65,24 @@ static int vmcp_release(struct inode *inode, struct file *file) | |||
61 | static ssize_t | 65 | static ssize_t |
62 | vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) | 66 | vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) |
63 | { | 67 | { |
64 | size_t tocopy; | 68 | ssize_t ret; |
69 | size_t size; | ||
65 | struct vmcp_session *session; | 70 | struct vmcp_session *session; |
66 | 71 | ||
67 | session = (struct vmcp_session *)file->private_data; | 72 | session = file->private_data; |
68 | if (mutex_lock_interruptible(&session->mutex)) | 73 | if (mutex_lock_interruptible(&session->mutex)) |
69 | return -ERESTARTSYS; | 74 | return -ERESTARTSYS; |
70 | if (!session->response) { | 75 | if (!session->response) { |
71 | mutex_unlock(&session->mutex); | 76 | mutex_unlock(&session->mutex); |
72 | return 0; | 77 | return 0; |
73 | } | 78 | } |
74 | if (*ppos > session->resp_size) { | 79 | size = min_t(size_t, session->resp_size, session->bufsize); |
75 | mutex_unlock(&session->mutex); | 80 | ret = simple_read_from_buffer(buff, count, ppos, |
76 | return 0; | 81 | session->response, size); |
77 | } | ||
78 | tocopy = min(session->resp_size - (size_t) (*ppos), count); | ||
79 | tocopy = min(tocopy, session->bufsize - (size_t) (*ppos)); | ||
80 | 82 | ||
81 | if (copy_to_user(buff, session->response + (*ppos), tocopy)) { | ||
82 | mutex_unlock(&session->mutex); | ||
83 | return -EFAULT; | ||
84 | } | ||
85 | mutex_unlock(&session->mutex); | 83 | mutex_unlock(&session->mutex); |
86 | *ppos += tocopy; | 84 | |
87 | return tocopy; | 85 | return ret; |
88 | } | 86 | } |
89 | 87 | ||
90 | static ssize_t | 88 | static ssize_t |
@@ -198,27 +196,23 @@ static int __init vmcp_init(void) | |||
198 | PRINT_WARN("z/VM CP interface is only available under z/VM\n"); | 196 | PRINT_WARN("z/VM CP interface is only available under z/VM\n"); |
199 | return -ENODEV; | 197 | return -ENODEV; |
200 | } | 198 | } |
199 | |||
201 | vmcp_debug = debug_register("vmcp", 1, 1, 240); | 200 | vmcp_debug = debug_register("vmcp", 1, 1, 240); |
202 | if (!vmcp_debug) { | 201 | if (!vmcp_debug) |
203 | PRINT_ERR("z/VM CP interface not loaded. Could not register " | ||
204 | "debug feature\n"); | ||
205 | return -ENOMEM; | 202 | return -ENOMEM; |
206 | } | 203 | |
207 | ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view); | 204 | ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view); |
208 | if (ret) { | 205 | if (ret) { |
209 | PRINT_ERR("z/VM CP interface not loaded. Could not register " | ||
210 | "debug feature view. Error code: %d\n", ret); | ||
211 | debug_unregister(vmcp_debug); | 206 | debug_unregister(vmcp_debug); |
212 | return ret; | 207 | return ret; |
213 | } | 208 | } |
209 | |||
214 | ret = misc_register(&vmcp_dev); | 210 | ret = misc_register(&vmcp_dev); |
215 | if (ret) { | 211 | if (ret) { |
216 | PRINT_ERR("z/VM CP interface not loaded. Could not register " | ||
217 | "misc device. Error code: %d\n", ret); | ||
218 | debug_unregister(vmcp_debug); | 212 | debug_unregister(vmcp_debug); |
219 | return ret; | 213 | return ret; |
220 | } | 214 | } |
221 | PRINT_INFO("z/VM CP interface loaded\n"); | 215 | |
222 | return 0; | 216 | return 0; |
223 | } | 217 | } |
224 | 218 | ||
@@ -226,7 +220,6 @@ static void __exit vmcp_exit(void) | |||
226 | { | 220 | { |
227 | misc_deregister(&vmcp_dev); | 221 | misc_deregister(&vmcp_dev); |
228 | debug_unregister(vmcp_debug); | 222 | debug_unregister(vmcp_debug); |
229 | PRINT_INFO("z/VM CP interface unloaded.\n"); | ||
230 | } | 223 | } |
231 | 224 | ||
232 | module_init(vmcp_init); | 225 | module_init(vmcp_init); |
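The vmcp_read() rewrite above drops the hand-rolled *ppos bounds checks and the explicit copy_to_user() in favour of simple_read_from_buffer(), which clamps the copy to the available data, copies to user space and advances *ppos in one step. A minimal sketch of a read() method built on the same helper; the backing buffer is a placeholder.

#include <linux/fs.h>		/* simple_read_from_buffer() */

/* Placeholder data; vmcp uses the session response buffer instead. */
static char sample_buf[] = "sample response\n";

static ssize_t sample_read(struct file *file, char __user *ubuf,
			   size_t count, loff_t *ppos)
{
	/* Handles the *ppos range check, the partial copy and the
	 * *ppos update that the old open-coded version did by hand. */
	return simple_read_from_buffer(ubuf, count, ppos,
				       sample_buf, sizeof(sample_buf) - 1);
}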
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 2c2428cc05d8..c31faefa2b3b 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/kmod.h> | 25 | #include <linux/kmod.h> |
26 | #include <linux/cdev.h> | 26 | #include <linux/cdev.h> |
27 | #include <linux/device.h> | 27 | #include <linux/device.h> |
28 | #include <linux/smp_lock.h> | ||
28 | #include <linux/string.h> | 29 | #include <linux/string.h> |
29 | 30 | ||
30 | 31 | ||
@@ -216,9 +217,7 @@ static int vmlogrdr_get_recording_class_AB(void) | |||
216 | char *tail; | 217 | char *tail; |
217 | int len,i; | 218 | int len,i; |
218 | 219 | ||
219 | printk (KERN_DEBUG "vmlogrdr: query command: %s\n", cp_command); | ||
220 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); | 220 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); |
221 | printk (KERN_DEBUG "vmlogrdr: response: %s", cp_response); | ||
222 | len = strnlen(cp_response,sizeof(cp_response)); | 221 | len = strnlen(cp_response,sizeof(cp_response)); |
223 | // now the parsing | 222 | // now the parsing |
224 | tail=strnchr(cp_response,len,'='); | 223 | tail=strnchr(cp_response,len,'='); |
@@ -268,11 +267,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, | |||
268 | logptr->recording_name, | 267 | logptr->recording_name, |
269 | qid_string); | 268 | qid_string); |
270 | 269 | ||
271 | printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", | ||
272 | cp_command); | ||
273 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); | 270 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); |
274 | printk (KERN_DEBUG "vmlogrdr: recording response: %s", | ||
275 | cp_response); | ||
276 | } | 271 | } |
277 | 272 | ||
278 | memset(cp_command, 0x00, sizeof(cp_command)); | 273 | memset(cp_command, 0x00, sizeof(cp_command)); |
@@ -282,10 +277,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, | |||
282 | onoff, | 277 | onoff, |
283 | qid_string); | 278 | qid_string); |
284 | 279 | ||
285 | printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command); | ||
286 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); | 280 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); |
287 | printk (KERN_DEBUG "vmlogrdr: recording response: %s", | ||
288 | cp_response); | ||
289 | /* The recording command will usually answer with 'Command complete' | 281 | /* The recording command will usually answer with 'Command complete' |
290 | * on success, but when the specific service was never connected | 282 | * on success, but when the specific service was never connected |
291 | * before then there might be an additional informational message | 283 | * before then there might be an additional informational message |
@@ -319,9 +311,11 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp) | |||
319 | return -ENOSYS; | 311 | return -ENOSYS; |
320 | 312 | ||
321 | /* Besure this device hasn't already been opened */ | 313 | /* Besure this device hasn't already been opened */ |
314 | lock_kernel(); | ||
322 | spin_lock_bh(&logptr->priv_lock); | 315 | spin_lock_bh(&logptr->priv_lock); |
323 | if (logptr->dev_in_use) { | 316 | if (logptr->dev_in_use) { |
324 | spin_unlock_bh(&logptr->priv_lock); | 317 | spin_unlock_bh(&logptr->priv_lock); |
318 | unlock_kernel(); | ||
325 | return -EBUSY; | 319 | return -EBUSY; |
326 | } | 320 | } |
327 | logptr->dev_in_use = 1; | 321 | logptr->dev_in_use = 1; |
@@ -365,7 +359,9 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp) | |||
365 | || (logptr->iucv_path_severed)); | 359 | || (logptr->iucv_path_severed)); |
366 | if (logptr->iucv_path_severed) | 360 | if (logptr->iucv_path_severed) |
367 | goto out_record; | 361 | goto out_record; |
368 | return nonseekable_open(inode, filp); | 362 | ret = nonseekable_open(inode, filp); |
363 | unlock_kernel(); | ||
364 | return ret; | ||
369 | 365 | ||
370 | out_record: | 366 | out_record: |
371 | if (logptr->autorecording) | 367 | if (logptr->autorecording) |
@@ -375,6 +371,7 @@ out_path: | |||
375 | logptr->path = NULL; | 371 | logptr->path = NULL; |
376 | out_dev: | 372 | out_dev: |
377 | logptr->dev_in_use = 0; | 373 | logptr->dev_in_use = 0; |
374 | unlock_kernel(); | ||
378 | return -EIO; | 375 | return -EIO; |
379 | } | 376 | } |
380 | 377 | ||
@@ -567,10 +564,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev, | |||
567 | "RECORDING %s PURGE ", | 564 | "RECORDING %s PURGE ", |
568 | priv->recording_name); | 565 | priv->recording_name); |
569 | 566 | ||
570 | printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command); | ||
571 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); | 567 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); |
572 | printk (KERN_DEBUG "vmlogrdr: recording response: %s", | ||
573 | cp_response); | ||
574 | 568 | ||
575 | return count; | 569 | return count; |
576 | } | 570 | } |
@@ -682,28 +676,20 @@ static int vmlogrdr_register_driver(void) | |||
682 | 676 | ||
683 | /* Register with iucv driver */ | 677 | /* Register with iucv driver */ |
684 | ret = iucv_register(&vmlogrdr_iucv_handler, 1); | 678 | ret = iucv_register(&vmlogrdr_iucv_handler, 1); |
685 | if (ret) { | 679 | if (ret) |
686 | printk (KERN_ERR "vmlogrdr: failed to register with " | ||
687 | "iucv driver\n"); | ||
688 | goto out; | 680 | goto out; |
689 | } | ||
690 | 681 | ||
691 | ret = driver_register(&vmlogrdr_driver); | 682 | ret = driver_register(&vmlogrdr_driver); |
692 | if (ret) { | 683 | if (ret) |
693 | printk(KERN_ERR "vmlogrdr: failed to register driver.\n"); | ||
694 | goto out_iucv; | 684 | goto out_iucv; |
695 | } | ||
696 | 685 | ||
697 | ret = driver_create_file(&vmlogrdr_driver, | 686 | ret = driver_create_file(&vmlogrdr_driver, |
698 | &driver_attr_recording_status); | 687 | &driver_attr_recording_status); |
699 | if (ret) { | 688 | if (ret) |
700 | printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n"); | ||
701 | goto out_driver; | 689 | goto out_driver; |
702 | } | ||
703 | 690 | ||
704 | vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); | 691 | vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); |
705 | if (IS_ERR(vmlogrdr_class)) { | 692 | if (IS_ERR(vmlogrdr_class)) { |
706 | printk(KERN_ERR "vmlogrdr: failed to create class.\n"); | ||
707 | ret = PTR_ERR(vmlogrdr_class); | 693 | ret = PTR_ERR(vmlogrdr_class); |
708 | vmlogrdr_class = NULL; | 694 | vmlogrdr_class = NULL; |
709 | goto out_attr; | 695 | goto out_attr; |
@@ -871,12 +857,10 @@ static int __init vmlogrdr_init(void) | |||
871 | rc = vmlogrdr_register_cdev(dev); | 857 | rc = vmlogrdr_register_cdev(dev); |
872 | if (rc) | 858 | if (rc) |
873 | goto cleanup; | 859 | goto cleanup; |
874 | printk (KERN_INFO "vmlogrdr: driver loaded\n"); | ||
875 | return 0; | 860 | return 0; |
876 | 861 | ||
877 | cleanup: | 862 | cleanup: |
878 | vmlogrdr_cleanup(); | 863 | vmlogrdr_cleanup(); |
879 | printk (KERN_ERR "vmlogrdr: driver not loaded.\n"); | ||
880 | return rc; | 864 | return rc; |
881 | } | 865 | } |
882 | 866 | ||
@@ -884,7 +868,6 @@ cleanup: | |||
884 | static void __exit vmlogrdr_exit(void) | 868 | static void __exit vmlogrdr_exit(void) |
885 | { | 869 | { |
886 | vmlogrdr_cleanup(); | 870 | vmlogrdr_cleanup(); |
887 | printk (KERN_INFO "vmlogrdr: driver unloaded\n"); | ||
888 | return; | 871 | return; |
889 | } | 872 | } |
890 | 873 | ||
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 83ae9a852f00..0a9f1cccbe58 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/cdev.h> | 11 | #include <linux/cdev.h> |
12 | #include <linux/smp_lock.h> | ||
12 | 13 | ||
13 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
14 | #include <asm/cio.h> | 15 | #include <asm/cio.h> |
@@ -277,7 +278,8 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
277 | struct urdev *urd; | 278 | struct urdev *urd; |
278 | 279 | ||
279 | TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n", | 280 | TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n", |
280 | intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count); | 281 | intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, |
282 | irb->scsw.cmd.count); | ||
281 | 283 | ||
282 | if (!intparm) { | 284 | if (!intparm) { |
283 | TRACE("ur_int_handler: unsolicited interrupt\n"); | 285 | TRACE("ur_int_handler: unsolicited interrupt\n"); |
@@ -288,7 +290,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
288 | /* On special conditions irb is an error pointer */ | 290 | /* On special conditions irb is an error pointer */ |
289 | if (IS_ERR(irb)) | 291 | if (IS_ERR(irb)) |
290 | urd->io_request_rc = PTR_ERR(irb); | 292 | urd->io_request_rc = PTR_ERR(irb); |
291 | else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) | 293 | else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) |
292 | urd->io_request_rc = 0; | 294 | urd->io_request_rc = 0; |
293 | else | 295 | else |
294 | urd->io_request_rc = -EIO; | 296 | urd->io_request_rc = -EIO; |
@@ -668,7 +670,7 @@ static int ur_open(struct inode *inode, struct file *file) | |||
668 | 670 | ||
669 | if (accmode == O_RDWR) | 671 | if (accmode == O_RDWR) |
670 | return -EACCES; | 672 | return -EACCES; |
671 | 673 | lock_kernel(); | |
672 | /* | 674 | /* |
673 | * We treat the minor number as the devno of the ur device | 675 | * We treat the minor number as the devno of the ur device |
674 | * to find in the driver tree. | 676 | * to find in the driver tree. |
@@ -676,8 +678,10 @@ static int ur_open(struct inode *inode, struct file *file) | |||
676 | devno = MINOR(file->f_dentry->d_inode->i_rdev); | 678 | devno = MINOR(file->f_dentry->d_inode->i_rdev); |
677 | 679 | ||
678 | urd = urdev_get_from_devno(devno); | 680 | urd = urdev_get_from_devno(devno); |
679 | if (!urd) | 681 | if (!urd) { |
680 | return -ENXIO; | 682 | rc = -ENXIO; |
683 | goto out; | ||
684 | } | ||
681 | 685 | ||
682 | spin_lock(&urd->open_lock); | 686 | spin_lock(&urd->open_lock); |
683 | while (urd->open_flag) { | 687 | while (urd->open_flag) { |
@@ -720,6 +724,7 @@ static int ur_open(struct inode *inode, struct file *file) | |||
720 | goto fail_urfile_free; | 724 | goto fail_urfile_free; |
721 | urf->file_reclen = rc; | 725 | urf->file_reclen = rc; |
722 | file->private_data = urf; | 726 | file->private_data = urf; |
727 | unlock_kernel(); | ||
723 | return 0; | 728 | return 0; |
724 | 729 | ||
725 | fail_urfile_free: | 730 | fail_urfile_free: |
@@ -730,6 +735,8 @@ fail_unlock: | |||
730 | spin_unlock(&urd->open_lock); | 735 | spin_unlock(&urd->open_lock); |
731 | fail_put: | 736 | fail_put: |
732 | urdev_put(urd); | 737 | urdev_put(urd); |
738 | out: | ||
739 | unlock_kernel(); | ||
733 | return rc; | 740 | return rc; |
734 | } | 741 | } |
735 | 742 | ||
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c index 19f8389291b6..21a2a829bf4e 100644 --- a/drivers/s390/char/vmwatchdog.c +++ b/drivers/s390/char/vmwatchdog.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
15 | #include <linux/watchdog.h> | 15 | #include <linux/watchdog.h> |
16 | #include <linux/smp_lock.h> | ||
16 | 17 | ||
17 | #include <asm/ebcdic.h> | 18 | #include <asm/ebcdic.h> |
18 | #include <asm/io.h> | 19 | #include <asm/io.h> |
@@ -92,23 +93,15 @@ static int vmwdt_keepalive(void) | |||
92 | 93 | ||
93 | func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init; | 94 | func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init; |
94 | ret = __diag288(func, vmwdt_interval, ebc_cmd, len); | 95 | ret = __diag288(func, vmwdt_interval, ebc_cmd, len); |
96 | WARN_ON(ret != 0); | ||
95 | kfree(ebc_cmd); | 97 | kfree(ebc_cmd); |
96 | |||
97 | if (ret) { | ||
98 | printk(KERN_WARNING "%s: problem setting interval %d, " | ||
99 | "cmd %s\n", __func__, vmwdt_interval, | ||
100 | vmwdt_cmd); | ||
101 | } | ||
102 | return ret; | 98 | return ret; |
103 | } | 99 | } |
104 | 100 | ||
105 | static int vmwdt_disable(void) | 101 | static int vmwdt_disable(void) |
106 | { | 102 | { |
107 | int ret = __diag288(wdt_cancel, 0, "", 0); | 103 | int ret = __diag288(wdt_cancel, 0, "", 0); |
108 | if (ret) { | 104 | WARN_ON(ret != 0); |
109 | printk(KERN_WARNING "%s: problem disabling watchdog\n", | ||
110 | __func__); | ||
111 | } | ||
112 | return ret; | 105 | return ret; |
113 | } | 106 | } |
114 | 107 | ||
@@ -121,21 +114,23 @@ static int __init vmwdt_probe(void) | |||
121 | static char __initdata ebc_begin[] = { | 114 | static char __initdata ebc_begin[] = { |
122 | 194, 197, 199, 201, 213 | 115 | 194, 197, 199, 201, 213 |
123 | }; | 116 | }; |
124 | if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) { | 117 | if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) |
125 | printk(KERN_INFO "z/VM watchdog not available\n"); | ||
126 | return -EINVAL; | 118 | return -EINVAL; |
127 | } | ||
128 | return vmwdt_disable(); | 119 | return vmwdt_disable(); |
129 | } | 120 | } |
130 | 121 | ||
131 | static int vmwdt_open(struct inode *i, struct file *f) | 122 | static int vmwdt_open(struct inode *i, struct file *f) |
132 | { | 123 | { |
133 | int ret; | 124 | int ret; |
134 | if (test_and_set_bit(0, &vmwdt_is_open)) | 125 | lock_kernel(); |
126 | if (test_and_set_bit(0, &vmwdt_is_open)) { | ||
127 | unlock_kernel(); | ||
135 | return -EBUSY; | 128 | return -EBUSY; |
129 | } | ||
136 | ret = vmwdt_keepalive(); | 130 | ret = vmwdt_keepalive(); |
137 | if (ret) | 131 | if (ret) |
138 | clear_bit(0, &vmwdt_is_open); | 132 | clear_bit(0, &vmwdt_is_open); |
133 | unlock_kernel(); | ||
139 | return ret ? ret : nonseekable_open(i, f); | 134 | return ret ? ret : nonseekable_open(i, f); |
140 | } | 135 | } |
141 | 136 | ||
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index bbbd14e9d48f..047dd92ae804 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c | |||
@@ -223,12 +223,10 @@ static int __init init_cpu_info(enum arch_id arch) | |||
223 | /* get info for boot cpu from lowcore, stored in the HSA */ | 223 | /* get info for boot cpu from lowcore, stored in the HSA */ |
224 | 224 | ||
225 | sa = kmalloc(sizeof(*sa), GFP_KERNEL); | 225 | sa = kmalloc(sizeof(*sa), GFP_KERNEL); |
226 | if (!sa) { | 226 | if (!sa) |
227 | ERROR_MSG("kmalloc failed: %s: %i\n",__func__, __LINE__); | ||
228 | return -ENOMEM; | 227 | return -ENOMEM; |
229 | } | ||
230 | if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { | 228 | if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { |
231 | ERROR_MSG("could not copy from HSA\n"); | 229 | TRACE("could not copy from HSA\n"); |
232 | kfree(sa); | 230 | kfree(sa); |
233 | return -EIO; | 231 | return -EIO; |
234 | } | 232 | } |
@@ -511,6 +509,8 @@ static void __init set_s390x_lc_mask(union save_area *map) | |||
511 | */ | 509 | */ |
512 | static int __init sys_info_init(enum arch_id arch) | 510 | static int __init sys_info_init(enum arch_id arch) |
513 | { | 511 | { |
512 | int rc; | ||
513 | |||
514 | switch (arch) { | 514 | switch (arch) { |
515 | case ARCH_S390X: | 515 | case ARCH_S390X: |
516 | MSG("DETECTED 'S390X (64 bit) OS'\n"); | 516 | MSG("DETECTED 'S390X (64 bit) OS'\n"); |
@@ -529,10 +529,9 @@ static int __init sys_info_init(enum arch_id arch) | |||
529 | return -EINVAL; | 529 | return -EINVAL; |
530 | } | 530 | } |
531 | sys_info.arch = arch; | 531 | sys_info.arch = arch; |
532 | if (init_cpu_info(arch)) { | 532 | rc = init_cpu_info(arch); |
533 | ERROR_MSG("get cpu info failed\n"); | 533 | if (rc) |
534 | return -ENOMEM; | 534 | return rc; |
535 | } | ||
536 | sys_info.mem_size = real_memory_size; | 535 | sys_info.mem_size = real_memory_size; |
537 | 536 | ||
538 | return 0; | 537 | return 0; |
@@ -544,12 +543,12 @@ static int __init check_sdias(void) | |||
544 | 543 | ||
545 | rc = sclp_sdias_blk_count(); | 544 | rc = sclp_sdias_blk_count(); |
546 | if (rc < 0) { | 545 | if (rc < 0) { |
547 | ERROR_MSG("Could not determine HSA size\n"); | 546 | TRACE("Could not determine HSA size\n"); |
548 | return rc; | 547 | return rc; |
549 | } | 548 | } |
550 | act_hsa_size = (rc - 1) * PAGE_SIZE; | 549 | act_hsa_size = (rc - 1) * PAGE_SIZE; |
551 | if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { | 550 | if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { |
552 | ERROR_MSG("HSA size too small: %i\n", act_hsa_size); | 551 | TRACE("HSA size too small: %i\n", act_hsa_size); |
553 | return -EINVAL; | 552 | return -EINVAL; |
554 | } | 553 | } |
555 | return 0; | 554 | return 0; |
@@ -590,16 +589,12 @@ static int __init zcore_init(void) | |||
590 | goto fail; | 589 | goto fail; |
591 | 590 | ||
592 | rc = check_sdias(); | 591 | rc = check_sdias(); |
593 | if (rc) { | 592 | if (rc) |
594 | ERROR_MSG("Dump initialization failed\n"); | ||
595 | goto fail; | 593 | goto fail; |
596 | } | ||
597 | 594 | ||
598 | rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); | 595 | rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); |
599 | if (rc) { | 596 | if (rc) |
600 | ERROR_MSG("sdial memcpy for arch id failed\n"); | ||
601 | goto fail; | 597 | goto fail; |
602 | } | ||
603 | 598 | ||
604 | #ifndef __s390x__ | 599 | #ifndef __s390x__ |
605 | if (arch == ARCH_S390X) { | 600 | if (arch == ARCH_S390X) { |
@@ -610,10 +605,8 @@ static int __init zcore_init(void) | |||
610 | #endif | 605 | #endif |
611 | 606 | ||
612 | rc = sys_info_init(arch); | 607 | rc = sys_info_init(arch); |
613 | if (rc) { | 608 | if (rc) |
614 | ERROR_MSG("arch init failed\n"); | ||
615 | goto fail; | 609 | goto fail; |
616 | } | ||
617 | 610 | ||
618 | zcore_header_init(arch, &zcore_header); | 611 | zcore_header_init(arch, &zcore_header); |
619 | 612 | ||
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index cfaf77b320f5..91e9e3f3073a 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile | |||
@@ -2,9 +2,11 @@ | |||
2 | # Makefile for the S/390 common i/o drivers | 2 | # Makefile for the S/390 common i/o drivers |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o | 5 | obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \ |
6 | fcx.o itcw.o | ||
6 | ccw_device-objs += device.o device_fsm.o device_ops.o | 7 | ccw_device-objs += device.o device_fsm.o device_ops.o |
7 | ccw_device-objs += device_id.o device_pgid.o device_status.o | 8 | ccw_device-objs += device_id.o device_pgid.o device_status.o |
8 | obj-y += ccw_device.o cmf.o | 9 | obj-y += ccw_device.o cmf.o |
10 | obj-$(CONFIG_CHSC_SCH) += chsc_sch.o | ||
9 | obj-$(CONFIG_CCWGROUP) += ccwgroup.o | 11 | obj-$(CONFIG_CCWGROUP) += ccwgroup.o |
10 | obj-$(CONFIG_QDIO) += qdio.o | 12 | obj-$(CONFIG_QDIO) += qdio.o |
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c index b7a07a866291..fe6cea15bbaf 100644 --- a/drivers/s390/cio/airq.c +++ b/drivers/s390/cio/airq.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/rcupdate.h> | 15 | #include <linux/rcupdate.h> |
16 | 16 | ||
17 | #include <asm/airq.h> | 17 | #include <asm/airq.h> |
18 | #include <asm/isc.h> | ||
18 | 19 | ||
19 | #include "cio.h" | 20 | #include "cio.h" |
20 | #include "cio_debug.h" | 21 | #include "cio_debug.h" |
@@ -33,15 +34,15 @@ struct airq_t { | |||
33 | void *drv_data; | 34 | void *drv_data; |
34 | }; | 35 | }; |
35 | 36 | ||
36 | static union indicator_t indicators; | 37 | static union indicator_t indicators[MAX_ISC]; |
37 | static struct airq_t *airqs[NR_AIRQS]; | 38 | static struct airq_t *airqs[MAX_ISC][NR_AIRQS]; |
38 | 39 | ||
39 | static int register_airq(struct airq_t *airq) | 40 | static int register_airq(struct airq_t *airq, u8 isc) |
40 | { | 41 | { |
41 | int i; | 42 | int i; |
42 | 43 | ||
43 | for (i = 0; i < NR_AIRQS; i++) | 44 | for (i = 0; i < NR_AIRQS; i++) |
44 | if (!cmpxchg(&airqs[i], NULL, airq)) | 45 | if (!cmpxchg(&airqs[isc][i], NULL, airq)) |
45 | return i; | 46 | return i; |
46 | return -ENOMEM; | 47 | return -ENOMEM; |
47 | } | 48 | } |
@@ -50,18 +51,21 @@ static int register_airq(struct airq_t *airq) | |||
50 | * s390_register_adapter_interrupt() - register adapter interrupt handler | 51 | * s390_register_adapter_interrupt() - register adapter interrupt handler |
51 | * @handler: adapter handler to be registered | 52 | * @handler: adapter handler to be registered |
52 | * @drv_data: driver data passed with each call to the handler | 53 | * @drv_data: driver data passed with each call to the handler |
54 | * @isc: isc for which the handler should be called | ||
53 | * | 55 | * |
54 | * Returns: | 56 | * Returns: |
55 | * Pointer to the indicator to be used on success | 57 | * Pointer to the indicator to be used on success |
56 | * ERR_PTR() if registration failed | 58 | * ERR_PTR() if registration failed |
57 | */ | 59 | */ |
58 | void *s390_register_adapter_interrupt(adapter_int_handler_t handler, | 60 | void *s390_register_adapter_interrupt(adapter_int_handler_t handler, |
59 | void *drv_data) | 61 | void *drv_data, u8 isc) |
60 | { | 62 | { |
61 | struct airq_t *airq; | 63 | struct airq_t *airq; |
62 | char dbf_txt[16]; | 64 | char dbf_txt[16]; |
63 | int ret; | 65 | int ret; |
64 | 66 | ||
67 | if (isc > MAX_ISC) | ||
68 | return ERR_PTR(-EINVAL); | ||
65 | airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL); | 69 | airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL); |
66 | if (!airq) { | 70 | if (!airq) { |
67 | ret = -ENOMEM; | 71 | ret = -ENOMEM; |
@@ -69,34 +73,35 @@ void *s390_register_adapter_interrupt(adapter_int_handler_t handler, | |||
69 | } | 73 | } |
70 | airq->handler = handler; | 74 | airq->handler = handler; |
71 | airq->drv_data = drv_data; | 75 | airq->drv_data = drv_data; |
72 | ret = register_airq(airq); | 76 | |
73 | if (ret < 0) | 77 | ret = register_airq(airq, isc); |
74 | kfree(airq); | ||
75 | out: | 78 | out: |
76 | snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret); | 79 | snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret); |
77 | CIO_TRACE_EVENT(4, dbf_txt); | 80 | CIO_TRACE_EVENT(4, dbf_txt); |
78 | if (ret < 0) | 81 | if (ret < 0) { |
82 | kfree(airq); | ||
79 | return ERR_PTR(ret); | 83 | return ERR_PTR(ret); |
80 | else | 84 | } else |
81 | return &indicators.byte[ret]; | 85 | return &indicators[isc].byte[ret]; |
82 | } | 86 | } |
83 | EXPORT_SYMBOL(s390_register_adapter_interrupt); | 87 | EXPORT_SYMBOL(s390_register_adapter_interrupt); |
84 | 88 | ||
85 | /** | 89 | /** |
86 | * s390_unregister_adapter_interrupt - unregister adapter interrupt handler | 90 | * s390_unregister_adapter_interrupt - unregister adapter interrupt handler |
87 | * @ind: indicator for which the handler is to be unregistered | 91 | * @ind: indicator for which the handler is to be unregistered |
92 | * @isc: interruption subclass | ||
88 | */ | 93 | */ |
89 | void s390_unregister_adapter_interrupt(void *ind) | 94 | void s390_unregister_adapter_interrupt(void *ind, u8 isc) |
90 | { | 95 | { |
91 | struct airq_t *airq; | 96 | struct airq_t *airq; |
92 | char dbf_txt[16]; | 97 | char dbf_txt[16]; |
93 | int i; | 98 | int i; |
94 | 99 | ||
95 | i = (int) ((addr_t) ind) - ((addr_t) &indicators.byte[0]); | 100 | i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]); |
96 | snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i); | 101 | snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i); |
97 | CIO_TRACE_EVENT(4, dbf_txt); | 102 | CIO_TRACE_EVENT(4, dbf_txt); |
98 | indicators.byte[i] = 0; | 103 | indicators[isc].byte[i] = 0; |
99 | airq = xchg(&airqs[i], NULL); | 104 | airq = xchg(&airqs[isc][i], NULL); |
100 | /* | 105 | /* |
101 | * Allow interrupts to complete. This will ensure that the airq handle | 106 | * Allow interrupts to complete. This will ensure that the airq handle |
102 | * is no longer referenced by any interrupt handler. | 107 | * is no longer referenced by any interrupt handler. |
@@ -108,7 +113,7 @@ EXPORT_SYMBOL(s390_unregister_adapter_interrupt); | |||
108 | 113 | ||
109 | #define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8)) | 114 | #define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8)) |
110 | 115 | ||
111 | void do_adapter_IO(void) | 116 | void do_adapter_IO(u8 isc) |
112 | { | 117 | { |
113 | int w; | 118 | int w; |
114 | int i; | 119 | int i; |
@@ -120,22 +125,22 @@ void do_adapter_IO(void) | |||
120 | * fetch operations. | 125 | * fetch operations. |
121 | */ | 126 | */ |
122 | for (w = 0; w < NR_AIRQ_WORDS; w++) { | 127 | for (w = 0; w < NR_AIRQ_WORDS; w++) { |
123 | word = indicators.word[w]; | 128 | word = indicators[isc].word[w]; |
124 | i = w * NR_AIRQS_PER_WORD; | 129 | i = w * NR_AIRQS_PER_WORD; |
125 | /* | 130 | /* |
126 | * Check bytes within word for active indicators. | 131 | * Check bytes within word for active indicators. |
127 | */ | 132 | */ |
128 | while (word) { | 133 | while (word) { |
129 | if (word & INDICATOR_MASK) { | 134 | if (word & INDICATOR_MASK) { |
130 | airq = airqs[i]; | 135 | airq = airqs[isc][i]; |
131 | if (likely(airq)) | 136 | if (likely(airq)) |
132 | airq->handler(&indicators.byte[i], | 137 | airq->handler(&indicators[isc].byte[i], |
133 | airq->drv_data); | 138 | airq->drv_data); |
134 | else | 139 | else |
135 | /* | 140 | /* |
136 | * Reset ill-behaved indicator. | 141 | * Reset ill-behaved indicator. |
137 | */ | 142 | */ |
138 | indicators.byte[i] = 0; | 143 | indicators[isc].byte[i] = 0; |
139 | } | 144 | } |
140 | word <<= 8; | 145 | word <<= 8; |
141 | i++; | 146 | i++; |
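The airq.c hunks turn the single indicator array into one array per interruption subclass, so s390_register_adapter_interrupt() now takes the ISC as a third argument and s390_unregister_adapter_interrupt() needs it as well. A sketch of a caller of the reworked interface follows; the handler body, the drv_data value and the literal ISC number are illustrative assumptions, and the handler signature is inferred from the two-pointer call made in do_adapter_IO() above. Only the two registration signatures are taken from the hunks.

#include <linux/err.h>
#include <asm/airq.h>		/* adapter interrupt API as changed above */

/* Called with the address of the indicator byte and the drv_data
 * passed at registration time. */
static void sample_airq_handler(void *indicator, void *drv_data)
{
	/* clear the indicator and kick the driver's tasklet/work here */
}

static void *sample_indicator;

static int sample_airq_setup(void)
{
	u8 isc = 3;	/* placeholder: whatever ISC the device is enabled for */

	sample_indicator = s390_register_adapter_interrupt(sample_airq_handler,
							   NULL, isc);
	if (IS_ERR(sample_indicator))
		return PTR_ERR(sample_indicator);
	return 0;
}

static void sample_airq_teardown(void)
{
	u8 isc = 3;	/* must match the ISC used at registration */

	s390_unregister_adapter_interrupt(sample_indicator, isc);
}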
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 297cdceb0ca4..db00b0591733 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/chpid.h> | 18 | #include <asm/chpid.h> |
19 | #include <asm/sclp.h> | 19 | #include <asm/sclp.h> |
20 | 20 | ||
21 | #include "../s390mach.h" | ||
21 | #include "cio.h" | 22 | #include "cio.h" |
22 | #include "css.h" | 23 | #include "css.h" |
23 | #include "ioasm.h" | 24 | #include "ioasm.h" |
@@ -94,6 +95,7 @@ u8 chp_get_sch_opm(struct subchannel *sch) | |||
94 | } | 95 | } |
95 | return opm; | 96 | return opm; |
96 | } | 97 | } |
98 | EXPORT_SYMBOL_GPL(chp_get_sch_opm); | ||
97 | 99 | ||
98 | /** | 100 | /** |
99 | * chp_is_registered - check if a channel-path is registered | 101 | * chp_is_registered - check if a channel-path is registered |
@@ -121,11 +123,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on) | |||
121 | CIO_TRACE_EVENT(2, dbf_text); | 123 | CIO_TRACE_EVENT(2, dbf_text); |
122 | 124 | ||
123 | status = chp_get_status(chpid); | 125 | status = chp_get_status(chpid); |
124 | if (!on && !status) { | 126 | if (!on && !status) |
125 | printk(KERN_ERR "cio: chpid %x.%02x is already offline\n", | 127 | return 0; |
126 | chpid.cssid, chpid.id); | ||
127 | return -EINVAL; | ||
128 | } | ||
129 | 128 | ||
130 | set_chp_logically_online(chpid, on); | 129 | set_chp_logically_online(chpid, on); |
131 | chsc_chp_vary(chpid, on); | 130 | chsc_chp_vary(chpid, on); |
@@ -141,21 +140,14 @@ static ssize_t chp_measurement_chars_read(struct kobject *kobj, | |||
141 | { | 140 | { |
142 | struct channel_path *chp; | 141 | struct channel_path *chp; |
143 | struct device *device; | 142 | struct device *device; |
144 | unsigned int size; | ||
145 | 143 | ||
146 | device = container_of(kobj, struct device, kobj); | 144 | device = container_of(kobj, struct device, kobj); |
147 | chp = to_channelpath(device); | 145 | chp = to_channelpath(device); |
148 | if (!chp->cmg_chars) | 146 | if (!chp->cmg_chars) |
149 | return 0; | 147 | return 0; |
150 | 148 | ||
151 | size = sizeof(struct cmg_chars); | 149 | return memory_read_from_buffer(buf, count, &off, |
152 | 150 | chp->cmg_chars, sizeof(struct cmg_chars)); | |
153 | if (off > size) | ||
154 | return 0; | ||
155 | if (off + count > size) | ||
156 | count = size - off; | ||
157 | memcpy(buf, chp->cmg_chars + off, count); | ||
158 | return count; | ||
159 | } | 151 | } |
160 | 152 | ||
161 | static struct bin_attribute chp_measurement_chars_attr = { | 153 | static struct bin_attribute chp_measurement_chars_attr = { |
@@ -405,7 +397,7 @@ int chp_new(struct chp_id chpid) | |||
405 | chpid.id); | 397 | chpid.id); |
406 | 398 | ||
407 | /* Obtain channel path description and fill it in. */ | 399 | /* Obtain channel path description and fill it in. */ |
408 | ret = chsc_determine_channel_path_description(chpid, &chp->desc); | 400 | ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc); |
409 | if (ret) | 401 | if (ret) |
410 | goto out_free; | 402 | goto out_free; |
411 | if ((chp->desc.flags & 0x80) == 0) { | 403 | if ((chp->desc.flags & 0x80) == 0) { |
@@ -413,8 +405,7 @@ int chp_new(struct chp_id chpid) | |||
413 | goto out_free; | 405 | goto out_free; |
414 | } | 406 | } |
415 | /* Get channel-measurement characteristics. */ | 407 | /* Get channel-measurement characteristics. */ |
416 | if (css_characteristics_avail && css_chsc_characteristics.scmc | 408 | if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) { |
417 | && css_chsc_characteristics.secm) { | ||
418 | ret = chsc_get_channel_measurement_chars(chp); | 409 | ret = chsc_get_channel_measurement_chars(chp); |
419 | if (ret) | 410 | if (ret) |
420 | goto out_free; | 411 | goto out_free; |
@@ -476,26 +467,74 @@ void *chp_get_chp_desc(struct chp_id chpid) | |||
476 | 467 | ||
477 | /** | 468 | /** |
478 | * chp_process_crw - process channel-path status change | 469 | * chp_process_crw - process channel-path status change |
479 | * @id: channel-path ID number | 470 | * @crw0: channel report-word to handler |
480 | * @status: non-zero if channel-path has become available, zero otherwise | 471 | * @crw1: second channel-report word (always NULL) |
472 | * @overflow: crw overflow indication | ||
481 | * | 473 | * |
482 | * Handle channel-report-words indicating that the status of a channel-path | 474 | * Handle channel-report-words indicating that the status of a channel-path |
483 | * has changed. | 475 | * has changed. |
484 | */ | 476 | */ |
485 | void chp_process_crw(int id, int status) | 477 | static void chp_process_crw(struct crw *crw0, struct crw *crw1, |
478 | int overflow) | ||
486 | { | 479 | { |
487 | struct chp_id chpid; | 480 | struct chp_id chpid; |
488 | 481 | ||
482 | if (overflow) { | ||
483 | css_schedule_eval_all(); | ||
484 | return; | ||
485 | } | ||
486 | CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, " | ||
487 | "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", | ||
488 | crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, | ||
489 | crw0->erc, crw0->rsid); | ||
490 | /* | ||
491 | * Check for solicited machine checks. These are | ||
492 | * created by reset channel path and need not be | ||
493 | * handled here. | ||
494 | */ | ||
495 | if (crw0->slct) { | ||
496 | CIO_CRW_EVENT(2, "solicited machine check for " | ||
497 | "channel path %02X\n", crw0->rsid); | ||
498 | return; | ||
499 | } | ||
489 | chp_id_init(&chpid); | 500 | chp_id_init(&chpid); |
490 | chpid.id = id; | 501 | chpid.id = crw0->rsid; |
491 | if (status) { | 502 | switch (crw0->erc) { |
503 | case CRW_ERC_IPARM: /* Path has come. */ | ||
492 | if (!chp_is_registered(chpid)) | 504 | if (!chp_is_registered(chpid)) |
493 | chp_new(chpid); | 505 | chp_new(chpid); |
494 | chsc_chp_online(chpid); | 506 | chsc_chp_online(chpid); |
495 | } else | 507 | break; |
508 | case CRW_ERC_PERRI: /* Path has gone. */ | ||
509 | case CRW_ERC_PERRN: | ||
496 | chsc_chp_offline(chpid); | 510 | chsc_chp_offline(chpid); |
511 | break; | ||
512 | default: | ||
513 | CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n", | ||
514 | crw0->erc); | ||
515 | } | ||
497 | } | 516 | } |
498 | 517 | ||
518 | int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link) | ||
519 | { | ||
520 | int i; | ||
521 | int mask; | ||
522 | |||
523 | for (i = 0; i < 8; i++) { | ||
524 | mask = 0x80 >> i; | ||
525 | if (!(ssd->path_mask & mask)) | ||
526 | continue; | ||
527 | if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid)) | ||
528 | continue; | ||
529 | if ((ssd->fla_valid_mask & mask) && | ||
530 | ((ssd->fla[i] & link->fla_mask) != link->fla)) | ||
531 | continue; | ||
532 | return mask; | ||
533 | } | ||
534 | return 0; | ||
535 | } | ||
536 | EXPORT_SYMBOL_GPL(chp_ssd_get_mask); | ||
537 | |||
499 | static inline int info_bit_num(struct chp_id id) | 538 | static inline int info_bit_num(struct chp_id id) |
500 | { | 539 | { |
501 | return id.id + id.cssid * (__MAX_CHPID + 1); | 540 | return id.id + id.cssid * (__MAX_CHPID + 1); |
@@ -575,6 +614,7 @@ static void cfg_func(struct work_struct *work) | |||
575 | { | 614 | { |
576 | struct chp_id chpid; | 615 | struct chp_id chpid; |
577 | enum cfg_task_t t; | 616 | enum cfg_task_t t; |
617 | int rc; | ||
578 | 618 | ||
579 | mutex_lock(&cfg_lock); | 619 | mutex_lock(&cfg_lock); |
580 | t = cfg_none; | 620 | t = cfg_none; |
@@ -589,14 +629,24 @@ static void cfg_func(struct work_struct *work) | |||
589 | 629 | ||
590 | switch (t) { | 630 | switch (t) { |
591 | case cfg_configure: | 631 | case cfg_configure: |
592 | sclp_chp_configure(chpid); | 632 | rc = sclp_chp_configure(chpid); |
593 | info_expire(); | 633 | if (rc) |
594 | chsc_chp_online(chpid); | 634 | CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)=" |
635 | "%d\n", chpid.cssid, chpid.id, rc); | ||
636 | else { | ||
637 | info_expire(); | ||
638 | chsc_chp_online(chpid); | ||
639 | } | ||
595 | break; | 640 | break; |
596 | case cfg_deconfigure: | 641 | case cfg_deconfigure: |
597 | sclp_chp_deconfigure(chpid); | 642 | rc = sclp_chp_deconfigure(chpid); |
598 | info_expire(); | 643 | if (rc) |
599 | chsc_chp_offline(chpid); | 644 | CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)=" |
645 | "%d\n", chpid.cssid, chpid.id, rc); | ||
646 | else { | ||
647 | info_expire(); | ||
648 | chsc_chp_offline(chpid); | ||
649 | } | ||
600 | break; | 650 | break; |
601 | case cfg_none: | 651 | case cfg_none: |
602 | /* Get updated information after last change. */ | 652 | /* Get updated information after last change. */ |
@@ -654,10 +704,16 @@ static int cfg_wait_idle(void) | |||
654 | static int __init chp_init(void) | 704 | static int __init chp_init(void) |
655 | { | 705 | { |
656 | struct chp_id chpid; | 706 | struct chp_id chpid; |
707 | int ret; | ||
657 | 708 | ||
709 | ret = s390_register_crw_handler(CRW_RSC_CPATH, chp_process_crw); | ||
710 | if (ret) | ||
711 | return ret; | ||
658 | chp_wq = create_singlethread_workqueue("cio_chp"); | 712 | chp_wq = create_singlethread_workqueue("cio_chp"); |
659 | if (!chp_wq) | 713 | if (!chp_wq) { |
714 | s390_unregister_crw_handler(CRW_RSC_CPATH); | ||
660 | return -ENOMEM; | 715 | return -ENOMEM; |
716 | } | ||
661 | INIT_WORK(&cfg_work, cfg_func); | 717 | INIT_WORK(&cfg_work, cfg_func); |
662 | init_waitqueue_head(&cfg_wait_queue); | 718 | init_waitqueue_head(&cfg_wait_queue); |
663 | if (info_update()) | 719 | if (info_update()) |
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h index 65286563c592..26c3d2246176 100644 --- a/drivers/s390/cio/chp.h +++ b/drivers/s390/cio/chp.h | |||
@@ -12,12 +12,24 @@ | |||
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | #include <asm/chpid.h> | 13 | #include <asm/chpid.h> |
14 | #include "chsc.h" | 14 | #include "chsc.h" |
15 | #include "css.h" | ||
15 | 16 | ||
16 | #define CHP_STATUS_STANDBY 0 | 17 | #define CHP_STATUS_STANDBY 0 |
17 | #define CHP_STATUS_CONFIGURED 1 | 18 | #define CHP_STATUS_CONFIGURED 1 |
18 | #define CHP_STATUS_RESERVED 2 | 19 | #define CHP_STATUS_RESERVED 2 |
19 | #define CHP_STATUS_NOT_RECOGNIZED 3 | 20 | #define CHP_STATUS_NOT_RECOGNIZED 3 |
20 | 21 | ||
22 | #define CHP_ONLINE 0 | ||
23 | #define CHP_OFFLINE 1 | ||
24 | #define CHP_VARY_ON 2 | ||
25 | #define CHP_VARY_OFF 3 | ||
26 | |||
27 | struct chp_link { | ||
28 | struct chp_id chpid; | ||
29 | u32 fla_mask; | ||
30 | u16 fla; | ||
31 | }; | ||
32 | |||
21 | static inline int chp_test_bit(u8 *bitmap, int num) | 33 | static inline int chp_test_bit(u8 *bitmap, int num) |
22 | { | 34 | { |
23 | int byte = num >> 3; | 35 | int byte = num >> 3; |
@@ -42,12 +54,11 @@ int chp_get_status(struct chp_id chpid); | |||
42 | u8 chp_get_sch_opm(struct subchannel *sch); | 54 | u8 chp_get_sch_opm(struct subchannel *sch); |
43 | int chp_is_registered(struct chp_id chpid); | 55 | int chp_is_registered(struct chp_id chpid); |
44 | void *chp_get_chp_desc(struct chp_id chpid); | 56 | void *chp_get_chp_desc(struct chp_id chpid); |
45 | void chp_process_crw(int id, int available); | ||
46 | void chp_remove_cmg_attr(struct channel_path *chp); | 57 | void chp_remove_cmg_attr(struct channel_path *chp); |
47 | int chp_add_cmg_attr(struct channel_path *chp); | 58 | int chp_add_cmg_attr(struct channel_path *chp); |
48 | int chp_new(struct chp_id chpid); | 59 | int chp_new(struct chp_id chpid); |
49 | void chp_cfg_schedule(struct chp_id chpid, int configure); | 60 | void chp_cfg_schedule(struct chp_id chpid, int configure); |
50 | void chp_cfg_cancel_deconfigure(struct chp_id chpid); | 61 | void chp_cfg_cancel_deconfigure(struct chp_id chpid); |
51 | int chp_info_get_status(struct chp_id chpid); | 62 | int chp_info_get_status(struct chp_id chpid); |
52 | 63 | int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *); | |
53 | #endif /* S390_CHP_H */ | 64 | #endif /* S390_CHP_H */ |
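Together, struct chp_link and the CHP_ONLINE/CHP_OFFLINE/CHP_VARY_ON/CHP_VARY_OFF constants form the new notification interface: chsc.c no longer manipulates each subchannel itself but calls the subchannel driver's chp_event() method. A minimal callback skeleton, assuming the method signature implied by the calls in chsc.c (subchannel, chp_link, event); the handler name and body are illustrative:

    static int my_chp_event(struct subchannel *sch, struct chp_link *link, int event)
    {
    	int mask = chp_ssd_get_mask(&sch->ssd_info, link);

    	if (!mask)
    		return 0;	/* none of this subchannel's paths is concerned */
    	switch (event) {
    	case CHP_OFFLINE:
    	case CHP_VARY_OFF:
    		/* drop the paths in 'mask', terminate or retry pending I/O */
    		break;
    	case CHP_ONLINE:
    	case CHP_VARY_ON:
    		/* add the paths in 'mask' and trigger path verification */
    		break;
    	}
    	return 0;
    }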
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 5de86908b0d0..65264a38057d 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -2,8 +2,7 @@ | |||
2 | * drivers/s390/cio/chsc.c | 2 | * drivers/s390/cio/chsc.c |
3 | * S/390 common I/O routines -- channel subsystem call | 3 | * S/390 common I/O routines -- channel subsystem call |
4 | * | 4 | * |
5 | * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, | 5 | * Copyright IBM Corp. 1999,2008 |
6 | * IBM Corporation | ||
7 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | 6 | * Author(s): Ingo Adlung (adlung@de.ibm.com) |
8 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
9 | * Arnd Bergmann (arndb@de.ibm.com) | 8 | * Arnd Bergmann (arndb@de.ibm.com) |
@@ -16,7 +15,9 @@ | |||
16 | 15 | ||
17 | #include <asm/cio.h> | 16 | #include <asm/cio.h> |
18 | #include <asm/chpid.h> | 17 | #include <asm/chpid.h> |
18 | #include <asm/chsc.h> | ||
19 | 19 | ||
20 | #include "../s390mach.h" | ||
20 | #include "css.h" | 21 | #include "css.h" |
21 | #include "cio.h" | 22 | #include "cio.h" |
22 | #include "cio_debug.h" | 23 | #include "cio_debug.h" |
@@ -127,77 +128,12 @@ out_free: | |||
127 | return ret; | 128 | return ret; |
128 | } | 129 | } |
129 | 130 | ||
130 | static int check_for_io_on_path(struct subchannel *sch, int mask) | ||
131 | { | ||
132 | int cc; | ||
133 | |||
134 | cc = stsch(sch->schid, &sch->schib); | ||
135 | if (cc) | ||
136 | return 0; | ||
137 | if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask) | ||
138 | return 1; | ||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | static void terminate_internal_io(struct subchannel *sch) | ||
143 | { | ||
144 | if (cio_clear(sch)) { | ||
145 | /* Recheck device in case clear failed. */ | ||
146 | sch->lpm = 0; | ||
147 | if (device_trigger_verify(sch) != 0) | ||
148 | css_schedule_eval(sch->schid); | ||
149 | return; | ||
150 | } | ||
151 | /* Request retry of internal operation. */ | ||
152 | device_set_intretry(sch); | ||
153 | /* Call handler. */ | ||
154 | if (sch->driver && sch->driver->termination) | ||
155 | sch->driver->termination(sch); | ||
156 | } | ||
157 | |||
158 | static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) | 131 | static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) |
159 | { | 132 | { |
160 | int j; | ||
161 | int mask; | ||
162 | struct chp_id *chpid = data; | ||
163 | struct schib schib; | ||
164 | |||
165 | for (j = 0; j < 8; j++) { | ||
166 | mask = 0x80 >> j; | ||
167 | if ((sch->schib.pmcw.pim & mask) && | ||
168 | (sch->schib.pmcw.chpid[j] == chpid->id)) | ||
169 | break; | ||
170 | } | ||
171 | if (j >= 8) | ||
172 | return 0; | ||
173 | |||
174 | spin_lock_irq(sch->lock); | 133 | spin_lock_irq(sch->lock); |
175 | 134 | if (sch->driver && sch->driver->chp_event) | |
176 | stsch(sch->schid, &schib); | 135 | if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0) |
177 | if (!css_sch_is_valid(&schib)) | ||
178 | goto out_unreg; | ||
179 | memcpy(&sch->schib, &schib, sizeof(struct schib)); | ||
180 | /* Check for single path devices. */ | ||
181 | if (sch->schib.pmcw.pim == 0x80) | ||
182 | goto out_unreg; | ||
183 | |||
184 | if (check_for_io_on_path(sch, mask)) { | ||
185 | if (device_is_online(sch)) | ||
186 | device_kill_io(sch); | ||
187 | else { | ||
188 | terminate_internal_io(sch); | ||
189 | /* Re-start path verification. */ | ||
190 | if (sch->driver && sch->driver->verify) | ||
191 | sch->driver->verify(sch); | ||
192 | } | ||
193 | } else { | ||
194 | /* trigger path verification. */ | ||
195 | if (sch->driver && sch->driver->verify) | ||
196 | sch->driver->verify(sch); | ||
197 | else if (sch->lpm == mask) | ||
198 | goto out_unreg; | 136 | goto out_unreg; |
199 | } | ||
200 | |||
201 | spin_unlock_irq(sch->lock); | 137 | spin_unlock_irq(sch->lock); |
202 | return 0; | 138 | return 0; |
203 | 139 | ||
@@ -211,15 +147,18 @@ out_unreg: | |||
211 | void chsc_chp_offline(struct chp_id chpid) | 147 | void chsc_chp_offline(struct chp_id chpid) |
212 | { | 148 | { |
213 | char dbf_txt[15]; | 149 | char dbf_txt[15]; |
150 | struct chp_link link; | ||
214 | 151 | ||
215 | sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); | 152 | sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); |
216 | CIO_TRACE_EVENT(2, dbf_txt); | 153 | CIO_TRACE_EVENT(2, dbf_txt); |
217 | 154 | ||
218 | if (chp_get_status(chpid) <= 0) | 155 | if (chp_get_status(chpid) <= 0) |
219 | return; | 156 | return; |
157 | memset(&link, 0, sizeof(struct chp_link)); | ||
158 | link.chpid = chpid; | ||
220 | /* Wait until previous actions have settled. */ | 159 | /* Wait until previous actions have settled. */ |
221 | css_wait_for_slow_path(); | 160 | css_wait_for_slow_path(); |
222 | for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid); | 161 | for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); |
223 | } | 162 | } |
224 | 163 | ||
225 | static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) | 164 | static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) |
@@ -242,67 +181,25 @@ static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) | |||
242 | return 0; | 181 | return 0; |
243 | } | 182 | } |
244 | 183 | ||
245 | struct res_acc_data { | ||
246 | struct chp_id chpid; | ||
247 | u32 fla_mask; | ||
248 | u16 fla; | ||
249 | }; | ||
250 | |||
251 | static int get_res_chpid_mask(struct chsc_ssd_info *ssd, | ||
252 | struct res_acc_data *data) | ||
253 | { | ||
254 | int i; | ||
255 | int mask; | ||
256 | |||
257 | for (i = 0; i < 8; i++) { | ||
258 | mask = 0x80 >> i; | ||
259 | if (!(ssd->path_mask & mask)) | ||
260 | continue; | ||
261 | if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid)) | ||
262 | continue; | ||
263 | if ((ssd->fla_valid_mask & mask) && | ||
264 | ((ssd->fla[i] & data->fla_mask) != data->fla)) | ||
265 | continue; | ||
266 | return mask; | ||
267 | } | ||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | static int __s390_process_res_acc(struct subchannel *sch, void *data) | 184 | static int __s390_process_res_acc(struct subchannel *sch, void *data) |
272 | { | 185 | { |
273 | int chp_mask, old_lpm; | ||
274 | struct res_acc_data *res_data = data; | ||
275 | |||
276 | spin_lock_irq(sch->lock); | 186 | spin_lock_irq(sch->lock); |
277 | chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data); | 187 | if (sch->driver && sch->driver->chp_event) |
278 | if (chp_mask == 0) | 188 | sch->driver->chp_event(sch, data, CHP_ONLINE); |
279 | goto out; | ||
280 | if (stsch(sch->schid, &sch->schib)) | ||
281 | goto out; | ||
282 | old_lpm = sch->lpm; | ||
283 | sch->lpm = ((sch->schib.pmcw.pim & | ||
284 | sch->schib.pmcw.pam & | ||
285 | sch->schib.pmcw.pom) | ||
286 | | chp_mask) & sch->opm; | ||
287 | if (!old_lpm && sch->lpm) | ||
288 | device_trigger_reprobe(sch); | ||
289 | else if (sch->driver && sch->driver->verify) | ||
290 | sch->driver->verify(sch); | ||
291 | out: | ||
292 | spin_unlock_irq(sch->lock); | 189 | spin_unlock_irq(sch->lock); |
293 | 190 | ||
294 | return 0; | 191 | return 0; |
295 | } | 192 | } |
296 | 193 | ||
297 | static void s390_process_res_acc (struct res_acc_data *res_data) | 194 | static void s390_process_res_acc(struct chp_link *link) |
298 | { | 195 | { |
299 | char dbf_txt[15]; | 196 | char dbf_txt[15]; |
300 | 197 | ||
301 | sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid, | 198 | sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid, |
302 | res_data->chpid.id); | 199 | link->chpid.id); |
303 | CIO_TRACE_EVENT( 2, dbf_txt); | 200 | CIO_TRACE_EVENT( 2, dbf_txt); |
304 | if (res_data->fla != 0) { | 201 | if (link->fla != 0) { |
305 | sprintf(dbf_txt, "fla%x", res_data->fla); | 202 | sprintf(dbf_txt, "fla%x", link->fla); |
306 | CIO_TRACE_EVENT( 2, dbf_txt); | 203 | CIO_TRACE_EVENT( 2, dbf_txt); |
307 | } | 204 | } |
308 | /* Wait until previous actions have settled. */ | 205 | /* Wait until previous actions have settled. */ |
@@ -315,7 +212,7 @@ static void s390_process_res_acc (struct res_acc_data *res_data) | |||
315 | * will we have to do. | 212 | * will we have to do. |
316 | */ | 213 | */ |
317 | for_each_subchannel_staged(__s390_process_res_acc, | 214 | for_each_subchannel_staged(__s390_process_res_acc, |
318 | s390_process_res_acc_new_sch, res_data); | 215 | s390_process_res_acc_new_sch, link); |
319 | } | 216 | } |
320 | 217 | ||
321 | static int | 218 | static int |
@@ -388,7 +285,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) | |||
388 | 285 | ||
389 | static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) | 286 | static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) |
390 | { | 287 | { |
391 | struct res_acc_data res_data; | 288 | struct chp_link link; |
392 | struct chp_id chpid; | 289 | struct chp_id chpid; |
393 | int status; | 290 | int status; |
394 | 291 | ||
@@ -404,18 +301,18 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) | |||
404 | chp_new(chpid); | 301 | chp_new(chpid); |
405 | else if (!status) | 302 | else if (!status) |
406 | return; | 303 | return; |
407 | memset(&res_data, 0, sizeof(struct res_acc_data)); | 304 | memset(&link, 0, sizeof(struct chp_link)); |
408 | res_data.chpid = chpid; | 305 | link.chpid = chpid; |
409 | if ((sei_area->vf & 0xc0) != 0) { | 306 | if ((sei_area->vf & 0xc0) != 0) { |
410 | res_data.fla = sei_area->fla; | 307 | link.fla = sei_area->fla; |
411 | if ((sei_area->vf & 0xc0) == 0xc0) | 308 | if ((sei_area->vf & 0xc0) == 0xc0) |
412 | /* full link address */ | 309 | /* full link address */ |
413 | res_data.fla_mask = 0xffff; | 310 | link.fla_mask = 0xffff; |
414 | else | 311 | else |
415 | /* link address */ | 312 | /* link address */ |
416 | res_data.fla_mask = 0xff00; | 313 | link.fla_mask = 0xff00; |
417 | } | 314 | } |
418 | s390_process_res_acc(&res_data); | 315 | s390_process_res_acc(&link); |
419 | } | 316 | } |
420 | 317 | ||
421 | struct chp_config_data { | 318 | struct chp_config_data { |
@@ -480,17 +377,25 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area) | |||
480 | } | 377 | } |
481 | } | 378 | } |
482 | 379 | ||
483 | void chsc_process_crw(void) | 380 | static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) |
484 | { | 381 | { |
485 | struct chsc_sei_area *sei_area; | 382 | struct chsc_sei_area *sei_area; |
486 | 383 | ||
384 | if (overflow) { | ||
385 | css_schedule_eval_all(); | ||
386 | return; | ||
387 | } | ||
388 | CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, " | ||
389 | "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", | ||
390 | crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, | ||
391 | crw0->erc, crw0->rsid); | ||
487 | if (!sei_page) | 392 | if (!sei_page) |
488 | return; | 393 | return; |
489 | /* Access to sei_page is serialized through machine check handler | 394 | /* Access to sei_page is serialized through machine check handler |
490 | * thread, so no need for locking. */ | 395 | * thread, so no need for locking. */ |
491 | sei_area = sei_page; | 396 | sei_area = sei_page; |
492 | 397 | ||
493 | CIO_TRACE_EVENT( 2, "prcss"); | 398 | CIO_TRACE_EVENT(2, "prcss"); |
494 | do { | 399 | do { |
495 | memset(sei_area, 0, sizeof(*sei_area)); | 400 | memset(sei_area, 0, sizeof(*sei_area)); |
496 | sei_area->request.length = 0x0010; | 401 | sei_area->request.length = 0x0010; |
@@ -509,114 +414,36 @@ void chsc_process_crw(void) | |||
509 | } while (sei_area->flags & 0x80); | 414 | } while (sei_area->flags & 0x80); |
510 | } | 415 | } |
511 | 416 | ||
512 | static int __chp_add_new_sch(struct subchannel_id schid, void *data) | ||
513 | { | ||
514 | struct schib schib; | ||
515 | |||
516 | if (stsch_err(schid, &schib)) | ||
517 | /* We're through */ | ||
518 | return -ENXIO; | ||
519 | |||
520 | /* Put it on the slow path. */ | ||
521 | css_schedule_eval(schid); | ||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | |||
526 | static int __chp_add(struct subchannel *sch, void *data) | ||
527 | { | ||
528 | int i, mask; | ||
529 | struct chp_id *chpid = data; | ||
530 | |||
531 | spin_lock_irq(sch->lock); | ||
532 | for (i=0; i<8; i++) { | ||
533 | mask = 0x80 >> i; | ||
534 | if ((sch->schib.pmcw.pim & mask) && | ||
535 | (sch->schib.pmcw.chpid[i] == chpid->id)) | ||
536 | break; | ||
537 | } | ||
538 | if (i==8) { | ||
539 | spin_unlock_irq(sch->lock); | ||
540 | return 0; | ||
541 | } | ||
542 | if (stsch(sch->schid, &sch->schib)) { | ||
543 | spin_unlock_irq(sch->lock); | ||
544 | css_schedule_eval(sch->schid); | ||
545 | return 0; | ||
546 | } | ||
547 | sch->lpm = ((sch->schib.pmcw.pim & | ||
548 | sch->schib.pmcw.pam & | ||
549 | sch->schib.pmcw.pom) | ||
550 | | mask) & sch->opm; | ||
551 | |||
552 | if (sch->driver && sch->driver->verify) | ||
553 | sch->driver->verify(sch); | ||
554 | |||
555 | spin_unlock_irq(sch->lock); | ||
556 | |||
557 | return 0; | ||
558 | } | ||
559 | |||
560 | void chsc_chp_online(struct chp_id chpid) | 417 | void chsc_chp_online(struct chp_id chpid) |
561 | { | 418 | { |
562 | char dbf_txt[15]; | 419 | char dbf_txt[15]; |
420 | struct chp_link link; | ||
563 | 421 | ||
564 | sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); | 422 | sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); |
565 | CIO_TRACE_EVENT(2, dbf_txt); | 423 | CIO_TRACE_EVENT(2, dbf_txt); |
566 | 424 | ||
567 | if (chp_get_status(chpid) != 0) { | 425 | if (chp_get_status(chpid) != 0) { |
426 | memset(&link, 0, sizeof(struct chp_link)); | ||
427 | link.chpid = chpid; | ||
568 | /* Wait until previous actions have settled. */ | 428 | /* Wait until previous actions have settled. */ |
569 | css_wait_for_slow_path(); | 429 | css_wait_for_slow_path(); |
570 | for_each_subchannel_staged(__chp_add, __chp_add_new_sch, | 430 | for_each_subchannel_staged(__s390_process_res_acc, NULL, |
571 | &chpid); | 431 | &link); |
572 | } | 432 | } |
573 | } | 433 | } |
574 | 434 | ||
575 | static void __s390_subchannel_vary_chpid(struct subchannel *sch, | 435 | static void __s390_subchannel_vary_chpid(struct subchannel *sch, |
576 | struct chp_id chpid, int on) | 436 | struct chp_id chpid, int on) |
577 | { | 437 | { |
578 | int chp, old_lpm; | ||
579 | int mask; | ||
580 | unsigned long flags; | 438 | unsigned long flags; |
439 | struct chp_link link; | ||
581 | 440 | ||
441 | memset(&link, 0, sizeof(struct chp_link)); | ||
442 | link.chpid = chpid; | ||
582 | spin_lock_irqsave(sch->lock, flags); | 443 | spin_lock_irqsave(sch->lock, flags); |
583 | old_lpm = sch->lpm; | 444 | if (sch->driver && sch->driver->chp_event) |
584 | for (chp = 0; chp < 8; chp++) { | 445 | sch->driver->chp_event(sch, &link, |
585 | mask = 0x80 >> chp; | 446 | on ? CHP_VARY_ON : CHP_VARY_OFF); |
586 | if (!(sch->ssd_info.path_mask & mask)) | ||
587 | continue; | ||
588 | if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid)) | ||
589 | continue; | ||
590 | |||
591 | if (on) { | ||
592 | sch->opm |= mask; | ||
593 | sch->lpm |= mask; | ||
594 | if (!old_lpm) | ||
595 | device_trigger_reprobe(sch); | ||
596 | else if (sch->driver && sch->driver->verify) | ||
597 | sch->driver->verify(sch); | ||
598 | break; | ||
599 | } | ||
600 | sch->opm &= ~mask; | ||
601 | sch->lpm &= ~mask; | ||
602 | if (check_for_io_on_path(sch, mask)) { | ||
603 | if (device_is_online(sch)) | ||
604 | /* Path verification is done after killing. */ | ||
605 | device_kill_io(sch); | ||
606 | else { | ||
607 | /* Kill and retry internal I/O. */ | ||
608 | terminate_internal_io(sch); | ||
609 | /* Re-start path verification. */ | ||
610 | if (sch->driver && sch->driver->verify) | ||
611 | sch->driver->verify(sch); | ||
612 | } | ||
613 | } else if (!sch->lpm) { | ||
614 | if (device_trigger_verify(sch) != 0) | ||
615 | css_schedule_eval(sch->schid); | ||
616 | } else if (sch->driver && sch->driver->verify) | ||
617 | sch->driver->verify(sch); | ||
618 | break; | ||
619 | } | ||
620 | spin_unlock_irqrestore(sch->lock, flags); | 447 | spin_unlock_irqrestore(sch->lock, flags); |
621 | } | 448 | } |
622 | 449 | ||
@@ -656,6 +483,10 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data) | |||
656 | */ | 483 | */ |
657 | int chsc_chp_vary(struct chp_id chpid, int on) | 484 | int chsc_chp_vary(struct chp_id chpid, int on) |
658 | { | 485 | { |
486 | struct chp_link link; | ||
487 | |||
488 | memset(&link, 0, sizeof(struct chp_link)); | ||
489 | link.chpid = chpid; | ||
659 | /* Wait until previous actions have settled. */ | 490 | /* Wait until previous actions have settled. */ |
660 | css_wait_for_slow_path(); | 491 | css_wait_for_slow_path(); |
661 | /* | 492 | /* |
@@ -664,10 +495,10 @@ int chsc_chp_vary(struct chp_id chpid, int on) | |||
664 | 495 | ||
665 | if (on) | 496 | if (on) |
666 | for_each_subchannel_staged(s390_subchannel_vary_chpid_on, | 497 | for_each_subchannel_staged(s390_subchannel_vary_chpid_on, |
667 | __s390_vary_chpid_on, &chpid); | 498 | __s390_vary_chpid_on, &link); |
668 | else | 499 | else |
669 | for_each_subchannel_staged(s390_subchannel_vary_chpid_off, | 500 | for_each_subchannel_staged(s390_subchannel_vary_chpid_off, |
670 | NULL, &chpid); | 501 | NULL, &link); |
671 | 502 | ||
672 | return 0; | 503 | return 0; |
673 | } | 504 | } |
@@ -797,23 +628,33 @@ chsc_secm(struct channel_subsystem *css, int enable) | |||
797 | return ret; | 628 | return ret; |
798 | } | 629 | } |
799 | 630 | ||
800 | int chsc_determine_channel_path_description(struct chp_id chpid, | 631 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, |
801 | struct channel_path_desc *desc) | 632 | int c, int m, |
633 | struct chsc_response_struct *resp) | ||
802 | { | 634 | { |
803 | int ccode, ret; | 635 | int ccode, ret; |
804 | 636 | ||
805 | struct { | 637 | struct { |
806 | struct chsc_header request; | 638 | struct chsc_header request; |
807 | u32 : 24; | 639 | u32 : 2; |
640 | u32 m : 1; | ||
641 | u32 c : 1; | ||
642 | u32 fmt : 4; | ||
643 | u32 cssid : 8; | ||
644 | u32 : 4; | ||
645 | u32 rfmt : 4; | ||
808 | u32 first_chpid : 8; | 646 | u32 first_chpid : 8; |
809 | u32 : 24; | 647 | u32 : 24; |
810 | u32 last_chpid : 8; | 648 | u32 last_chpid : 8; |
811 | u32 zeroes1; | 649 | u32 zeroes1; |
812 | struct chsc_header response; | 650 | struct chsc_header response; |
813 | u32 zeroes2; | 651 | u8 data[PAGE_SIZE - 20]; |
814 | struct channel_path_desc desc; | ||
815 | } __attribute__ ((packed)) *scpd_area; | 652 | } __attribute__ ((packed)) *scpd_area; |
816 | 653 | ||
654 | if ((rfmt == 1) && !css_general_characteristics.fcs) | ||
655 | return -EINVAL; | ||
656 | if ((rfmt == 2) && !css_general_characteristics.cib) | ||
657 | return -EINVAL; | ||
817 | scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 658 | scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
818 | if (!scpd_area) | 659 | if (!scpd_area) |
819 | return -ENOMEM; | 660 | return -ENOMEM; |
@@ -821,8 +662,13 @@ int chsc_determine_channel_path_description(struct chp_id chpid, | |||
821 | scpd_area->request.length = 0x0010; | 662 | scpd_area->request.length = 0x0010; |
822 | scpd_area->request.code = 0x0002; | 663 | scpd_area->request.code = 0x0002; |
823 | 664 | ||
665 | scpd_area->cssid = chpid.cssid; | ||
824 | scpd_area->first_chpid = chpid.id; | 666 | scpd_area->first_chpid = chpid.id; |
825 | scpd_area->last_chpid = chpid.id; | 667 | scpd_area->last_chpid = chpid.id; |
668 | scpd_area->m = m; | ||
669 | scpd_area->c = c; | ||
670 | scpd_area->fmt = fmt; | ||
671 | scpd_area->rfmt = rfmt; | ||
826 | 672 | ||
827 | ccode = chsc(scpd_area); | 673 | ccode = chsc(scpd_area); |
828 | if (ccode > 0) { | 674 | if (ccode > 0) { |
@@ -833,8 +679,7 @@ int chsc_determine_channel_path_description(struct chp_id chpid, | |||
833 | ret = chsc_error_from_response(scpd_area->response.code); | 679 | ret = chsc_error_from_response(scpd_area->response.code); |
834 | if (ret == 0) | 680 | if (ret == 0) |
835 | /* Success. */ | 681 | /* Success. */ |
836 | memcpy(desc, &scpd_area->desc, | 682 | memcpy(resp, &scpd_area->response, scpd_area->response.length); |
837 | sizeof(struct channel_path_desc)); | ||
838 | else | 683 | else |
839 | CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", | 684 | CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", |
840 | scpd_area->response.code); | 685 | scpd_area->response.code); |
@@ -842,6 +687,25 @@ out: | |||
842 | free_page((unsigned long)scpd_area); | 687 | free_page((unsigned long)scpd_area); |
843 | return ret; | 688 | return ret; |
844 | } | 689 | } |
690 | EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); | ||
691 | |||
692 | int chsc_determine_base_channel_path_desc(struct chp_id chpid, | ||
693 | struct channel_path_desc *desc) | ||
694 | { | ||
695 | struct chsc_response_struct *chsc_resp; | ||
696 | int ret; | ||
697 | |||
698 | chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL); | ||
699 | if (!chsc_resp) | ||
700 | return -ENOMEM; | ||
701 | ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp); | ||
702 | if (ret) | ||
703 | goto out_free; | ||
704 | memcpy(desc, &chsc_resp->data, chsc_resp->length); | ||
705 | out_free: | ||
706 | kfree(chsc_resp); | ||
707 | return ret; | ||
708 | } | ||
845 | 709 | ||
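chsc_determine_base_channel_path_desc() is the convenience wrapper most in-kernel callers will use; it requests a format-0 description and copies it into the caller's buffer. A hedged usage sketch (the surrounding function name is illustrative):

    static int read_chp_desc(struct chp_id chpid)
    {
    	struct channel_path_desc desc;
    	int rc;

    	rc = chsc_determine_base_channel_path_desc(chpid, &desc);
    	if (rc)
    		return rc;
    	/* desc now holds the format-0 description block for this CHPID */
    	return 0;
    }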
846 | static void | 710 | static void |
847 | chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, | 711 | chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, |
@@ -937,15 +801,23 @@ out: | |||
937 | 801 | ||
938 | int __init chsc_alloc_sei_area(void) | 802 | int __init chsc_alloc_sei_area(void) |
939 | { | 803 | { |
804 | int ret; | ||
805 | |||
940 | sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 806 | sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
941 | if (!sei_page) | 807 | if (!sei_page) { |
942 | CIO_MSG_EVENT(0, "Can't allocate page for processing of " | 808 | CIO_MSG_EVENT(0, "Can't allocate page for processing of " |
943 | "chsc machine checks!\n"); | 809 | "chsc machine checks!\n"); |
944 | return (sei_page ? 0 : -ENOMEM); | 810 | return -ENOMEM; |
811 | } | ||
812 | ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw); | ||
813 | if (ret) | ||
814 | kfree(sei_page); | ||
815 | return ret; | ||
945 | } | 816 | } |
946 | 817 | ||
947 | void __init chsc_free_sei_area(void) | 818 | void __init chsc_free_sei_area(void) |
948 | { | 819 | { |
820 | s390_unregister_crw_handler(CRW_RSC_CSS); | ||
949 | kfree(sei_page); | 821 | kfree(sei_page); |
950 | } | 822 | } |
951 | 823 | ||
@@ -1043,3 +915,52 @@ exit: | |||
1043 | 915 | ||
1044 | EXPORT_SYMBOL_GPL(css_general_characteristics); | 916 | EXPORT_SYMBOL_GPL(css_general_characteristics); |
1045 | EXPORT_SYMBOL_GPL(css_chsc_characteristics); | 917 | EXPORT_SYMBOL_GPL(css_chsc_characteristics); |
918 | |||
919 | int chsc_sstpc(void *page, unsigned int op, u16 ctrl) | ||
920 | { | ||
921 | struct { | ||
922 | struct chsc_header request; | ||
923 | unsigned int rsvd0; | ||
924 | unsigned int op : 8; | ||
925 | unsigned int rsvd1 : 8; | ||
926 | unsigned int ctrl : 16; | ||
927 | unsigned int rsvd2[5]; | ||
928 | struct chsc_header response; | ||
929 | unsigned int rsvd3[7]; | ||
930 | } __attribute__ ((packed)) *rr; | ||
931 | int rc; | ||
932 | |||
933 | memset(page, 0, PAGE_SIZE); | ||
934 | rr = page; | ||
935 | rr->request.length = 0x0020; | ||
936 | rr->request.code = 0x0033; | ||
937 | rr->op = op; | ||
938 | rr->ctrl = ctrl; | ||
939 | rc = chsc(rr); | ||
940 | if (rc) | ||
941 | return -EIO; | ||
942 | rc = (rr->response.code == 0x0001) ? 0 : -EIO; | ||
943 | return rc; | ||
944 | } | ||
945 | |||
946 | int chsc_sstpi(void *page, void *result, size_t size) | ||
947 | { | ||
948 | struct { | ||
949 | struct chsc_header request; | ||
950 | unsigned int rsvd0[3]; | ||
951 | struct chsc_header response; | ||
952 | char data[size]; | ||
953 | } __attribute__ ((packed)) *rr; | ||
954 | int rc; | ||
955 | |||
956 | memset(page, 0, PAGE_SIZE); | ||
957 | rr = page; | ||
958 | rr->request.length = 0x0010; | ||
959 | rr->request.code = 0x0038; | ||
960 | rc = chsc(rr); | ||
961 | if (rc) | ||
962 | return -EIO; | ||
963 | memcpy(result, &rr->data, size); | ||
964 | return (rr->response.code == 0x0001) ? 0 : -EIO; | ||
965 | } | ||
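chsc_sstpc() and chsc_sstpi() wrap the set-STP-control and store-STP-information CHSC commands for the clock-synchronization code; both expect a zeroed 4K scratch page allocated with GFP_DMA. A hedged usage sketch; the result layout below is a placeholder, the real STP info block is defined by the timekeeping caller:

    static int query_stp_info(void)
    {
    	struct { u32 word[8]; } info;	/* placeholder for the real STP info block */
    	void *page;
    	int rc;

    	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
    	if (!page)
    		return -ENOMEM;
    	rc = chsc_sstpi(page, &info, sizeof(info));
    	free_page((unsigned long)page);
    	return rc;
    }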
966 | |||
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index d1f5db1e69b9..fb6c4d6c45b4 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
@@ -4,7 +4,8 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
6 | #include <asm/chpid.h> | 6 | #include <asm/chpid.h> |
7 | #include "schid.h" | 7 | #include <asm/chsc.h> |
8 | #include <asm/schid.h> | ||
8 | 9 | ||
9 | #define CHSC_SDA_OC_MSS 0x2 | 10 | #define CHSC_SDA_OC_MSS 0x2 |
10 | 11 | ||
@@ -36,14 +37,15 @@ struct channel_path_desc { | |||
36 | 37 | ||
37 | struct channel_path; | 38 | struct channel_path; |
38 | 39 | ||
39 | extern void chsc_process_crw(void); | ||
40 | |||
41 | struct css_general_char { | 40 | struct css_general_char { |
42 | u64 : 41; | 41 | u64 : 12; |
42 | u32 dynio : 1; /* bit 12 */ | ||
43 | u32 : 28; | ||
43 | u32 aif : 1; /* bit 41 */ | 44 | u32 aif : 1; /* bit 41 */ |
44 | u32 : 3; | 45 | u32 : 3; |
45 | u32 mcss : 1; /* bit 45 */ | 46 | u32 mcss : 1; /* bit 45 */ |
46 | u32 : 2; | 47 | u32 fcs : 1; /* bit 46 */ |
48 | u32 : 1; | ||
47 | u32 ext_mb : 1; /* bit 48 */ | 49 | u32 ext_mb : 1; /* bit 48 */ |
48 | u32 : 7; | 50 | u32 : 7; |
49 | u32 aif_tdd : 1; /* bit 56 */ | 51 | u32 aif_tdd : 1; /* bit 56 */ |
@@ -51,7 +53,11 @@ struct css_general_char { | |||
51 | u32 qebsm : 1; /* bit 58 */ | 53 | u32 qebsm : 1; /* bit 58 */ |
52 | u32 : 8; | 54 | u32 : 8; |
53 | u32 aif_osa : 1; /* bit 67 */ | 55 | u32 aif_osa : 1; /* bit 67 */ |
54 | u32 : 28; | 56 | u32 : 14; |
57 | u32 cib : 1; /* bit 82 */ | ||
58 | u32 : 5; | ||
59 | u32 fcx : 1; /* bit 88 */ | ||
60 | u32 : 7; | ||
55 | }__attribute__((packed)); | 61 | }__attribute__((packed)); |
56 | 62 | ||
57 | struct css_chsc_char { | 63 | struct css_chsc_char { |
@@ -78,7 +84,6 @@ struct chsc_ssd_info { | |||
78 | extern int chsc_get_ssd_info(struct subchannel_id schid, | 84 | extern int chsc_get_ssd_info(struct subchannel_id schid, |
79 | struct chsc_ssd_info *ssd); | 85 | struct chsc_ssd_info *ssd); |
80 | extern int chsc_determine_css_characteristics(void); | 86 | extern int chsc_determine_css_characteristics(void); |
81 | extern int css_characteristics_avail; | ||
82 | extern int chsc_alloc_sei_area(void); | 87 | extern int chsc_alloc_sei_area(void); |
83 | extern void chsc_free_sei_area(void); | 88 | extern void chsc_free_sei_area(void); |
84 | 89 | ||
@@ -87,8 +92,11 @@ struct channel_subsystem; | |||
87 | extern int chsc_secm(struct channel_subsystem *, int); | 92 | extern int chsc_secm(struct channel_subsystem *, int); |
88 | 93 | ||
89 | int chsc_chp_vary(struct chp_id chpid, int on); | 94 | int chsc_chp_vary(struct chp_id chpid, int on); |
90 | int chsc_determine_channel_path_description(struct chp_id chpid, | 95 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, |
91 | struct channel_path_desc *desc); | 96 | int c, int m, |
97 | struct chsc_response_struct *resp); | ||
98 | int chsc_determine_base_channel_path_desc(struct chp_id chpid, | ||
99 | struct channel_path_desc *desc); | ||
92 | void chsc_chp_online(struct chp_id chpid); | 100 | void chsc_chp_online(struct chp_id chpid); |
93 | void chsc_chp_offline(struct chp_id chpid); | 101 | void chsc_chp_offline(struct chp_id chpid); |
94 | int chsc_get_channel_measurement_chars(struct channel_path *chp); | 102 | int chsc_get_channel_measurement_chars(struct channel_path *chp); |
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c new file mode 100644 index 000000000000..91ca87aa9f97 --- /dev/null +++ b/drivers/s390/cio/chsc_sch.c | |||
@@ -0,0 +1,820 @@ | |||
1 | /* | ||
2 | * Driver for s390 chsc subchannels | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #include <linux/device.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/uaccess.h> | ||
12 | #include <linux/miscdevice.h> | ||
13 | |||
14 | #include <asm/cio.h> | ||
15 | #include <asm/chsc.h> | ||
16 | #include <asm/isc.h> | ||
17 | |||
18 | #include "cio.h" | ||
19 | #include "cio_debug.h" | ||
20 | #include "css.h" | ||
21 | #include "chsc_sch.h" | ||
22 | #include "ioasm.h" | ||
23 | |||
24 | static debug_info_t *chsc_debug_msg_id; | ||
25 | static debug_info_t *chsc_debug_log_id; | ||
26 | |||
27 | #define CHSC_MSG(imp, args...) do { \ | ||
28 | debug_sprintf_event(chsc_debug_msg_id, imp , ##args); \ | ||
29 | } while (0) | ||
30 | |||
31 | #define CHSC_LOG(imp, txt) do { \ | ||
32 | debug_text_event(chsc_debug_log_id, imp , txt); \ | ||
33 | } while (0) | ||
34 | |||
35 | static void CHSC_LOG_HEX(int level, void *data, int length) | ||
36 | { | ||
37 | while (length > 0) { | ||
38 | debug_event(chsc_debug_log_id, level, data, length); | ||
39 | length -= chsc_debug_log_id->buf_size; | ||
40 | data += chsc_debug_log_id->buf_size; | ||
41 | } | ||
42 | } | ||
43 | |||
44 | MODULE_AUTHOR("IBM Corporation"); | ||
45 | MODULE_DESCRIPTION("driver for s390 chsc subchannels"); | ||
46 | MODULE_LICENSE("GPL"); | ||
47 | |||
48 | static void chsc_subchannel_irq(struct subchannel *sch) | ||
49 | { | ||
50 | struct chsc_private *private = sch->private; | ||
51 | struct chsc_request *request = private->request; | ||
52 | struct irb *irb = (struct irb *)__LC_IRB; | ||
53 | |||
54 | CHSC_LOG(4, "irb"); | ||
55 | CHSC_LOG_HEX(4, irb, sizeof(*irb)); | ||
56 | /* Copy irb to provided request and set done. */ | ||
57 | if (!request) { | ||
58 | CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n", | ||
59 | sch->schid.ssid, sch->schid.sch_no); | ||
60 | return; | ||
61 | } | ||
62 | private->request = NULL; | ||
63 | memcpy(&request->irb, irb, sizeof(*irb)); | ||
64 | stsch(sch->schid, &sch->schib); | ||
65 | complete(&request->completion); | ||
66 | put_device(&sch->dev); | ||
67 | } | ||
68 | |||
69 | static int chsc_subchannel_probe(struct subchannel *sch) | ||
70 | { | ||
71 | struct chsc_private *private; | ||
72 | int ret; | ||
73 | |||
74 | CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n", | ||
75 | sch->schid.ssid, sch->schid.sch_no); | ||
76 | sch->isc = CHSC_SCH_ISC; | ||
77 | private = kzalloc(sizeof(*private), GFP_KERNEL); | ||
78 | if (!private) | ||
79 | return -ENOMEM; | ||
80 | ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); | ||
81 | if (ret) { | ||
82 | CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n", | ||
83 | sch->schid.ssid, sch->schid.sch_no, ret); | ||
84 | kfree(private); | ||
85 | } else { | ||
86 | sch->private = private; | ||
87 | if (sch->dev.uevent_suppress) { | ||
88 | sch->dev.uevent_suppress = 0; | ||
89 | kobject_uevent(&sch->dev.kobj, KOBJ_ADD); | ||
90 | } | ||
91 | } | ||
92 | return ret; | ||
93 | } | ||
94 | |||
95 | static int chsc_subchannel_remove(struct subchannel *sch) | ||
96 | { | ||
97 | struct chsc_private *private; | ||
98 | |||
99 | cio_disable_subchannel(sch); | ||
100 | private = sch->private; | ||
101 | sch->private = NULL; | ||
102 | if (private->request) { | ||
103 | complete(&private->request->completion); | ||
104 | put_device(&sch->dev); | ||
105 | } | ||
106 | kfree(private); | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static void chsc_subchannel_shutdown(struct subchannel *sch) | ||
111 | { | ||
112 | cio_disable_subchannel(sch); | ||
113 | } | ||
114 | |||
115 | static struct css_device_id chsc_subchannel_ids[] = { | ||
116 | { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, }, | ||
117 | { /* end of list */ }, | ||
118 | }; | ||
119 | MODULE_DEVICE_TABLE(css, chsc_subchannel_ids); | ||
120 | |||
121 | static struct css_driver chsc_subchannel_driver = { | ||
122 | .owner = THIS_MODULE, | ||
123 | .subchannel_type = chsc_subchannel_ids, | ||
124 | .irq = chsc_subchannel_irq, | ||
125 | .probe = chsc_subchannel_probe, | ||
126 | .remove = chsc_subchannel_remove, | ||
127 | .shutdown = chsc_subchannel_shutdown, | ||
128 | .name = "chsc_subchannel", | ||
129 | }; | ||
130 | |||
131 | static int __init chsc_init_dbfs(void) | ||
132 | { | ||
133 | chsc_debug_msg_id = debug_register("chsc_msg", 16, 1, | ||
134 | 16 * sizeof(long)); | ||
135 | if (!chsc_debug_msg_id) | ||
136 | goto out; | ||
137 | debug_register_view(chsc_debug_msg_id, &debug_sprintf_view); | ||
138 | debug_set_level(chsc_debug_msg_id, 2); | ||
139 | chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16); | ||
140 | if (!chsc_debug_log_id) | ||
141 | goto out; | ||
142 | debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view); | ||
143 | debug_set_level(chsc_debug_log_id, 2); | ||
144 | return 0; | ||
145 | out: | ||
146 | if (chsc_debug_msg_id) | ||
147 | debug_unregister(chsc_debug_msg_id); | ||
148 | return -ENOMEM; | ||
149 | } | ||
150 | |||
151 | static void chsc_remove_dbfs(void) | ||
152 | { | ||
153 | debug_unregister(chsc_debug_log_id); | ||
154 | debug_unregister(chsc_debug_msg_id); | ||
155 | } | ||
156 | |||
157 | static int __init chsc_init_sch_driver(void) | ||
158 | { | ||
159 | return css_driver_register(&chsc_subchannel_driver); | ||
160 | } | ||
161 | |||
162 | static void chsc_cleanup_sch_driver(void) | ||
163 | { | ||
164 | css_driver_unregister(&chsc_subchannel_driver); | ||
165 | } | ||
166 | |||
167 | static DEFINE_SPINLOCK(chsc_lock); | ||
168 | |||
169 | static int chsc_subchannel_match_next_free(struct device *dev, void *data) | ||
170 | { | ||
171 | struct subchannel *sch = to_subchannel(dev); | ||
172 | |||
173 | return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw); | ||
174 | } | ||
175 | |||
176 | static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch) | ||
177 | { | ||
178 | struct device *dev; | ||
179 | |||
180 | dev = driver_find_device(&chsc_subchannel_driver.drv, | ||
181 | sch ? &sch->dev : NULL, NULL, | ||
182 | chsc_subchannel_match_next_free); | ||
183 | return dev ? to_subchannel(dev) : NULL; | ||
184 | } | ||
185 | |||
186 | /** | ||
187 | * chsc_async() - try to start a chsc request asynchronously | ||
188 | * @chsc_area: request to be started | ||
189 | * @request: request structure to associate | ||
190 | * | ||
191 | * Tries to start a chsc request on one of the existing chsc subchannels. | ||
192 | * Returns: | ||
193 | * %0 if the request was performed synchronously | ||
194 | * %-EINPROGRESS if the request was successfully started | ||
195 | * %-EBUSY if all chsc subchannels are busy | ||
196 | * %-ENODEV if no chsc subchannels are available | ||
197 | * Context: | ||
198 | * interrupts disabled, chsc_lock held | ||
199 | */ | ||
200 | static int chsc_async(struct chsc_async_area *chsc_area, | ||
201 | struct chsc_request *request) | ||
202 | { | ||
203 | int cc; | ||
204 | struct chsc_private *private; | ||
205 | struct subchannel *sch = NULL; | ||
206 | int ret = -ENODEV; | ||
207 | char dbf[10]; | ||
208 | |||
209 | chsc_area->header.key = PAGE_DEFAULT_KEY; | ||
210 | while ((sch = chsc_get_next_subchannel(sch))) { | ||
211 | spin_lock(sch->lock); | ||
212 | private = sch->private; | ||
213 | if (private->request) { | ||
214 | spin_unlock(sch->lock); | ||
215 | ret = -EBUSY; | ||
216 | continue; | ||
217 | } | ||
218 | chsc_area->header.sid = sch->schid; | ||
219 | CHSC_LOG(2, "schid"); | ||
220 | CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid)); | ||
221 | cc = chsc(chsc_area); | ||
222 | sprintf(dbf, "cc:%d", cc); | ||
223 | CHSC_LOG(2, dbf); | ||
224 | switch (cc) { | ||
225 | case 0: | ||
226 | ret = 0; | ||
227 | break; | ||
228 | case 1: | ||
229 | sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC; | ||
230 | ret = -EINPROGRESS; | ||
231 | private->request = request; | ||
232 | break; | ||
233 | case 2: | ||
234 | ret = -EBUSY; | ||
235 | break; | ||
236 | default: | ||
237 | ret = -ENODEV; | ||
238 | } | ||
239 | spin_unlock(sch->lock); | ||
240 | CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n", | ||
241 | sch->schid.ssid, sch->schid.sch_no, cc); | ||
242 | if (ret == -EINPROGRESS) | ||
243 | return -EINPROGRESS; | ||
244 | put_device(&sch->dev); | ||
245 | if (ret == 0) | ||
246 | return 0; | ||
247 | } | ||
248 | return ret; | ||
249 | } | ||
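The intended calling pattern for chsc_async() is the one used by chsc_ioctl_start() further down: prepare a chsc_request with an initialized completion, submit under chsc_lock with interrupts disabled, and wait only if -EINPROGRESS comes back. Condensed from that caller (error handling trimmed):

    	init_completion(&request->completion);
    	spin_lock_irq(&chsc_lock);
    	ret = chsc_async(chsc_area, request);
    	spin_unlock_irq(&chsc_lock);
    	if (ret == -EINPROGRESS) {
    		wait_for_completion(&request->completion);
    		ret = chsc_examine_irb(request);
    	}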
250 | |||
251 | static void chsc_log_command(struct chsc_async_area *chsc_area) | ||
252 | { | ||
253 | char dbf[10]; | ||
254 | |||
255 | sprintf(dbf, "CHSC:%x", chsc_area->header.code); | ||
256 | CHSC_LOG(0, dbf); | ||
257 | CHSC_LOG_HEX(0, chsc_area, 32); | ||
258 | } | ||
259 | |||
260 | static int chsc_examine_irb(struct chsc_request *request) | ||
261 | { | ||
262 | int backed_up; | ||
263 | |||
264 | if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND)) | ||
265 | return -EIO; | ||
266 | backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK; | ||
267 | request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK; | ||
268 | if (scsw_cstat(&request->irb.scsw) == 0) | ||
269 | return 0; | ||
270 | if (!backed_up) | ||
271 | return 0; | ||
272 | if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK) | ||
273 | return -EIO; | ||
274 | if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK) | ||
275 | return -EPERM; | ||
276 | if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK) | ||
277 | return -EAGAIN; | ||
278 | if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK) | ||
279 | return -EAGAIN; | ||
280 | return -EIO; | ||
281 | } | ||
282 | |||
283 | static int chsc_ioctl_start(void __user *user_area) | ||
284 | { | ||
285 | struct chsc_request *request; | ||
286 | struct chsc_async_area *chsc_area; | ||
287 | int ret; | ||
288 | char dbf[10]; | ||
289 | |||
290 | if (!css_general_characteristics.dynio) | ||
291 | /* It makes no sense to try. */ | ||
292 | return -EOPNOTSUPP; | ||
293 | chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL); | ||
294 | if (!chsc_area) | ||
295 | return -ENOMEM; | ||
296 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
297 | if (!request) { | ||
298 | ret = -ENOMEM; | ||
299 | goto out_free; | ||
300 | } | ||
301 | init_completion(&request->completion); | ||
302 | if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) { | ||
303 | ret = -EFAULT; | ||
304 | goto out_free; | ||
305 | } | ||
306 | chsc_log_command(chsc_area); | ||
307 | spin_lock_irq(&chsc_lock); | ||
308 | ret = chsc_async(chsc_area, request); | ||
309 | spin_unlock_irq(&chsc_lock); | ||
310 | if (ret == -EINPROGRESS) { | ||
311 | wait_for_completion(&request->completion); | ||
312 | ret = chsc_examine_irb(request); | ||
313 | } | ||
314 | /* copy area back to user */ | ||
315 | if (!ret) | ||
316 | if (copy_to_user(user_area, chsc_area, PAGE_SIZE)) | ||
317 | ret = -EFAULT; | ||
318 | out_free: | ||
319 | sprintf(dbf, "ret:%d", ret); | ||
320 | CHSC_LOG(0, dbf); | ||
321 | kfree(request); | ||
322 | free_page((unsigned long)chsc_area); | ||
323 | return ret; | ||
324 | } | ||
325 | |||
326 | static int chsc_ioctl_info_channel_path(void __user *user_cd) | ||
327 | { | ||
328 | struct chsc_chp_cd *cd; | ||
329 | int ret, ccode; | ||
330 | struct { | ||
331 | struct chsc_header request; | ||
332 | u32 : 2; | ||
333 | u32 m : 1; | ||
334 | u32 : 1; | ||
335 | u32 fmt1 : 4; | ||
336 | u32 cssid : 8; | ||
337 | u32 : 8; | ||
338 | u32 first_chpid : 8; | ||
339 | u32 : 24; | ||
340 | u32 last_chpid : 8; | ||
341 | u32 : 32; | ||
342 | struct chsc_header response; | ||
343 | u8 data[PAGE_SIZE - 20]; | ||
344 | } __attribute__ ((packed)) *scpcd_area; | ||
345 | |||
346 | scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
347 | if (!scpcd_area) | ||
348 | return -ENOMEM; | ||
349 | cd = kzalloc(sizeof(*cd), GFP_KERNEL); | ||
350 | if (!cd) { | ||
351 | ret = -ENOMEM; | ||
352 | goto out_free; | ||
353 | } | ||
354 | if (copy_from_user(cd, user_cd, sizeof(*cd))) { | ||
355 | ret = -EFAULT; | ||
356 | goto out_free; | ||
357 | } | ||
358 | scpcd_area->request.length = 0x0010; | ||
359 | scpcd_area->request.code = 0x0028; | ||
360 | scpcd_area->m = cd->m; | ||
361 | scpcd_area->fmt1 = cd->fmt; | ||
362 | scpcd_area->cssid = cd->chpid.cssid; | ||
363 | scpcd_area->first_chpid = cd->chpid.id; | ||
364 | scpcd_area->last_chpid = cd->chpid.id; | ||
365 | |||
366 | ccode = chsc(scpcd_area); | ||
367 | if (ccode != 0) { | ||
368 | ret = -EIO; | ||
369 | goto out_free; | ||
370 | } | ||
371 | if (scpcd_area->response.code != 0x0001) { | ||
372 | ret = -EIO; | ||
373 | CHSC_MSG(0, "scpcd: response code=%x\n", | ||
374 | scpcd_area->response.code); | ||
375 | goto out_free; | ||
376 | } | ||
377 | memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length); | ||
378 | if (copy_to_user(user_cd, cd, sizeof(*cd))) | ||
379 | ret = -EFAULT; | ||
380 | else | ||
381 | ret = 0; | ||
382 | out_free: | ||
383 | kfree(cd); | ||
384 | free_page((unsigned long)scpcd_area); | ||
385 | return ret; | ||
386 | } | ||
387 | |||
388 | static int chsc_ioctl_info_cu(void __user *user_cd) | ||
389 | { | ||
390 | struct chsc_cu_cd *cd; | ||
391 | int ret, ccode; | ||
392 | struct { | ||
393 | struct chsc_header request; | ||
394 | u32 : 2; | ||
395 | u32 m : 1; | ||
396 | u32 : 1; | ||
397 | u32 fmt1 : 4; | ||
398 | u32 cssid : 8; | ||
399 | u32 : 8; | ||
400 | u32 first_cun : 8; | ||
401 | u32 : 24; | ||
402 | u32 last_cun : 8; | ||
403 | u32 : 32; | ||
404 | struct chsc_header response; | ||
405 | u8 data[PAGE_SIZE - 20]; | ||
406 | } __attribute__ ((packed)) *scucd_area; | ||
407 | |||
408 | scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
409 | if (!scucd_area) | ||
410 | return -ENOMEM; | ||
411 | cd = kzalloc(sizeof(*cd), GFP_KERNEL); | ||
412 | if (!cd) { | ||
413 | ret = -ENOMEM; | ||
414 | goto out_free; | ||
415 | } | ||
416 | if (copy_from_user(cd, user_cd, sizeof(*cd))) { | ||
417 | ret = -EFAULT; | ||
418 | goto out_free; | ||
419 | } | ||
420 | scucd_area->request.length = 0x0010; | ||
421 | scucd_area->request.code = 0x0028; | ||
422 | scucd_area->m = cd->m; | ||
423 | scucd_area->fmt1 = cd->fmt; | ||
424 | scucd_area->cssid = cd->cssid; | ||
425 | scucd_area->first_cun = cd->cun; | ||
426 | scucd_area->last_cun = cd->cun; | ||
427 | |||
428 | ccode = chsc(scucd_area); | ||
429 | if (ccode != 0) { | ||
430 | ret = -EIO; | ||
431 | goto out_free; | ||
432 | } | ||
433 | if (scucd_area->response.code != 0x0001) { | ||
434 | ret = -EIO; | ||
435 | CHSC_MSG(0, "scucd: response code=%x\n", | ||
436 | scucd_area->response.code); | ||
437 | goto out_free; | ||
438 | } | ||
439 | memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length); | ||
440 | if (copy_to_user(user_cd, cd, sizeof(*cd))) | ||
441 | ret = -EFAULT; | ||
442 | else | ||
443 | ret = 0; | ||
444 | out_free: | ||
445 | kfree(cd); | ||
446 | free_page((unsigned long)scucd_area); | ||
447 | return ret; | ||
448 | } | ||
449 | |||
450 | static int chsc_ioctl_info_sch_cu(void __user *user_cud) | ||
451 | { | ||
452 | struct chsc_sch_cud *cud; | ||
453 | int ret, ccode; | ||
454 | struct { | ||
455 | struct chsc_header request; | ||
456 | u32 : 2; | ||
457 | u32 m : 1; | ||
458 | u32 : 5; | ||
459 | u32 fmt1 : 4; | ||
460 | u32 : 2; | ||
461 | u32 ssid : 2; | ||
462 | u32 first_sch : 16; | ||
463 | u32 : 8; | ||
464 | u32 cssid : 8; | ||
465 | u32 last_sch : 16; | ||
466 | u32 : 32; | ||
467 | struct chsc_header response; | ||
468 | u8 data[PAGE_SIZE - 20]; | ||
469 | } __attribute__ ((packed)) *sscud_area; | ||
470 | |||
471 | sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
472 | if (!sscud_area) | ||
473 | return -ENOMEM; | ||
474 | cud = kzalloc(sizeof(*cud), GFP_KERNEL); | ||
475 | if (!cud) { | ||
476 | ret = -ENOMEM; | ||
477 | goto out_free; | ||
478 | } | ||
479 | if (copy_from_user(cud, user_cud, sizeof(*cud))) { | ||
480 | ret = -EFAULT; | ||
481 | goto out_free; | ||
482 | } | ||
483 | sscud_area->request.length = 0x0010; | ||
484 | sscud_area->request.code = 0x0006; | ||
485 | sscud_area->m = cud->schid.m; | ||
486 | sscud_area->fmt1 = cud->fmt; | ||
487 | sscud_area->ssid = cud->schid.ssid; | ||
488 | sscud_area->first_sch = cud->schid.sch_no; | ||
489 | sscud_area->cssid = cud->schid.cssid; | ||
490 | sscud_area->last_sch = cud->schid.sch_no; | ||
491 | |||
492 | ccode = chsc(sscud_area); | ||
493 | if (ccode != 0) { | ||
494 | ret = -EIO; | ||
495 | goto out_free; | ||
496 | } | ||
497 | if (sscud_area->response.code != 0x0001) { | ||
498 | ret = -EIO; | ||
499 | CHSC_MSG(0, "sscud: response code=%x\n", | ||
500 | sscud_area->response.code); | ||
501 | goto out_free; | ||
502 | } | ||
503 | memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length); | ||
504 | if (copy_to_user(user_cud, cud, sizeof(*cud))) | ||
505 | ret = -EFAULT; | ||
506 | else | ||
507 | ret = 0; | ||
508 | out_free: | ||
509 | kfree(cud); | ||
510 | free_page((unsigned long)sscud_area); | ||
511 | return ret; | ||
512 | } | ||
513 | |||
514 | static int chsc_ioctl_conf_info(void __user *user_ci) | ||
515 | { | ||
516 | struct chsc_conf_info *ci; | ||
517 | int ret, ccode; | ||
518 | struct { | ||
519 | struct chsc_header request; | ||
520 | u32 : 2; | ||
521 | u32 m : 1; | ||
522 | u32 : 1; | ||
523 | u32 fmt1 : 4; | ||
524 | u32 cssid : 8; | ||
525 | u32 : 6; | ||
526 | u32 ssid : 2; | ||
527 | u32 : 8; | ||
528 | u64 : 64; | ||
529 | struct chsc_header response; | ||
530 | u8 data[PAGE_SIZE - 20]; | ||
531 | } __attribute__ ((packed)) *sci_area; | ||
532 | |||
533 | sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
534 | if (!sci_area) | ||
535 | return -ENOMEM; | ||
536 | ci = kzalloc(sizeof(*ci), GFP_KERNEL); | ||
537 | if (!ci) { | ||
538 | ret = -ENOMEM; | ||
539 | goto out_free; | ||
540 | } | ||
541 | if (copy_from_user(ci, user_ci, sizeof(*ci))) { | ||
542 | ret = -EFAULT; | ||
543 | goto out_free; | ||
544 | } | ||
545 | sci_area->request.length = 0x0010; | ||
546 | sci_area->request.code = 0x0012; | ||
547 | sci_area->m = ci->id.m; | ||
548 | sci_area->fmt1 = ci->fmt; | ||
549 | sci_area->cssid = ci->id.cssid; | ||
550 | sci_area->ssid = ci->id.ssid; | ||
551 | |||
552 | ccode = chsc(sci_area); | ||
553 | if (ccode != 0) { | ||
554 | ret = -EIO; | ||
555 | goto out_free; | ||
556 | } | ||
557 | if (sci_area->response.code != 0x0001) { | ||
558 | ret = -EIO; | ||
559 | CHSC_MSG(0, "sci: response code=%x\n", | ||
560 | sci_area->response.code); | ||
561 | goto out_free; | ||
562 | } | ||
563 | memcpy(&ci->scid, &sci_area->response, sci_area->response.length); | ||
564 | if (copy_to_user(user_ci, ci, sizeof(*ci))) | ||
565 | ret = -EFAULT; | ||
566 | else | ||
567 | ret = 0; | ||
568 | out_free: | ||
569 | kfree(ci); | ||
570 | free_page((unsigned long)sci_area); | ||
571 | return ret; | ||
572 | } | ||
573 | |||
574 | static int chsc_ioctl_conf_comp_list(void __user *user_ccl) | ||
575 | { | ||
576 | struct chsc_comp_list *ccl; | ||
577 | int ret, ccode; | ||
578 | struct { | ||
579 | struct chsc_header request; | ||
580 | u32 ctype : 8; | ||
581 | u32 : 4; | ||
582 | u32 fmt : 4; | ||
583 | u32 : 16; | ||
584 | u64 : 64; | ||
585 | u32 list_parm[2]; | ||
586 | u64 : 64; | ||
587 | struct chsc_header response; | ||
588 | u8 data[PAGE_SIZE - 36]; | ||
589 | } __attribute__ ((packed)) *sccl_area; | ||
590 | struct { | ||
591 | u32 m : 1; | ||
592 | u32 : 31; | ||
593 | u32 cssid : 8; | ||
594 | u32 : 16; | ||
595 | u32 chpid : 8; | ||
596 | } __attribute__ ((packed)) *chpid_parm; | ||
597 | struct { | ||
598 | u32 f_cssid : 8; | ||
599 | u32 l_cssid : 8; | ||
600 | u32 : 16; | ||
601 | u32 res; | ||
602 | } __attribute__ ((packed)) *cssids_parm; | ||
603 | |||
604 | sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
605 | if (!sccl_area) | ||
606 | return -ENOMEM; | ||
607 | ccl = kzalloc(sizeof(*ccl), GFP_KERNEL); | ||
608 | if (!ccl) { | ||
609 | ret = -ENOMEM; | ||
610 | goto out_free; | ||
611 | } | ||
612 | if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) { | ||
613 | ret = -EFAULT; | ||
614 | goto out_free; | ||
615 | } | ||
616 | sccl_area->request.length = 0x0020; | ||
617 | sccl_area->request.code = 0x0030; | ||
618 | sccl_area->fmt = ccl->req.fmt; | ||
619 | sccl_area->ctype = ccl->req.ctype; | ||
620 | switch (sccl_area->ctype) { | ||
621 | case CCL_CU_ON_CHP: | ||
622 | case CCL_IOP_CHP: | ||
623 | chpid_parm = (void *)&sccl_area->list_parm; | ||
624 | chpid_parm->m = ccl->req.chpid.m; | ||
625 | chpid_parm->cssid = ccl->req.chpid.chp.cssid; | ||
626 | chpid_parm->chpid = ccl->req.chpid.chp.id; | ||
627 | break; | ||
628 | case CCL_CSS_IMG: | ||
629 | case CCL_CSS_IMG_CONF_CHAR: | ||
630 | cssids_parm = (void *)&sccl_area->list_parm; | ||
631 | cssids_parm->f_cssid = ccl->req.cssids.f_cssid; | ||
632 | cssids_parm->l_cssid = ccl->req.cssids.l_cssid; | ||
633 | break; | ||
634 | } | ||
635 | ccode = chsc(sccl_area); | ||
636 | if (ccode != 0) { | ||
637 | ret = -EIO; | ||
638 | goto out_free; | ||
639 | } | ||
640 | if (sccl_area->response.code != 0x0001) { | ||
641 | ret = -EIO; | ||
642 | CHSC_MSG(0, "sccl: response code=%x\n", | ||
643 | sccl_area->response.code); | ||
644 | goto out_free; | ||
645 | } | ||
646 | memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length); | ||
647 | if (copy_to_user(user_ccl, ccl, sizeof(*ccl))) | ||
648 | ret = -EFAULT; | ||
649 | else | ||
650 | ret = 0; | ||
651 | out_free: | ||
652 | kfree(ccl); | ||
653 | free_page((unsigned long)sccl_area); | ||
654 | return ret; | ||
655 | } | ||
656 | |||
657 | static int chsc_ioctl_chpd(void __user *user_chpd) | ||
658 | { | ||
659 | struct chsc_cpd_info *chpd; | ||
660 | int ret; | ||
661 | |||
662 | chpd = kzalloc(sizeof(*chpd), GFP_KERNEL); | ||
663 | if (!chpd) | ||
664 | return -ENOMEM; | ||
665 | if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) { | ||
666 | ret = -EFAULT; | ||
667 | goto out_free; | ||
668 | } | ||
669 | ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt, | ||
670 | chpd->rfmt, chpd->c, chpd->m, | ||
671 | &chpd->chpdb); | ||
672 | if (ret) | ||
673 | goto out_free; | ||
674 | if (copy_to_user(user_chpd, chpd, sizeof(*chpd))) | ||
675 | ret = -EFAULT; | ||
676 | out_free: | ||
677 | kfree(chpd); | ||
678 | return ret; | ||
679 | } | ||
680 | |||
681 | static int chsc_ioctl_dcal(void __user *user_dcal) | ||
682 | { | ||
683 | struct chsc_dcal *dcal; | ||
684 | int ret, ccode; | ||
685 | struct { | ||
686 | struct chsc_header request; | ||
687 | u32 atype : 8; | ||
688 | u32 : 4; | ||
689 | u32 fmt : 4; | ||
690 | u32 : 16; | ||
691 | u32 res0[2]; | ||
692 | u32 list_parm[2]; | ||
693 | u32 res1[2]; | ||
694 | struct chsc_header response; | ||
695 | u8 data[PAGE_SIZE - 36]; | ||
696 | } __attribute__ ((packed)) *sdcal_area; | ||
697 | |||
698 | sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
699 | if (!sdcal_area) | ||
700 | return -ENOMEM; | ||
701 | dcal = kzalloc(sizeof(*dcal), GFP_KERNEL); | ||
702 | if (!dcal) { | ||
703 | ret = -ENOMEM; | ||
704 | goto out_free; | ||
705 | } | ||
706 | if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) { | ||
707 | ret = -EFAULT; | ||
708 | goto out_free; | ||
709 | } | ||
710 | sdcal_area->request.length = 0x0020; | ||
711 | sdcal_area->request.code = 0x0034; | ||
712 | sdcal_area->atype = dcal->req.atype; | ||
713 | sdcal_area->fmt = dcal->req.fmt; | ||
714 | memcpy(&sdcal_area->list_parm, &dcal->req.list_parm, | ||
715 | sizeof(sdcal_area->list_parm)); | ||
716 | |||
717 | ccode = chsc(sdcal_area); | ||
718 | if (ccode != 0) { | ||
719 | ret = -EIO; | ||
720 | goto out_free; | ||
721 | } | ||
722 | if (sdcal_area->response.code != 0x0001) { | ||
723 | ret = -EIO; | ||
724 | CHSC_MSG(0, "sdcal: response code=%x\n", | ||
725 | sdcal_area->response.code); | ||
726 | goto out_free; | ||
727 | } | ||
728 | memcpy(&dcal->sdcal, &sdcal_area->response, | ||
729 | sdcal_area->response.length); | ||
730 | if (copy_to_user(user_dcal, dcal, sizeof(*dcal))) | ||
731 | ret = -EFAULT; | ||
732 | else | ||
733 | ret = 0; | ||
734 | out_free: | ||
735 | kfree(dcal); | ||
736 | free_page((unsigned long)sdcal_area); | ||
737 | return ret; | ||
738 | } | ||
739 | |||
740 | static long chsc_ioctl(struct file *filp, unsigned int cmd, | ||
741 | unsigned long arg) | ||
742 | { | ||
743 | CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd); | ||
744 | switch (cmd) { | ||
745 | case CHSC_START: | ||
746 | return chsc_ioctl_start((void __user *)arg); | ||
747 | case CHSC_INFO_CHANNEL_PATH: | ||
748 | return chsc_ioctl_info_channel_path((void __user *)arg); | ||
749 | case CHSC_INFO_CU: | ||
750 | return chsc_ioctl_info_cu((void __user *)arg); | ||
751 | case CHSC_INFO_SCH_CU: | ||
752 | return chsc_ioctl_info_sch_cu((void __user *)arg); | ||
753 | case CHSC_INFO_CI: | ||
754 | return chsc_ioctl_conf_info((void __user *)arg); | ||
755 | case CHSC_INFO_CCL: | ||
756 | return chsc_ioctl_conf_comp_list((void __user *)arg); | ||
757 | case CHSC_INFO_CPD: | ||
758 | return chsc_ioctl_chpd((void __user *)arg); | ||
759 | case CHSC_INFO_DCAL: | ||
760 | return chsc_ioctl_dcal((void __user *)arg); | ||
761 | default: /* unknown ioctl number */ | ||
762 | return -ENOIOCTLCMD; | ||
763 | } | ||
764 | } | ||
765 | |||
766 | static const struct file_operations chsc_fops = { | ||
767 | .owner = THIS_MODULE, | ||
768 | .unlocked_ioctl = chsc_ioctl, | ||
769 | .compat_ioctl = chsc_ioctl, | ||
770 | }; | ||
771 | |||
772 | static struct miscdevice chsc_misc_device = { | ||
773 | .minor = MISC_DYNAMIC_MINOR, | ||
774 | .name = "chsc", | ||
775 | .fops = &chsc_fops, | ||
776 | }; | ||
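The misc device shows up as a character device named "chsc" (typically /dev/chsc), so user space drives the ioctls above with page-sized buffers. A hedged user-space sketch, assuming the CHSC_START ioctl and request layout are exported through <asm/chsc.h>; the caller is responsible for filling in the command-specific request area:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <asm/chsc.h>

    /* Submit a caller-prepared 4K CHSC request area; 0 on success. */
    static int chsc_start_request(void *area)
    {
    	int fd, rc;

    	fd = open("/dev/chsc", O_RDWR);
    	if (fd < 0)
    		return -1;
    	rc = ioctl(fd, CHSC_START, area);	/* on success, 'area' holds the response */
    	close(fd);
    	return rc;
    }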
777 | |||
778 | static int __init chsc_misc_init(void) | ||
779 | { | ||
780 | return misc_register(&chsc_misc_device); | ||
781 | } | ||
782 | |||
783 | static void chsc_misc_cleanup(void) | ||
784 | { | ||
785 | misc_deregister(&chsc_misc_device); | ||
786 | } | ||
787 | |||
788 | static int __init chsc_sch_init(void) | ||
789 | { | ||
790 | int ret; | ||
791 | |||
792 | ret = chsc_init_dbfs(); | ||
793 | if (ret) | ||
794 | return ret; | ||
795 | isc_register(CHSC_SCH_ISC); | ||
796 | ret = chsc_init_sch_driver(); | ||
797 | if (ret) | ||
798 | goto out_dbf; | ||
799 | ret = chsc_misc_init(); | ||
800 | if (ret) | ||
801 | goto out_driver; | ||
802 | return ret; | ||
803 | out_driver: | ||
804 | chsc_cleanup_sch_driver(); | ||
805 | out_dbf: | ||
806 | isc_unregister(CHSC_SCH_ISC); | ||
807 | chsc_remove_dbfs(); | ||
808 | return ret; | ||
809 | } | ||
810 | |||
811 | static void __exit chsc_sch_exit(void) | ||
812 | { | ||
813 | chsc_misc_cleanup(); | ||
814 | chsc_cleanup_sch_driver(); | ||
815 | isc_unregister(CHSC_SCH_ISC); | ||
816 | chsc_remove_dbfs(); | ||
817 | } | ||
818 | |||
819 | module_init(chsc_sch_init); | ||
820 | module_exit(chsc_sch_exit); | ||
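The ioctl multiplexer and the "chsc" misc device above give user space a direct path to these CHSC commands. Below is a minimal, hedged user-space sketch of driving the CHSC_INFO_DCAL handler shown above; the header providing struct chsc_dcal and the ioctl numbers is assumed to be <asm/chsc.h>, and the request field values are purely illustrative.

/*
 * Hedged sketch only: exercise the new /dev/chsc misc device via the
 * CHSC_INFO_DCAL ioctl handled above.  Only the field names used by
 * chsc_ioctl_dcal() (req.atype, req.fmt, sdcal) are taken from the driver
 * code; the header location is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/chsc.h>	/* assumed home of struct chsc_dcal and CHSC_INFO_DCAL */

int main(void)
{
	struct chsc_dcal dcal;
	int fd, rc;

	fd = open("/dev/chsc", O_RDWR);
	if (fd < 0) {
		perror("open /dev/chsc");
		return 1;
	}
	memset(&dcal, 0, sizeof(dcal));
	dcal.req.atype = 0;	/* access type; value is illustrative only */
	dcal.req.fmt = 0;	/* response format; illustrative only */
	rc = ioctl(fd, CHSC_INFO_DCAL, &dcal);
	if (rc < 0)
		perror("CHSC_INFO_DCAL");
	/* on success, dcal.sdcal holds the response block copied back above */
	close(fd);
	return rc < 0;
}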
diff --git a/drivers/s390/cio/chsc_sch.h b/drivers/s390/cio/chsc_sch.h new file mode 100644 index 000000000000..589ebfad6aad --- /dev/null +++ b/drivers/s390/cio/chsc_sch.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef _CHSC_SCH_H | ||
2 | #define _CHSC_SCH_H | ||
3 | |||
4 | struct chsc_request { | ||
5 | struct completion completion; | ||
6 | struct irb irb; | ||
7 | }; | ||
8 | |||
9 | struct chsc_private { | ||
10 | struct chsc_request *request; | ||
11 | }; | ||
12 | |||
13 | #endif | ||
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index b32d7eb3d81a..33bff8fec7d1 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * drivers/s390/cio/cio.c | 2 | * drivers/s390/cio/cio.c |
3 | * S/390 common I/O routines -- low level i/o calls | 3 | * S/390 common I/O routines -- low level i/o calls |
4 | * | 4 | * |
5 | * Copyright (C) IBM Corp. 1999,2006 | 5 | * Copyright IBM Corp. 1999,2008 |
6 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | 6 | * Author(s): Ingo Adlung (adlung@de.ibm.com) |
7 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
8 | * Arnd Bergmann (arndb@de.ibm.com) | 8 | * Arnd Bergmann (arndb@de.ibm.com) |
@@ -24,7 +24,9 @@ | |||
24 | #include <asm/ipl.h> | 24 | #include <asm/ipl.h> |
25 | #include <asm/chpid.h> | 25 | #include <asm/chpid.h> |
26 | #include <asm/airq.h> | 26 | #include <asm/airq.h> |
27 | #include <asm/isc.h> | ||
27 | #include <asm/cpu.h> | 28 | #include <asm/cpu.h> |
29 | #include <asm/fcx.h> | ||
28 | #include "cio.h" | 30 | #include "cio.h" |
29 | #include "css.h" | 31 | #include "css.h" |
30 | #include "chsc.h" | 32 | #include "chsc.h" |
@@ -72,7 +74,6 @@ out_unregister: | |||
72 | debug_unregister(cio_debug_trace_id); | 74 | debug_unregister(cio_debug_trace_id); |
73 | if (cio_debug_crw_id) | 75 | if (cio_debug_crw_id) |
74 | debug_unregister(cio_debug_crw_id); | 76 | debug_unregister(cio_debug_crw_id); |
75 | printk(KERN_WARNING"cio: could not initialize debugging\n"); | ||
76 | return -1; | 77 | return -1; |
77 | } | 78 | } |
78 | 79 | ||
@@ -128,7 +129,7 @@ cio_tpi(void) | |||
128 | local_bh_disable(); | 129 | local_bh_disable(); |
129 | irq_enter (); | 130 | irq_enter (); |
130 | spin_lock(sch->lock); | 131 | spin_lock(sch->lock); |
131 | memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); | 132 | memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); |
132 | if (sch->driver && sch->driver->irq) | 133 | if (sch->driver && sch->driver->irq) |
133 | sch->driver->irq(sch); | 134 | sch->driver->irq(sch); |
134 | spin_unlock(sch->lock); | 135 | spin_unlock(sch->lock); |
@@ -167,30 +168,30 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ | |||
167 | { | 168 | { |
168 | char dbf_txt[15]; | 169 | char dbf_txt[15]; |
169 | int ccode; | 170 | int ccode; |
170 | struct orb *orb; | 171 | union orb *orb; |
171 | 172 | ||
172 | CIO_TRACE_EVENT(4, "stIO"); | 173 | CIO_TRACE_EVENT(4, "stIO"); |
173 | CIO_TRACE_EVENT(4, sch->dev.bus_id); | 174 | CIO_TRACE_EVENT(4, sch->dev.bus_id); |
174 | 175 | ||
175 | orb = &to_io_private(sch)->orb; | 176 | orb = &to_io_private(sch)->orb; |
176 | /* sch is always under 2G. */ | 177 | /* sch is always under 2G. */ |
177 | orb->intparm = (u32)(addr_t)sch; | 178 | orb->cmd.intparm = (u32)(addr_t)sch; |
178 | orb->fmt = 1; | 179 | orb->cmd.fmt = 1; |
179 | 180 | ||
180 | orb->pfch = sch->options.prefetch == 0; | 181 | orb->cmd.pfch = sch->options.prefetch == 0; |
181 | orb->spnd = sch->options.suspend; | 182 | orb->cmd.spnd = sch->options.suspend; |
182 | orb->ssic = sch->options.suspend && sch->options.inter; | 183 | orb->cmd.ssic = sch->options.suspend && sch->options.inter; |
183 | orb->lpm = (lpm != 0) ? lpm : sch->lpm; | 184 | orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm; |
184 | #ifdef CONFIG_64BIT | 185 | #ifdef CONFIG_64BIT |
185 | /* | 186 | /* |
186 | * for 64 bit we always support 64 bit IDAWs with 4k page size only | 187 | * for 64 bit we always support 64 bit IDAWs with 4k page size only |
187 | */ | 188 | */ |
188 | orb->c64 = 1; | 189 | orb->cmd.c64 = 1; |
189 | orb->i2k = 0; | 190 | orb->cmd.i2k = 0; |
190 | #endif | 191 | #endif |
191 | orb->key = key >> 4; | 192 | orb->cmd.key = key >> 4; |
192 | /* issue "Start Subchannel" */ | 193 | /* issue "Start Subchannel" */ |
193 | orb->cpa = (__u32) __pa(cpa); | 194 | orb->cmd.cpa = (__u32) __pa(cpa); |
194 | ccode = ssch(sch->schid, orb); | 195 | ccode = ssch(sch->schid, orb); |
195 | 196 | ||
196 | /* process condition code */ | 197 | /* process condition code */ |
@@ -202,7 +203,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ | |||
202 | /* | 203 | /* |
203 | * initialize device status information | 204 | * initialize device status information |
204 | */ | 205 | */ |
205 | sch->schib.scsw.actl |= SCSW_ACTL_START_PEND; | 206 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; |
206 | return 0; | 207 | return 0; |
207 | case 1: /* status pending */ | 208 | case 1: /* status pending */ |
208 | case 2: /* busy */ | 209 | case 2: /* busy */ |
@@ -237,7 +238,7 @@ cio_resume (struct subchannel *sch) | |||
237 | 238 | ||
238 | switch (ccode) { | 239 | switch (ccode) { |
239 | case 0: | 240 | case 0: |
240 | sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND; | 241 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND; |
241 | return 0; | 242 | return 0; |
242 | case 1: | 243 | case 1: |
243 | return -EBUSY; | 244 | return -EBUSY; |
@@ -277,7 +278,7 @@ cio_halt(struct subchannel *sch) | |||
277 | 278 | ||
278 | switch (ccode) { | 279 | switch (ccode) { |
279 | case 0: | 280 | case 0: |
280 | sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND; | 281 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND; |
281 | return 0; | 282 | return 0; |
282 | case 1: /* status pending */ | 283 | case 1: /* status pending */ |
283 | case 2: /* busy */ | 284 | case 2: /* busy */ |
@@ -312,7 +313,7 @@ cio_clear(struct subchannel *sch) | |||
312 | 313 | ||
313 | switch (ccode) { | 314 | switch (ccode) { |
314 | case 0: | 315 | case 0: |
315 | sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND; | 316 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND; |
316 | return 0; | 317 | return 0; |
317 | default: /* device not operational */ | 318 | default: /* device not operational */ |
318 | return -ENODEV; | 319 | return -ENODEV; |
@@ -387,8 +388,10 @@ cio_modify (struct subchannel *sch) | |||
387 | return ret; | 388 | return ret; |
388 | } | 389 | } |
389 | 390 | ||
390 | /* | 391 | /** |
391 | * Enable subchannel. | 392 | * cio_enable_subchannel - enable a subchannel. |
393 | * @sch: subchannel to be enabled | ||
394 | * @intparm: interruption parameter to set | ||
392 | */ | 395 | */ |
393 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) | 396 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) |
394 | { | 397 | { |
@@ -434,12 +437,13 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm) | |||
434 | CIO_TRACE_EVENT (2, dbf_txt); | 437 | CIO_TRACE_EVENT (2, dbf_txt); |
435 | return ret; | 438 | return ret; |
436 | } | 439 | } |
440 | EXPORT_SYMBOL_GPL(cio_enable_subchannel); | ||
437 | 441 | ||
438 | /* | 442 | /** |
439 | * Disable subchannel. | 443 | * cio_disable_subchannel - disable a subchannel. |
444 | * @sch: subchannel to disable | ||
440 | */ | 445 | */ |
441 | int | 446 | int cio_disable_subchannel(struct subchannel *sch) |
442 | cio_disable_subchannel (struct subchannel *sch) | ||
443 | { | 447 | { |
444 | char dbf_txt[15]; | 448 | char dbf_txt[15]; |
445 | int ccode; | 449 | int ccode; |
@@ -455,7 +459,7 @@ cio_disable_subchannel (struct subchannel *sch) | |||
455 | if (ccode == 3) /* Not operational. */ | 459 | if (ccode == 3) /* Not operational. */ |
456 | return -ENODEV; | 460 | return -ENODEV; |
457 | 461 | ||
458 | if (sch->schib.scsw.actl != 0) | 462 | if (scsw_actl(&sch->schib.scsw) != 0) |
459 | /* | 463 | /* |
460 | * the disable function must not be called while there are | 464 | * the disable function must not be called while there are |
461 | * requests pending for completion ! | 465 | * requests pending for completion ! |
@@ -484,6 +488,7 @@ cio_disable_subchannel (struct subchannel *sch) | |||
484 | CIO_TRACE_EVENT (2, dbf_txt); | 488 | CIO_TRACE_EVENT (2, dbf_txt); |
485 | return ret; | 489 | return ret; |
486 | } | 490 | } |
491 | EXPORT_SYMBOL_GPL(cio_disable_subchannel); | ||
487 | 492 | ||
488 | int cio_create_sch_lock(struct subchannel *sch) | 493 | int cio_create_sch_lock(struct subchannel *sch) |
489 | { | 494 | { |
@@ -494,27 +499,61 @@ int cio_create_sch_lock(struct subchannel *sch) | |||
494 | return 0; | 499 | return 0; |
495 | } | 500 | } |
496 | 501 | ||
497 | /* | 502 | static int cio_check_devno_blacklisted(struct subchannel *sch) |
498 | * cio_validate_subchannel() | 503 | { |
504 | if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { | ||
505 | /* | ||
506 | * This device must not be known to Linux. So we simply | ||
507 | * say that there is no device and return ENODEV. | ||
508 | */ | ||
509 | CIO_MSG_EVENT(6, "Blacklisted device detected " | ||
510 | "at devno %04X, subchannel set %x\n", | ||
511 | sch->schib.pmcw.dev, sch->schid.ssid); | ||
512 | return -ENODEV; | ||
513 | } | ||
514 | return 0; | ||
515 | } | ||
516 | |||
517 | static int cio_validate_io_subchannel(struct subchannel *sch) | ||
518 | { | ||
519 | /* Initialization for io subchannels. */ | ||
520 | if (!css_sch_is_valid(&sch->schib)) | ||
521 | return -ENODEV; | ||
522 | |||
523 | /* Devno is valid. */ | ||
524 | return cio_check_devno_blacklisted(sch); | ||
525 | } | ||
526 | |||
527 | static int cio_validate_msg_subchannel(struct subchannel *sch) | ||
528 | { | ||
529 | /* Initialization for message subchannels. */ | ||
530 | if (!css_sch_is_valid(&sch->schib)) | ||
531 | return -ENODEV; | ||
532 | |||
533 | /* Devno is valid. */ | ||
534 | return cio_check_devno_blacklisted(sch); | ||
535 | } | ||
536 | |||
537 | /** | ||
538 | * cio_validate_subchannel - basic validation of subchannel | ||
539 | * @sch: subchannel structure to be filled out | ||
540 | * @schid: subchannel id | ||
499 | * | 541 | * |
500 | * Find out subchannel type and initialize struct subchannel. | 542 | * Find out subchannel type and initialize struct subchannel. |
501 | * Return codes: | 543 | * Return codes: |
502 | * SUBCHANNEL_TYPE_IO for a normal io subchannel | 544 | * 0 on success |
503 | * SUBCHANNEL_TYPE_CHSC for a chsc subchannel | ||
504 | * SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel | ||
505 | * SUBCHANNEL_TYPE_ADM for a adm(?) subchannel | ||
506 | * -ENXIO for non-defined subchannels | 545 | * -ENXIO for non-defined subchannels |
507 | * -ENODEV for subchannels with invalid device number or blacklisted devices | 546 | * -ENODEV for invalid subchannels or blacklisted devices |
547 | * -EIO for subchannels in an invalid subchannel set | ||
508 | */ | 548 | */ |
509 | int | 549 | int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) |
510 | cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | ||
511 | { | 550 | { |
512 | char dbf_txt[15]; | 551 | char dbf_txt[15]; |
513 | int ccode; | 552 | int ccode; |
514 | int err; | 553 | int err; |
515 | 554 | ||
516 | sprintf (dbf_txt, "valsch%x", schid.sch_no); | 555 | sprintf(dbf_txt, "valsch%x", schid.sch_no); |
517 | CIO_TRACE_EVENT (4, dbf_txt); | 556 | CIO_TRACE_EVENT(4, dbf_txt); |
518 | 557 | ||
519 | /* Nuke all fields. */ | 558 | /* Nuke all fields. */ |
520 | memset(sch, 0, sizeof(struct subchannel)); | 559 | memset(sch, 0, sizeof(struct subchannel)); |
@@ -546,67 +585,21 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | |||
546 | /* Copy subchannel type from path management control word. */ | 585 | /* Copy subchannel type from path management control word. */ |
547 | sch->st = sch->schib.pmcw.st; | 586 | sch->st = sch->schib.pmcw.st; |
548 | 587 | ||
549 | /* | 588 | switch (sch->st) { |
550 | * ... just being curious we check for non I/O subchannels | 589 | case SUBCHANNEL_TYPE_IO: |
551 | */ | 590 | err = cio_validate_io_subchannel(sch); |
552 | if (sch->st != 0) { | 591 | break; |
553 | CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports " | 592 | case SUBCHANNEL_TYPE_MSG: |
554 | "non-I/O subchannel type %04X\n", | 593 | err = cio_validate_msg_subchannel(sch); |
555 | sch->schid.ssid, sch->schid.sch_no, sch->st); | 594 | break; |
556 | /* We stop here for non-io subchannels. */ | 595 | default: |
557 | err = sch->st; | 596 | err = 0; |
558 | goto out; | ||
559 | } | ||
560 | |||
561 | /* Initialization for io subchannels. */ | ||
562 | if (!css_sch_is_valid(&sch->schib)) { | ||
563 | err = -ENODEV; | ||
564 | goto out; | ||
565 | } | 597 | } |
566 | 598 | if (err) | |
567 | /* Devno is valid. */ | ||
568 | if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) { | ||
569 | /* | ||
570 | * This device must not be known to Linux. So we simply | ||
571 | * say that there is no device and return ENODEV. | ||
572 | */ | ||
573 | CIO_MSG_EVENT(6, "Blacklisted device detected " | ||
574 | "at devno %04X, subchannel set %x\n", | ||
575 | sch->schib.pmcw.dev, sch->schid.ssid); | ||
576 | err = -ENODEV; | ||
577 | goto out; | 599 | goto out; |
578 | } | ||
579 | if (cio_is_console(sch->schid)) { | ||
580 | sch->opm = 0xff; | ||
581 | sch->isc = 1; | ||
582 | } else { | ||
583 | sch->opm = chp_get_sch_opm(sch); | ||
584 | sch->isc = 3; | ||
585 | } | ||
586 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | ||
587 | |||
588 | CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X " | ||
589 | "- PIM = %02X, PAM = %02X, POM = %02X\n", | ||
590 | sch->schib.pmcw.dev, sch->schid.ssid, | ||
591 | sch->schid.sch_no, sch->schib.pmcw.pim, | ||
592 | sch->schib.pmcw.pam, sch->schib.pmcw.pom); | ||
593 | 600 | ||
594 | /* | 601 | CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", |
595 | * We now have to initially ... | 602 | sch->schid.ssid, sch->schid.sch_no, sch->st); |
596 | * ... enable "concurrent sense" | ||
597 | * ... enable "multipath mode" if more than one | ||
598 | * CHPID is available. This is done regardless | ||
599 | * whether multiple paths are available for us. | ||
600 | */ | ||
601 | sch->schib.pmcw.csense = 1; /* concurrent sense */ | ||
602 | sch->schib.pmcw.ena = 0; | ||
603 | if ((sch->lpm & (sch->lpm - 1)) != 0) | ||
604 | sch->schib.pmcw.mp = 1; /* multipath mode */ | ||
605 | /* clean up possible residual cmf stuff */ | ||
606 | sch->schib.pmcw.mme = 0; | ||
607 | sch->schib.pmcw.mbfc = 0; | ||
608 | sch->schib.pmcw.mbi = 0; | ||
609 | sch->schib.mba = 0; | ||
610 | return 0; | 603 | return 0; |
611 | out: | 604 | out: |
612 | if (!cio_is_console(schid)) | 605 | if (!cio_is_console(schid)) |
@@ -647,7 +640,7 @@ do_IRQ (struct pt_regs *regs) | |||
647 | */ | 640 | */ |
648 | if (tpi_info->adapter_IO == 1 && | 641 | if (tpi_info->adapter_IO == 1 && |
649 | tpi_info->int_type == IO_INTERRUPT_TYPE) { | 642 | tpi_info->int_type == IO_INTERRUPT_TYPE) { |
650 | do_adapter_IO(); | 643 | do_adapter_IO(tpi_info->isc); |
651 | continue; | 644 | continue; |
652 | } | 645 | } |
653 | sch = (struct subchannel *)(unsigned long)tpi_info->intparm; | 646 | sch = (struct subchannel *)(unsigned long)tpi_info->intparm; |
@@ -706,9 +699,9 @@ void wait_cons_dev(void) | |||
706 | if (!console_subchannel_in_use) | 699 | if (!console_subchannel_in_use) |
707 | return; | 700 | return; |
708 | 701 | ||
709 | /* disable all but isc 1 (console device) */ | 702 | /* disable all but the console isc */ |
710 | __ctl_store (save_cr6, 6, 6); | 703 | __ctl_store (save_cr6, 6, 6); |
711 | cr6 = 0x40000000; | 704 | cr6 = 1UL << (31 - CONSOLE_ISC); |
712 | __ctl_load (cr6, 6, 6); | 705 | __ctl_load (cr6, 6, 6); |
713 | 706 | ||
714 | do { | 707 | do { |
@@ -716,7 +709,7 @@ void wait_cons_dev(void) | |||
716 | if (!cio_tpi()) | 709 | if (!cio_tpi()) |
717 | cpu_relax(); | 710 | cpu_relax(); |
718 | spin_lock(console_subchannel.lock); | 711 | spin_lock(console_subchannel.lock); |
719 | } while (console_subchannel.schib.scsw.actl != 0); | 712 | } while (console_subchannel.schib.scsw.cmd.actl != 0); |
720 | /* | 713 | /* |
721 | * restore previous isc value | 714 | * restore previous isc value |
722 | */ | 715 | */ |
@@ -761,7 +754,6 @@ cio_get_console_sch_no(void) | |||
761 | /* unlike in 2.4, we cannot autoprobe here, since | 754 | /* unlike in 2.4, we cannot autoprobe here, since |
762 | * the channel subsystem is not fully initialized. | 755 | * the channel subsystem is not fully initialized. |
763 | * With some luck, the HWC console can take over */ | 756 | * With some luck, the HWC console can take over */ |
764 | printk(KERN_WARNING "cio: No ccw console found!\n"); | ||
765 | return -1; | 757 | return -1; |
766 | } | 758 | } |
767 | return console_irq; | 759 | return console_irq; |
@@ -778,6 +770,7 @@ cio_probe_console(void) | |||
778 | sch_no = cio_get_console_sch_no(); | 770 | sch_no = cio_get_console_sch_no(); |
779 | if (sch_no == -1) { | 771 | if (sch_no == -1) { |
780 | console_subchannel_in_use = 0; | 772 | console_subchannel_in_use = 0; |
773 | printk(KERN_WARNING "cio: No ccw console found!\n"); | ||
781 | return ERR_PTR(-ENODEV); | 774 | return ERR_PTR(-ENODEV); |
782 | } | 775 | } |
783 | memset(&console_subchannel, 0, sizeof(struct subchannel)); | 776 | memset(&console_subchannel, 0, sizeof(struct subchannel)); |
@@ -790,15 +783,15 @@ cio_probe_console(void) | |||
790 | } | 783 | } |
791 | 784 | ||
792 | /* | 785 | /* |
793 | * enable console I/O-interrupt subclass 1 | 786 | * enable console I/O-interrupt subclass |
794 | */ | 787 | */ |
795 | ctl_set_bit(6, 30); | 788 | isc_register(CONSOLE_ISC); |
796 | console_subchannel.isc = 1; | 789 | console_subchannel.schib.pmcw.isc = CONSOLE_ISC; |
797 | console_subchannel.schib.pmcw.isc = 1; | ||
798 | console_subchannel.schib.pmcw.intparm = | 790 | console_subchannel.schib.pmcw.intparm = |
799 | (u32)(addr_t)&console_subchannel; | 791 | (u32)(addr_t)&console_subchannel; |
800 | ret = cio_modify(&console_subchannel); | 792 | ret = cio_modify(&console_subchannel); |
801 | if (ret) { | 793 | if (ret) { |
794 | isc_unregister(CONSOLE_ISC); | ||
802 | console_subchannel_in_use = 0; | 795 | console_subchannel_in_use = 0; |
803 | return ERR_PTR(ret); | 796 | return ERR_PTR(ret); |
804 | } | 797 | } |
@@ -810,7 +803,7 @@ cio_release_console(void) | |||
810 | { | 803 | { |
811 | console_subchannel.schib.pmcw.intparm = 0; | 804 | console_subchannel.schib.pmcw.intparm = 0; |
812 | cio_modify(&console_subchannel); | 805 | cio_modify(&console_subchannel); |
813 | ctl_clear_bit(6, 24); | 806 | isc_unregister(CONSOLE_ISC); |
814 | console_subchannel_in_use = 0; | 807 | console_subchannel_in_use = 0; |
815 | } | 808 | } |
816 | 809 | ||
@@ -864,7 +857,7 @@ static void udelay_reset(unsigned long usecs) | |||
864 | } | 857 | } |
865 | 858 | ||
866 | static int | 859 | static int |
867 | __clear_subchannel_easy(struct subchannel_id schid) | 860 | __clear_io_subchannel_easy(struct subchannel_id schid) |
868 | { | 861 | { |
869 | int retry; | 862 | int retry; |
870 | 863 | ||
@@ -883,6 +876,12 @@ __clear_subchannel_easy(struct subchannel_id schid) | |||
883 | return -EBUSY; | 876 | return -EBUSY; |
884 | } | 877 | } |
885 | 878 | ||
879 | static void __clear_chsc_subchannel_easy(void) | ||
880 | { | ||
881 | /* It seems we can only wait for a bit here :/ */ | ||
882 | udelay_reset(100); | ||
883 | } | ||
884 | |||
886 | static int pgm_check_occured; | 885 | static int pgm_check_occured; |
887 | 886 | ||
888 | static void cio_reset_pgm_check_handler(void) | 887 | static void cio_reset_pgm_check_handler(void) |
@@ -921,11 +920,22 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) | |||
921 | case -ENODEV: | 920 | case -ENODEV: |
922 | break; | 921 | break; |
923 | default: /* -EBUSY */ | 922 | default: /* -EBUSY */ |
924 | if (__clear_subchannel_easy(schid)) | 923 | switch (schib.pmcw.st) { |
925 | break; /* give up... */ | 924 | case SUBCHANNEL_TYPE_IO: |
925 | if (__clear_io_subchannel_easy(schid)) | ||
926 | goto out; /* give up... */ | ||
927 | break; | ||
928 | case SUBCHANNEL_TYPE_CHSC: | ||
929 | __clear_chsc_subchannel_easy(); | ||
930 | break; | ||
931 | default: | ||
932 | /* No default clear strategy */ | ||
933 | break; | ||
934 | } | ||
926 | stsch(schid, &schib); | 935 | stsch(schid, &schib); |
927 | __disable_subchannel_easy(schid, &schib); | 936 | __disable_subchannel_easy(schid, &schib); |
928 | } | 937 | } |
938 | out: | ||
929 | return 0; | 939 | return 0; |
930 | } | 940 | } |
931 | 941 | ||
@@ -1068,3 +1078,61 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) | |||
1068 | iplinfo->is_qdio = schib.pmcw.qf; | 1078 | iplinfo->is_qdio = schib.pmcw.qf; |
1069 | return 0; | 1079 | return 0; |
1070 | } | 1080 | } |
1081 | |||
1082 | /** | ||
1083 | * cio_tm_start_key - perform start function | ||
1084 | * @sch: subchannel on which to perform the start function | ||
1085 | * @tcw: transport-command word to be started | ||
1086 | * @lpm: mask of paths to use | ||
1087 | * @key: storage key to use for storage access | ||
1088 | * | ||
1089 | * Start the tcw on the given subchannel. Return zero on success, non-zero | ||
1090 | * otherwise. | ||
1091 | */ | ||
1092 | int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key) | ||
1093 | { | ||
1094 | int cc; | ||
1095 | union orb *orb = &to_io_private(sch)->orb; | ||
1096 | |||
1097 | memset(orb, 0, sizeof(union orb)); | ||
1098 | orb->tm.intparm = (u32) (addr_t) sch; | ||
1099 | orb->tm.key = key >> 4; | ||
1100 | orb->tm.b = 1; | ||
1101 | orb->tm.lpm = lpm ? lpm : sch->lpm; | ||
1102 | orb->tm.tcw = (u32) (addr_t) tcw; | ||
1103 | cc = ssch(sch->schid, orb); | ||
1104 | switch (cc) { | ||
1105 | case 0: | ||
1106 | return 0; | ||
1107 | case 1: | ||
1108 | case 2: | ||
1109 | return -EBUSY; | ||
1110 | default: | ||
1111 | return cio_start_handle_notoper(sch, lpm); | ||
1112 | } | ||
1113 | } | ||
1114 | |||
1115 | /** | ||
1116 | * cio_tm_intrg - perform interrogate function | ||
1117 | * @sch: subchannel on which to perform the interrogate function | ||
1118 | * | ||
1119 | * If the specified subchannel is running in transport-mode, perform the | ||
1120 | * interrogate function. Return zero on success, non-zero otherwie. | ||
1121 | */ | ||
1122 | int cio_tm_intrg(struct subchannel *sch) | ||
1123 | { | ||
1124 | int cc; | ||
1125 | |||
1126 | if (!to_io_private(sch)->orb.tm.b) | ||
1127 | return -EINVAL; | ||
1128 | cc = xsch(sch->schid); | ||
1129 | switch (cc) { | ||
1130 | case 0: | ||
1131 | case 2: | ||
1132 | return 0; | ||
1133 | case 1: | ||
1134 | return -EBUSY; | ||
1135 | default: | ||
1136 | return -ENODEV; | ||
1137 | } | ||
1138 | } | ||
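The two transport-mode helpers added at the end of cio.c are meant for subchannel drivers that issue channel programs in transport (FCX) mode. Here is a hedged sketch of a caller; example_start_tm() is a hypothetical name, the tcw is assumed to be fully built by the caller, and only the cio_tm_start_key()/cio_tm_intrg() signatures introduced above are relied on.

#include <asm/fcx.h>
#include "cio.h"

/* Start a caller-prepared transport-command word on @sch.  Passing lpm=0
 * lets cio_tm_start_key() fall back to sch->lpm, i.e. all currently usable
 * paths.  On -EBUSY we interrogate the subchannel once; a real driver would
 * retry or escalate from here. */
static int example_start_tm(struct subchannel *sch, struct tcw *tcw, u8 key)
{
	int ret;

	ret = cio_tm_start_key(sch, tcw, 0, key);
	if (ret == -EBUSY)
		ret = cio_tm_intrg(sch);
	return ret;
}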
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 6e933aebe013..3b236d20e835 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h | |||
@@ -3,9 +3,12 @@ | |||
3 | 3 | ||
4 | #include <linux/mutex.h> | 4 | #include <linux/mutex.h> |
5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
6 | #include <linux/mod_devicetable.h> | ||
6 | #include <asm/chpid.h> | 7 | #include <asm/chpid.h> |
8 | #include <asm/cio.h> | ||
9 | #include <asm/fcx.h> | ||
10 | #include <asm/schid.h> | ||
7 | #include "chsc.h" | 11 | #include "chsc.h" |
8 | #include "schid.h" | ||
9 | 12 | ||
10 | /* | 13 | /* |
11 | * path management control word | 14 | * path management control word |
@@ -13,7 +16,7 @@ | |||
13 | struct pmcw { | 16 | struct pmcw { |
14 | u32 intparm; /* interruption parameter */ | 17 | u32 intparm; /* interruption parameter */ |
15 | u32 qf : 1; /* qdio facility */ | 18 | u32 qf : 1; /* qdio facility */ |
16 | u32 res0 : 1; /* reserved zeros */ | 19 | u32 w : 1; |
17 | u32 isc : 3; /* interruption subclass */ | 20 | u32 isc : 3; /* interruption subclass */ |
18 | u32 res5 : 3; /* reserved zeros */ | 21 | u32 res5 : 3; /* reserved zeros */ |
19 | u32 ena : 1; /* enabled */ | 22 | u32 ena : 1; /* enabled */ |
@@ -47,7 +50,7 @@ struct pmcw { | |||
47 | */ | 50 | */ |
48 | struct schib { | 51 | struct schib { |
49 | struct pmcw pmcw; /* path management control word */ | 52 | struct pmcw pmcw; /* path management control word */ |
50 | struct scsw scsw; /* subchannel status word */ | 53 | union scsw scsw; /* subchannel status word */ |
51 | __u64 mba; /* measurement block address */ | 54 | __u64 mba; /* measurement block address */ |
52 | __u8 mda[4]; /* model dependent area */ | 55 | __u8 mda[4]; /* model dependent area */ |
53 | } __attribute__ ((packed,aligned(4))); | 56 | } __attribute__ ((packed,aligned(4))); |
@@ -99,8 +102,11 @@ extern int cio_set_options (struct subchannel *, int); | |||
99 | extern int cio_get_options (struct subchannel *); | 102 | extern int cio_get_options (struct subchannel *); |
100 | extern int cio_modify (struct subchannel *); | 103 | extern int cio_modify (struct subchannel *); |
101 | 104 | ||
105 | int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); | ||
106 | int cio_tm_intrg(struct subchannel *sch); | ||
107 | |||
102 | int cio_create_sch_lock(struct subchannel *); | 108 | int cio_create_sch_lock(struct subchannel *); |
103 | void do_adapter_IO(void); | 109 | void do_adapter_IO(u8 isc); |
104 | void do_IRQ(struct pt_regs *); | 110 | void do_IRQ(struct pt_regs *); |
105 | 111 | ||
106 | /* Use with care. */ | 112 | /* Use with care. */ |
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 2808b6833b9e..a90b28c0be57 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c | |||
@@ -341,12 +341,12 @@ static int cmf_copy_block(struct ccw_device *cdev) | |||
341 | if (stsch(sch->schid, &sch->schib)) | 341 | if (stsch(sch->schid, &sch->schib)) |
342 | return -ENODEV; | 342 | return -ENODEV; |
343 | 343 | ||
344 | if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) { | 344 | if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) { |
345 | /* Don't copy if a start function is in progress. */ | 345 | /* Don't copy if a start function is in progress. */ |
346 | if ((!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) && | 346 | if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) && |
347 | (sch->schib.scsw.actl & | 347 | (scsw_actl(&sch->schib.scsw) & |
348 | (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) && | 348 | (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) && |
349 | (!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS))) | 349 | (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS))) |
350 | return -EBUSY; | 350 | return -EBUSY; |
351 | } | 351 | } |
352 | cmb_data = cdev->private->cmb; | 352 | cmb_data = cdev->private->cmb; |
@@ -612,9 +612,6 @@ static int alloc_cmb(struct ccw_device *cdev) | |||
612 | free_pages((unsigned long)mem, get_order(size)); | 612 | free_pages((unsigned long)mem, get_order(size)); |
613 | } else if (!mem) { | 613 | } else if (!mem) { |
614 | /* no luck */ | 614 | /* no luck */ |
615 | printk(KERN_WARNING "cio: failed to allocate area " | ||
616 | "for measuring %d subchannels\n", | ||
617 | cmb_area.num_channels); | ||
618 | ret = -ENOMEM; | 615 | ret = -ENOMEM; |
619 | goto out; | 616 | goto out; |
620 | } else { | 617 | } else { |
@@ -1230,13 +1227,9 @@ static ssize_t cmb_enable_store(struct device *dev, | |||
1230 | switch (val) { | 1227 | switch (val) { |
1231 | case 0: | 1228 | case 0: |
1232 | ret = disable_cmf(cdev); | 1229 | ret = disable_cmf(cdev); |
1233 | if (ret) | ||
1234 | dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret); | ||
1235 | break; | 1230 | break; |
1236 | case 1: | 1231 | case 1: |
1237 | ret = enable_cmf(cdev); | 1232 | ret = enable_cmf(cdev); |
1238 | if (ret && ret != -EBUSY) | ||
1239 | dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret); | ||
1240 | break; | 1233 | break; |
1241 | } | 1234 | } |
1242 | 1235 | ||
@@ -1344,8 +1337,7 @@ static int __init init_cmf(void) | |||
1344 | * to basic mode. | 1337 | * to basic mode. |
1345 | */ | 1338 | */ |
1346 | if (format == CMF_AUTODETECT) { | 1339 | if (format == CMF_AUTODETECT) { |
1347 | if (!css_characteristics_avail || | 1340 | if (!css_general_characteristics.ext_mb) { |
1348 | !css_general_characteristics.ext_mb) { | ||
1349 | format = CMF_BASIC; | 1341 | format = CMF_BASIC; |
1350 | } else { | 1342 | } else { |
1351 | format = CMF_EXTENDED; | 1343 | format = CMF_EXTENDED; |
@@ -1365,8 +1357,6 @@ static int __init init_cmf(void) | |||
1365 | cmbops = &cmbops_extended; | 1357 | cmbops = &cmbops_extended; |
1366 | break; | 1358 | break; |
1367 | default: | 1359 | default: |
1368 | printk(KERN_ERR "cio: Invalid format %d for channel " | ||
1369 | "measurement facility\n", format); | ||
1370 | return 1; | 1360 | return 1; |
1371 | } | 1361 | } |
1372 | 1362 | ||
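The cmf changes above follow directly from struct scsw becoming union scsw: the measurement code can no longer read scsw.fctl and friends directly, so it goes through the scsw_fctl()/scsw_actl()/scsw_stctl() accessors. The sketch below only illustrates the idea behind such an accessor; the real helpers live in the architecture headers, and both the scsw_is_tm() test and the tm.actl field are assumptions here.

/* Illustration only: one field, two SCSW formats. */
static inline u32 example_scsw_actl(union scsw *scsw)
{
	if (scsw_is_tm(scsw))	/* transport-mode format? (assumed helper) */
		return scsw->tm.actl;
	return scsw->cmd.actl;	/* command-mode format, as used throughout this patch */
}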
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index a76956512b2d..46c021d880dc 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -2,8 +2,7 @@ | |||
2 | * drivers/s390/cio/css.c | 2 | * drivers/s390/cio/css.c |
3 | * driver for channel subsystem | 3 | * driver for channel subsystem |
4 | * | 4 | * |
5 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | 5 | * Copyright IBM Corp. 2002,2008 |
6 | * IBM Corporation | ||
7 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | 6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) |
8 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
9 | */ | 8 | */ |
@@ -14,7 +13,9 @@ | |||
14 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
15 | #include <linux/list.h> | 14 | #include <linux/list.h> |
16 | #include <linux/reboot.h> | 15 | #include <linux/reboot.h> |
16 | #include <asm/isc.h> | ||
17 | 17 | ||
18 | #include "../s390mach.h" | ||
18 | #include "css.h" | 19 | #include "css.h" |
19 | #include "cio.h" | 20 | #include "cio.h" |
20 | #include "cio_debug.h" | 21 | #include "cio_debug.h" |
@@ -30,8 +31,6 @@ static int max_ssid = 0; | |||
30 | 31 | ||
31 | struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; | 32 | struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; |
32 | 33 | ||
33 | int css_characteristics_avail = 0; | ||
34 | |||
35 | int | 34 | int |
36 | for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) | 35 | for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) |
37 | { | 36 | { |
@@ -121,25 +120,6 @@ css_alloc_subchannel(struct subchannel_id schid) | |||
121 | kfree(sch); | 120 | kfree(sch); |
122 | return ERR_PTR(ret); | 121 | return ERR_PTR(ret); |
123 | } | 122 | } |
124 | |||
125 | if (sch->st != SUBCHANNEL_TYPE_IO) { | ||
126 | /* For now we ignore all non-io subchannels. */ | ||
127 | kfree(sch); | ||
128 | return ERR_PTR(-EINVAL); | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Set intparm to subchannel address. | ||
133 | * This is fine even on 64bit since the subchannel is always located | ||
134 | * under 2G. | ||
135 | */ | ||
136 | sch->schib.pmcw.intparm = (u32)(addr_t)sch; | ||
137 | ret = cio_modify(sch); | ||
138 | if (ret) { | ||
139 | kfree(sch->lock); | ||
140 | kfree(sch); | ||
141 | return ERR_PTR(ret); | ||
142 | } | ||
143 | return sch; | 123 | return sch; |
144 | } | 124 | } |
145 | 125 | ||
@@ -177,12 +157,18 @@ static int css_sch_device_register(struct subchannel *sch) | |||
177 | return ret; | 157 | return ret; |
178 | } | 158 | } |
179 | 159 | ||
160 | /** | ||
161 | * css_sch_device_unregister - unregister a subchannel | ||
162 | * @sch: subchannel to be unregistered | ||
163 | */ | ||
180 | void css_sch_device_unregister(struct subchannel *sch) | 164 | void css_sch_device_unregister(struct subchannel *sch) |
181 | { | 165 | { |
182 | mutex_lock(&sch->reg_mutex); | 166 | mutex_lock(&sch->reg_mutex); |
183 | device_unregister(&sch->dev); | 167 | if (device_is_registered(&sch->dev)) |
168 | device_unregister(&sch->dev); | ||
184 | mutex_unlock(&sch->reg_mutex); | 169 | mutex_unlock(&sch->reg_mutex); |
185 | } | 170 | } |
171 | EXPORT_SYMBOL_GPL(css_sch_device_unregister); | ||
186 | 172 | ||
187 | static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) | 173 | static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) |
188 | { | 174 | { |
@@ -229,6 +215,41 @@ void css_update_ssd_info(struct subchannel *sch) | |||
229 | } | 215 | } |
230 | } | 216 | } |
231 | 217 | ||
218 | static ssize_t type_show(struct device *dev, struct device_attribute *attr, | ||
219 | char *buf) | ||
220 | { | ||
221 | struct subchannel *sch = to_subchannel(dev); | ||
222 | |||
223 | return sprintf(buf, "%01x\n", sch->st); | ||
224 | } | ||
225 | |||
226 | static DEVICE_ATTR(type, 0444, type_show, NULL); | ||
227 | |||
228 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | ||
229 | char *buf) | ||
230 | { | ||
231 | struct subchannel *sch = to_subchannel(dev); | ||
232 | |||
233 | return sprintf(buf, "css:t%01X\n", sch->st); | ||
234 | } | ||
235 | |||
236 | static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); | ||
237 | |||
238 | static struct attribute *subch_attrs[] = { | ||
239 | &dev_attr_type.attr, | ||
240 | &dev_attr_modalias.attr, | ||
241 | NULL, | ||
242 | }; | ||
243 | |||
244 | static struct attribute_group subch_attr_group = { | ||
245 | .attrs = subch_attrs, | ||
246 | }; | ||
247 | |||
248 | static struct attribute_group *default_subch_attr_groups[] = { | ||
249 | &subch_attr_group, | ||
250 | NULL, | ||
251 | }; | ||
252 | |||
232 | static int css_register_subchannel(struct subchannel *sch) | 253 | static int css_register_subchannel(struct subchannel *sch) |
233 | { | 254 | { |
234 | int ret; | 255 | int ret; |
@@ -237,16 +258,17 @@ static int css_register_subchannel(struct subchannel *sch) | |||
237 | sch->dev.parent = &channel_subsystems[0]->device; | 258 | sch->dev.parent = &channel_subsystems[0]->device; |
238 | sch->dev.bus = &css_bus_type; | 259 | sch->dev.bus = &css_bus_type; |
239 | sch->dev.release = &css_subchannel_release; | 260 | sch->dev.release = &css_subchannel_release; |
240 | sch->dev.groups = subch_attr_groups; | 261 | sch->dev.groups = default_subch_attr_groups; |
241 | /* | 262 | /* |
242 | * We don't want to generate uevents for I/O subchannels that don't | 263 | * We don't want to generate uevents for I/O subchannels that don't |
243 | * have a working ccw device behind them since they will be | 264 | * have a working ccw device behind them since they will be |
244 | * unregistered before they can be used anyway, so we delay the add | 265 | * unregistered before they can be used anyway, so we delay the add |
245 | * uevent until after device recognition was successful. | 266 | * uevent until after device recognition was successful. |
267 | * Note that we suppress the uevent for all subchannel types; | ||
268 | * the subchannel driver can decide for itself when it wants to inform | ||
269 | * userspace of its existence. | ||
246 | */ | 270 | */ |
247 | if (!cio_is_console(sch->schid)) | 271 | sch->dev.uevent_suppress = 1; |
248 | /* Console is special, no need to suppress. */ | ||
249 | sch->dev.uevent_suppress = 1; | ||
250 | css_update_ssd_info(sch); | 272 | css_update_ssd_info(sch); |
251 | /* make it known to the system */ | 273 | /* make it known to the system */ |
252 | ret = css_sch_device_register(sch); | 274 | ret = css_sch_device_register(sch); |
@@ -255,10 +277,19 @@ static int css_register_subchannel(struct subchannel *sch) | |||
255 | sch->schid.ssid, sch->schid.sch_no, ret); | 277 | sch->schid.ssid, sch->schid.sch_no, ret); |
256 | return ret; | 278 | return ret; |
257 | } | 279 | } |
280 | if (!sch->driver) { | ||
281 | /* | ||
282 | * No driver matched. Generate the uevent now so that | ||
283 | * a fitting driver module may be loaded based on the | ||
284 | * modalias. | ||
285 | */ | ||
286 | sch->dev.uevent_suppress = 0; | ||
287 | kobject_uevent(&sch->dev.kobj, KOBJ_ADD); | ||
288 | } | ||
258 | return ret; | 289 | return ret; |
259 | } | 290 | } |
260 | 291 | ||
261 | static int css_probe_device(struct subchannel_id schid) | 292 | int css_probe_device(struct subchannel_id schid) |
262 | { | 293 | { |
263 | int ret; | 294 | int ret; |
264 | struct subchannel *sch; | 295 | struct subchannel *sch; |
@@ -301,116 +332,12 @@ int css_sch_is_valid(struct schib *schib) | |||
301 | { | 332 | { |
302 | if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) | 333 | if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) |
303 | return 0; | 334 | return 0; |
335 | if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w) | ||
336 | return 0; | ||
304 | return 1; | 337 | return 1; |
305 | } | 338 | } |
306 | EXPORT_SYMBOL_GPL(css_sch_is_valid); | 339 | EXPORT_SYMBOL_GPL(css_sch_is_valid); |
307 | 340 | ||
308 | static int css_get_subchannel_status(struct subchannel *sch) | ||
309 | { | ||
310 | struct schib schib; | ||
311 | |||
312 | if (stsch(sch->schid, &schib)) | ||
313 | return CIO_GONE; | ||
314 | if (!css_sch_is_valid(&schib)) | ||
315 | return CIO_GONE; | ||
316 | if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev)) | ||
317 | return CIO_REVALIDATE; | ||
318 | if (!sch->lpm) | ||
319 | return CIO_NO_PATH; | ||
320 | return CIO_OPER; | ||
321 | } | ||
322 | |||
323 | static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) | ||
324 | { | ||
325 | int event, ret, disc; | ||
326 | unsigned long flags; | ||
327 | enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action; | ||
328 | |||
329 | spin_lock_irqsave(sch->lock, flags); | ||
330 | disc = device_is_disconnected(sch); | ||
331 | if (disc && slow) { | ||
332 | /* Disconnected devices are evaluated directly only.*/ | ||
333 | spin_unlock_irqrestore(sch->lock, flags); | ||
334 | return 0; | ||
335 | } | ||
336 | /* No interrupt after machine check - kill pending timers. */ | ||
337 | device_kill_pending_timer(sch); | ||
338 | if (!disc && !slow) { | ||
339 | /* Non-disconnected devices are evaluated on the slow path. */ | ||
340 | spin_unlock_irqrestore(sch->lock, flags); | ||
341 | return -EAGAIN; | ||
342 | } | ||
343 | event = css_get_subchannel_status(sch); | ||
344 | CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", | ||
345 | sch->schid.ssid, sch->schid.sch_no, event, | ||
346 | disc ? "disconnected" : "normal", | ||
347 | slow ? "slow" : "fast"); | ||
348 | /* Analyze subchannel status. */ | ||
349 | action = NONE; | ||
350 | switch (event) { | ||
351 | case CIO_NO_PATH: | ||
352 | if (disc) { | ||
353 | /* Check if paths have become available. */ | ||
354 | action = REPROBE; | ||
355 | break; | ||
356 | } | ||
357 | /* fall through */ | ||
358 | case CIO_GONE: | ||
359 | /* Prevent unwanted effects when opening lock. */ | ||
360 | cio_disable_subchannel(sch); | ||
361 | device_set_disconnected(sch); | ||
362 | /* Ask driver what to do with device. */ | ||
363 | action = UNREGISTER; | ||
364 | if (sch->driver && sch->driver->notify) { | ||
365 | spin_unlock_irqrestore(sch->lock, flags); | ||
366 | ret = sch->driver->notify(sch, event); | ||
367 | spin_lock_irqsave(sch->lock, flags); | ||
368 | if (ret) | ||
369 | action = NONE; | ||
370 | } | ||
371 | break; | ||
372 | case CIO_REVALIDATE: | ||
373 | /* Device will be removed, so no notify necessary. */ | ||
374 | if (disc) | ||
375 | /* Reprobe because immediate unregister might block. */ | ||
376 | action = REPROBE; | ||
377 | else | ||
378 | action = UNREGISTER_PROBE; | ||
379 | break; | ||
380 | case CIO_OPER: | ||
381 | if (disc) | ||
382 | /* Get device operational again. */ | ||
383 | action = REPROBE; | ||
384 | break; | ||
385 | } | ||
386 | /* Perform action. */ | ||
387 | ret = 0; | ||
388 | switch (action) { | ||
389 | case UNREGISTER: | ||
390 | case UNREGISTER_PROBE: | ||
391 | /* Unregister device (will use subchannel lock). */ | ||
392 | spin_unlock_irqrestore(sch->lock, flags); | ||
393 | css_sch_device_unregister(sch); | ||
394 | spin_lock_irqsave(sch->lock, flags); | ||
395 | |||
396 | /* Reset intparm to zeroes. */ | ||
397 | sch->schib.pmcw.intparm = 0; | ||
398 | cio_modify(sch); | ||
399 | break; | ||
400 | case REPROBE: | ||
401 | device_trigger_reprobe(sch); | ||
402 | break; | ||
403 | default: | ||
404 | break; | ||
405 | } | ||
406 | spin_unlock_irqrestore(sch->lock, flags); | ||
407 | /* Probe if necessary. */ | ||
408 | if (action == UNREGISTER_PROBE) | ||
409 | ret = css_probe_device(sch->schid); | ||
410 | |||
411 | return ret; | ||
412 | } | ||
413 | |||
414 | static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) | 341 | static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) |
415 | { | 342 | { |
416 | struct schib schib; | 343 | struct schib schib; |
@@ -429,6 +356,21 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) | |||
429 | return css_probe_device(schid); | 356 | return css_probe_device(schid); |
430 | } | 357 | } |
431 | 358 | ||
359 | static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) | ||
360 | { | ||
361 | int ret = 0; | ||
362 | |||
363 | if (sch->driver) { | ||
364 | if (sch->driver->sch_event) | ||
365 | ret = sch->driver->sch_event(sch, slow); | ||
366 | else | ||
367 | dev_dbg(&sch->dev, | ||
368 | "Got subchannel machine check but " | ||
369 | "no sch_event handler provided.\n"); | ||
370 | } | ||
371 | return ret; | ||
372 | } | ||
373 | |||
432 | static void css_evaluate_subchannel(struct subchannel_id schid, int slow) | 374 | static void css_evaluate_subchannel(struct subchannel_id schid, int slow) |
433 | { | 375 | { |
434 | struct subchannel *sch; | 376 | struct subchannel *sch; |
@@ -596,18 +538,29 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe); | |||
596 | /* | 538 | /* |
597 | * Called from the machine check handler for subchannel report words. | 539 | * Called from the machine check handler for subchannel report words. |
598 | */ | 540 | */ |
599 | void css_process_crw(int rsid1, int rsid2) | 541 | static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) |
600 | { | 542 | { |
601 | struct subchannel_id mchk_schid; | 543 | struct subchannel_id mchk_schid; |
602 | 544 | ||
603 | CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", | 545 | if (overflow) { |
604 | rsid1, rsid2); | 546 | css_schedule_eval_all(); |
547 | return; | ||
548 | } | ||
549 | CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, " | ||
550 | "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", | ||
551 | crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, | ||
552 | crw0->erc, crw0->rsid); | ||
553 | if (crw1) | ||
554 | CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, " | ||
555 | "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", | ||
556 | crw1->slct, crw1->oflw, crw1->chn, crw1->rsc, | ||
557 | crw1->anc, crw1->erc, crw1->rsid); | ||
605 | init_subchannel_id(&mchk_schid); | 558 | init_subchannel_id(&mchk_schid); |
606 | mchk_schid.sch_no = rsid1; | 559 | mchk_schid.sch_no = crw0->rsid; |
607 | if (rsid2 != 0) | 560 | if (crw1) |
608 | mchk_schid.ssid = (rsid2 >> 8) & 3; | 561 | mchk_schid.ssid = (crw1->rsid >> 8) & 3; |
609 | 562 | ||
610 | /* | 563 | /* |
611 | * Since we are always presented with IPI in the CRW, we have to | 564 | * Since we are always presented with IPI in the CRW, we have to |
612 | * use stsch() to find out if the subchannel in question has come | 565 | * use stsch() to find out if the subchannel in question has come |
613 | * or gone. | 566 | * or gone. |
@@ -658,7 +611,7 @@ __init_channel_subsystem(struct subchannel_id schid, void *data) | |||
658 | static void __init | 611 | static void __init |
659 | css_generate_pgid(struct channel_subsystem *css, u32 tod_high) | 612 | css_generate_pgid(struct channel_subsystem *css, u32 tod_high) |
660 | { | 613 | { |
661 | if (css_characteristics_avail && css_general_characteristics.mcss) { | 614 | if (css_general_characteristics.mcss) { |
662 | css->global_pgid.pgid_high.ext_cssid.version = 0x80; | 615 | css->global_pgid.pgid_high.ext_cssid.version = 0x80; |
663 | css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; | 616 | css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; |
664 | } else { | 617 | } else { |
@@ -795,8 +748,6 @@ init_channel_subsystem (void) | |||
795 | ret = chsc_determine_css_characteristics(); | 748 | ret = chsc_determine_css_characteristics(); |
796 | if (ret == -ENOMEM) | 749 | if (ret == -ENOMEM) |
797 | goto out; /* No need to continue. */ | 750 | goto out; /* No need to continue. */ |
798 | if (ret == 0) | ||
799 | css_characteristics_avail = 1; | ||
800 | 751 | ||
801 | ret = chsc_alloc_sei_area(); | 752 | ret = chsc_alloc_sei_area(); |
802 | if (ret) | 753 | if (ret) |
@@ -806,6 +757,10 @@ init_channel_subsystem (void) | |||
806 | if (ret) | 757 | if (ret) |
807 | goto out; | 758 | goto out; |
808 | 759 | ||
760 | ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw); | ||
761 | if (ret) | ||
762 | goto out; | ||
763 | |||
809 | if ((ret = bus_register(&css_bus_type))) | 764 | if ((ret = bus_register(&css_bus_type))) |
810 | goto out; | 765 | goto out; |
811 | 766 | ||
@@ -836,8 +791,7 @@ init_channel_subsystem (void) | |||
836 | ret = device_register(&css->device); | 791 | ret = device_register(&css->device); |
837 | if (ret) | 792 | if (ret) |
838 | goto out_free_all; | 793 | goto out_free_all; |
839 | if (css_characteristics_avail && | 794 | if (css_chsc_characteristics.secm) { |
840 | css_chsc_characteristics.secm) { | ||
841 | ret = device_create_file(&css->device, | 795 | ret = device_create_file(&css->device, |
842 | &dev_attr_cm_enable); | 796 | &dev_attr_cm_enable); |
843 | if (ret) | 797 | if (ret) |
@@ -852,7 +806,8 @@ init_channel_subsystem (void) | |||
852 | goto out_pseudo; | 806 | goto out_pseudo; |
853 | css_init_done = 1; | 807 | css_init_done = 1; |
854 | 808 | ||
855 | ctl_set_bit(6, 28); | 809 | /* Enable default isc for I/O subchannels. */ |
810 | isc_register(IO_SCH_ISC); | ||
856 | 811 | ||
857 | for_each_subchannel(__init_channel_subsystem, NULL); | 812 | for_each_subchannel(__init_channel_subsystem, NULL); |
858 | return 0; | 813 | return 0; |
@@ -875,7 +830,7 @@ out_unregister: | |||
875 | i--; | 830 | i--; |
876 | css = channel_subsystems[i]; | 831 | css = channel_subsystems[i]; |
877 | device_unregister(&css->pseudo_subchannel->dev); | 832 | device_unregister(&css->pseudo_subchannel->dev); |
878 | if (css_characteristics_avail && css_chsc_characteristics.secm) | 833 | if (css_chsc_characteristics.secm) |
879 | device_remove_file(&css->device, | 834 | device_remove_file(&css->device, |
880 | &dev_attr_cm_enable); | 835 | &dev_attr_cm_enable); |
881 | device_unregister(&css->device); | 836 | device_unregister(&css->device); |
@@ -883,6 +838,7 @@ out_unregister: | |||
883 | out_bus: | 838 | out_bus: |
884 | bus_unregister(&css_bus_type); | 839 | bus_unregister(&css_bus_type); |
885 | out: | 840 | out: |
841 | s390_unregister_crw_handler(CRW_RSC_CSS); | ||
886 | chsc_free_sei_area(); | 842 | chsc_free_sei_area(); |
887 | kfree(slow_subchannel_set); | 843 | kfree(slow_subchannel_set); |
888 | printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n", | 844 | printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n", |
@@ -895,19 +851,16 @@ int sch_is_pseudo_sch(struct subchannel *sch) | |||
895 | return sch == to_css(sch->dev.parent)->pseudo_subchannel; | 851 | return sch == to_css(sch->dev.parent)->pseudo_subchannel; |
896 | } | 852 | } |
897 | 853 | ||
898 | /* | 854 | static int css_bus_match(struct device *dev, struct device_driver *drv) |
899 | * find a driver for a subchannel. They identify by the subchannel | ||
900 | * type with the exception that the console subchannel driver has its own | ||
901 | * subchannel type although the device is an i/o subchannel | ||
902 | */ | ||
903 | static int | ||
904 | css_bus_match (struct device *dev, struct device_driver *drv) | ||
905 | { | 855 | { |
906 | struct subchannel *sch = to_subchannel(dev); | 856 | struct subchannel *sch = to_subchannel(dev); |
907 | struct css_driver *driver = to_cssdriver(drv); | 857 | struct css_driver *driver = to_cssdriver(drv); |
858 | struct css_device_id *id; | ||
908 | 859 | ||
909 | if (sch->st == driver->subchannel_type) | 860 | for (id = driver->subchannel_type; id->match_flags; id++) { |
910 | return 1; | 861 | if (sch->st == id->type) |
862 | return 1; | ||
863 | } | ||
911 | 864 | ||
912 | return 0; | 865 | return 0; |
913 | } | 866 | } |
@@ -945,12 +898,25 @@ static void css_shutdown(struct device *dev) | |||
945 | sch->driver->shutdown(sch); | 898 | sch->driver->shutdown(sch); |
946 | } | 899 | } |
947 | 900 | ||
901 | static int css_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
902 | { | ||
903 | struct subchannel *sch = to_subchannel(dev); | ||
904 | int ret; | ||
905 | |||
906 | ret = add_uevent_var(env, "ST=%01X", sch->st); | ||
907 | if (ret) | ||
908 | return ret; | ||
909 | ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st); | ||
910 | return ret; | ||
911 | } | ||
912 | |||
948 | struct bus_type css_bus_type = { | 913 | struct bus_type css_bus_type = { |
949 | .name = "css", | 914 | .name = "css", |
950 | .match = css_bus_match, | 915 | .match = css_bus_match, |
951 | .probe = css_probe, | 916 | .probe = css_probe, |
952 | .remove = css_remove, | 917 | .remove = css_remove, |
953 | .shutdown = css_shutdown, | 918 | .shutdown = css_shutdown, |
919 | .uevent = css_uevent, | ||
954 | }; | 920 | }; |
955 | 921 | ||
956 | /** | 922 | /** |
@@ -985,4 +951,3 @@ subsys_initcall(init_channel_subsystem); | |||
985 | 951 | ||
986 | MODULE_LICENSE("GPL"); | 952 | MODULE_LICENSE("GPL"); |
987 | EXPORT_SYMBOL(css_bus_type); | 953 | EXPORT_SYMBOL(css_bus_type); |
988 | EXPORT_SYMBOL_GPL(css_characteristics_avail); | ||
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index e1913518f354..57ebf120f825 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h | |||
@@ -9,8 +9,7 @@ | |||
9 | 9 | ||
10 | #include <asm/cio.h> | 10 | #include <asm/cio.h> |
11 | #include <asm/chpid.h> | 11 | #include <asm/chpid.h> |
12 | 12 | #include <asm/schid.h> | |
13 | #include "schid.h" | ||
14 | 13 | ||
15 | /* | 14 | /* |
16 | * path grouping stuff | 15 | * path grouping stuff |
@@ -58,20 +57,28 @@ struct pgid { | |||
58 | __u32 tod_high; /* high word TOD clock */ | 57 | __u32 tod_high; /* high word TOD clock */ |
59 | } __attribute__ ((packed)); | 58 | } __attribute__ ((packed)); |
60 | 59 | ||
61 | /* | ||
62 | * A css driver handles all subchannels of one type. | ||
63 | * Currently, we only care about I/O subchannels (type 0), these | ||
64 | * have a ccw_device connected to them. | ||
65 | */ | ||
66 | struct subchannel; | 60 | struct subchannel; |
61 | struct chp_link; | ||
62 | /** | ||
63 | * struct css_driver - device driver for subchannels | ||
64 | * @owner: owning module | ||
65 | * @subchannel_type: subchannel type supported by this driver | ||
66 | * @drv: embedded device driver structure | ||
67 | * @irq: called on interrupts | ||
68 | * @chp_event: called for events affecting a channel path | ||
69 | * @sch_event: called for events affecting the subchannel | ||
70 | * @probe: function called on probe | ||
71 | * @remove: function called on remove | ||
72 | * @shutdown: called at device shutdown | ||
73 | * @name: name of the device driver | ||
74 | */ | ||
67 | struct css_driver { | 75 | struct css_driver { |
68 | struct module *owner; | 76 | struct module *owner; |
69 | unsigned int subchannel_type; | 77 | struct css_device_id *subchannel_type; |
70 | struct device_driver drv; | 78 | struct device_driver drv; |
71 | void (*irq)(struct subchannel *); | 79 | void (*irq)(struct subchannel *); |
72 | int (*notify)(struct subchannel *, int); | 80 | int (*chp_event)(struct subchannel *, struct chp_link *, int); |
73 | void (*verify)(struct subchannel *); | 81 | int (*sch_event)(struct subchannel *, int); |
74 | void (*termination)(struct subchannel *); | ||
75 | int (*probe)(struct subchannel *); | 82 | int (*probe)(struct subchannel *); |
76 | int (*remove)(struct subchannel *); | 83 | int (*remove)(struct subchannel *); |
77 | void (*shutdown)(struct subchannel *); | 84 | void (*shutdown)(struct subchannel *); |
@@ -89,13 +96,13 @@ extern int css_driver_register(struct css_driver *); | |||
89 | extern void css_driver_unregister(struct css_driver *); | 96 | extern void css_driver_unregister(struct css_driver *); |
90 | 97 | ||
91 | extern void css_sch_device_unregister(struct subchannel *); | 98 | extern void css_sch_device_unregister(struct subchannel *); |
92 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); | 99 | extern int css_probe_device(struct subchannel_id); |
100 | extern struct subchannel *get_subchannel_by_schid(struct subchannel_id); | ||
93 | extern int css_init_done; | 101 | extern int css_init_done; |
94 | int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), | 102 | int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), |
95 | int (*fn_unknown)(struct subchannel_id, | 103 | int (*fn_unknown)(struct subchannel_id, |
96 | void *), void *data); | 104 | void *), void *data); |
97 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); | 105 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); |
98 | extern void css_process_crw(int, int); | ||
99 | extern void css_reiterate_subchannels(void); | 106 | extern void css_reiterate_subchannels(void); |
100 | void css_update_ssd_info(struct subchannel *sch); | 107 | void css_update_ssd_info(struct subchannel *sch); |
101 | 108 | ||
@@ -121,20 +128,6 @@ struct channel_subsystem { | |||
121 | extern struct bus_type css_bus_type; | 128 | extern struct bus_type css_bus_type; |
122 | extern struct channel_subsystem *channel_subsystems[]; | 129 | extern struct channel_subsystem *channel_subsystems[]; |
123 | 130 | ||
124 | /* Some helper functions for disconnected state. */ | ||
125 | int device_is_disconnected(struct subchannel *); | ||
126 | void device_set_disconnected(struct subchannel *); | ||
127 | void device_trigger_reprobe(struct subchannel *); | ||
128 | |||
129 | /* Helper functions for vary on/off. */ | ||
130 | int device_is_online(struct subchannel *); | ||
131 | void device_kill_io(struct subchannel *); | ||
132 | void device_set_intretry(struct subchannel *sch); | ||
133 | int device_trigger_verify(struct subchannel *sch); | ||
134 | |||
135 | /* Machine check helper function. */ | ||
136 | void device_kill_pending_timer(struct subchannel *); | ||
137 | |||
138 | /* Helper functions to build lists for the slow path. */ | 131 | /* Helper functions to build lists for the slow path. */ |
139 | void css_schedule_eval(struct subchannel_id schid); | 132 | void css_schedule_eval(struct subchannel_id schid); |
140 | void css_schedule_eval_all(void); | 133 | void css_schedule_eval_all(void); |
@@ -145,6 +138,4 @@ int css_sch_is_valid(struct schib *); | |||
145 | 138 | ||
146 | extern struct workqueue_struct *slow_path_wq; | 139 | extern struct workqueue_struct *slow_path_wq; |
147 | void css_wait_for_slow_path(void); | 140 | void css_wait_for_slow_path(void); |
148 | |||
149 | extern struct attribute_group *subch_attr_groups[]; | ||
150 | #endif | 141 | #endif |
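With struct css_driver now carrying a css_device_id table plus sch_event and chp_event callbacks, a non-I/O subchannel driver registers much like the reworked io_subchannel_driver in device.c below. The sketch here is hypothetical (all example_* names are invented); it uses only css_driver_register(), SUBCHANNEL_TYPE_MSG and the match_flags convention visible in this patch.

static struct css_device_id example_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_MSG, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, example_subchannel_ids);

static int example_probe(struct subchannel *sch)
{
	return 0;		/* nothing to set up in this sketch */
}

static int example_sch_event(struct subchannel *sch, int slow)
{
	return 0;		/* treat every subchannel event as handled */
}

static struct css_driver example_driver = {
	.owner		 = THIS_MODULE,
	.name		 = "example_subchannel",
	.subchannel_type = example_subchannel_ids,
	.probe		 = example_probe,
	.sch_event	 = example_sch_event,
};

static int __init example_driver_init(void)
{
	return css_driver_register(&example_driver);
}

Because css_register_subchannel() now emits the add uevent with a css:tN modalias when no driver is bound, such a module can also be autoloaded through its MODULE_DEVICE_TABLE entry.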
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index e22813db74a2..e818d0c54c09 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -2,8 +2,7 @@ | |||
2 | * drivers/s390/cio/device.c | 2 | * drivers/s390/cio/device.c |
3 | * bus driver for ccw devices | 3 | * bus driver for ccw devices |
4 | * | 4 | * |
5 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | 5 | * Copyright IBM Corp. 2002,2008 |
6 | * IBM Corporation | ||
7 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | 6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) |
8 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
9 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
@@ -23,7 +22,9 @@ | |||
23 | #include <asm/cio.h> | 22 | #include <asm/cio.h> |
24 | #include <asm/param.h> /* HZ */ | 23 | #include <asm/param.h> /* HZ */ |
25 | #include <asm/cmb.h> | 24 | #include <asm/cmb.h> |
25 | #include <asm/isc.h> | ||
26 | 26 | ||
27 | #include "chp.h" | ||
27 | #include "cio.h" | 28 | #include "cio.h" |
28 | #include "cio_debug.h" | 29 | #include "cio_debug.h" |
29 | #include "css.h" | 30 | #include "css.h" |
@@ -125,19 +126,24 @@ struct bus_type ccw_bus_type; | |||
125 | static void io_subchannel_irq(struct subchannel *); | 126 | static void io_subchannel_irq(struct subchannel *); |
126 | static int io_subchannel_probe(struct subchannel *); | 127 | static int io_subchannel_probe(struct subchannel *); |
127 | static int io_subchannel_remove(struct subchannel *); | 128 | static int io_subchannel_remove(struct subchannel *); |
128 | static int io_subchannel_notify(struct subchannel *, int); | ||
129 | static void io_subchannel_verify(struct subchannel *); | ||
130 | static void io_subchannel_ioterm(struct subchannel *); | ||
131 | static void io_subchannel_shutdown(struct subchannel *); | 129 | static void io_subchannel_shutdown(struct subchannel *); |
130 | static int io_subchannel_sch_event(struct subchannel *, int); | ||
131 | static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, | ||
132 | int); | ||
133 | |||
134 | static struct css_device_id io_subchannel_ids[] = { | ||
135 | { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, | ||
136 | { /* end of list */ }, | ||
137 | }; | ||
138 | MODULE_DEVICE_TABLE(css, io_subchannel_ids); | ||
132 | 139 | ||
133 | static struct css_driver io_subchannel_driver = { | 140 | static struct css_driver io_subchannel_driver = { |
134 | .owner = THIS_MODULE, | 141 | .owner = THIS_MODULE, |
135 | .subchannel_type = SUBCHANNEL_TYPE_IO, | 142 | .subchannel_type = io_subchannel_ids, |
136 | .name = "io_subchannel", | 143 | .name = "io_subchannel", |
137 | .irq = io_subchannel_irq, | 144 | .irq = io_subchannel_irq, |
138 | .notify = io_subchannel_notify, | 145 | .sch_event = io_subchannel_sch_event, |
139 | .verify = io_subchannel_verify, | 146 | .chp_event = io_subchannel_chp_event, |
140 | .termination = io_subchannel_ioterm, | ||
141 | .probe = io_subchannel_probe, | 147 | .probe = io_subchannel_probe, |
142 | .remove = io_subchannel_remove, | 148 | .remove = io_subchannel_remove, |
143 | .shutdown = io_subchannel_shutdown, | 149 | .shutdown = io_subchannel_shutdown, |
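As a rough sketch of how the reworked css_driver interface above is meant to be consumed: the single .subchannel_type value becomes a css_device_id match table, and the notify/verify/termination callbacks give way to sch_event/chp_event. All example_* identifiers below are invented; only the field names and callback signatures are taken from the hunks in this diff.

	/* Hedged illustration only, not part of the patch. */
	static int example_sch_event(struct subchannel *sch, int slow)
	{
		return 0;	/* nothing to re-evaluate in this toy driver */
	}

	static int example_chp_event(struct subchannel *sch,
				     struct chp_link *link, int event)
	{
		return 0;	/* channel-path events are ignored here */
	}

	static struct css_device_id example_subchannel_ids[] = {
		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
		{ /* end of list */ },
	};

	static struct css_driver example_driver = {
		.owner		 = THIS_MODULE,
		.subchannel_type = example_subchannel_ids,
		.name		 = "example_subchannel",
		.sch_event	 = example_sch_event,
		.chp_event	 = example_chp_event,
	};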
@@ -487,25 +493,22 @@ static int online_store_recog_and_online(struct ccw_device *cdev) | |||
487 | ccw_device_set_online(cdev); | 493 | ccw_device_set_online(cdev); |
488 | return 0; | 494 | return 0; |
489 | } | 495 | } |
490 | static void online_store_handle_online(struct ccw_device *cdev, int force) | 496 | static int online_store_handle_online(struct ccw_device *cdev, int force) |
491 | { | 497 | { |
492 | int ret; | 498 | int ret; |
493 | 499 | ||
494 | ret = online_store_recog_and_online(cdev); | 500 | ret = online_store_recog_and_online(cdev); |
495 | if (ret) | 501 | if (ret) |
496 | return; | 502 | return ret; |
497 | if (force && cdev->private->state == DEV_STATE_BOXED) { | 503 | if (force && cdev->private->state == DEV_STATE_BOXED) { |
498 | ret = ccw_device_stlck(cdev); | 504 | ret = ccw_device_stlck(cdev); |
499 | if (ret) { | 505 | if (ret) |
500 | dev_warn(&cdev->dev, | 506 | return ret; |
501 | "ccw_device_stlck returned %d!\n", ret); | ||
502 | return; | ||
503 | } | ||
504 | if (cdev->id.cu_type == 0) | 507 | if (cdev->id.cu_type == 0) |
505 | cdev->private->state = DEV_STATE_NOT_OPER; | 508 | cdev->private->state = DEV_STATE_NOT_OPER; |
506 | online_store_recog_and_online(cdev); | 509 | online_store_recog_and_online(cdev); |
507 | } | 510 | } |
508 | 511 | return 0; | |
509 | } | 512 | } |
510 | 513 | ||
511 | static ssize_t online_store (struct device *dev, struct device_attribute *attr, | 514 | static ssize_t online_store (struct device *dev, struct device_attribute *attr, |
@@ -538,8 +541,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, | |||
538 | ret = count; | 541 | ret = count; |
539 | break; | 542 | break; |
540 | case 1: | 543 | case 1: |
541 | online_store_handle_online(cdev, force); | 544 | ret = online_store_handle_online(cdev, force); |
542 | ret = count; | 545 | if (!ret) |
546 | ret = count; | ||
543 | break; | 547 | break; |
544 | default: | 548 | default: |
545 | ret = -EINVAL; | 549 | ret = -EINVAL; |
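The change above lets the online handler's return value drive the sysfs result: a store callback returns the byte count on success and a negative errno on failure, whereas the old void handler reported success to the writer even when the unconditional-reserve step failed. A stand-alone model of that contract, in plain C with invented names rather than driver code:

	#include <errno.h>
	#include <stdio.h>

	/* Pretend handler: fails with -ENODEV unless "force" is given. */
	static int handle_online(int force)
	{
		return force ? 0 : -ENODEV;
	}

	/* Models the sysfs .store contract: byte count on success,
	 * negative errno on failure. */
	static long example_store(const char *buf, long count, int force)
	{
		int ret;

		(void)buf;
		ret = handle_online(force);
		return ret ? ret : count;
	}

	int main(void)
	{
		printf("%ld %ld\n", example_store("1\n", 2, 1),
		       example_store("1\n", 2, 0));
		return 0;
	}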
@@ -584,19 +588,14 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); | |||
584 | static DEVICE_ATTR(online, 0644, online_show, online_store); | 588 | static DEVICE_ATTR(online, 0644, online_show, online_store); |
585 | static DEVICE_ATTR(availability, 0444, available_show, NULL); | 589 | static DEVICE_ATTR(availability, 0444, available_show, NULL); |
586 | 590 | ||
587 | static struct attribute * subch_attrs[] = { | 591 | static struct attribute *io_subchannel_attrs[] = { |
588 | &dev_attr_chpids.attr, | 592 | &dev_attr_chpids.attr, |
589 | &dev_attr_pimpampom.attr, | 593 | &dev_attr_pimpampom.attr, |
590 | NULL, | 594 | NULL, |
591 | }; | 595 | }; |
592 | 596 | ||
593 | static struct attribute_group subch_attr_group = { | 597 | static struct attribute_group io_subchannel_attr_group = { |
594 | .attrs = subch_attrs, | 598 | .attrs = io_subchannel_attrs, |
595 | }; | ||
596 | |||
597 | struct attribute_group *subch_attr_groups[] = { | ||
598 | &subch_attr_group, | ||
599 | NULL, | ||
600 | }; | 599 | }; |
601 | 600 | ||
602 | static struct attribute * ccwdev_attrs[] = { | 601 | static struct attribute * ccwdev_attrs[] = { |
@@ -790,7 +789,7 @@ static void sch_attach_device(struct subchannel *sch, | |||
790 | sch_set_cdev(sch, cdev); | 789 | sch_set_cdev(sch, cdev); |
791 | cdev->private->schid = sch->schid; | 790 | cdev->private->schid = sch->schid; |
792 | cdev->ccwlock = sch->lock; | 791 | cdev->ccwlock = sch->lock; |
793 | device_trigger_reprobe(sch); | 792 | ccw_device_trigger_reprobe(cdev); |
794 | spin_unlock_irq(sch->lock); | 793 | spin_unlock_irq(sch->lock); |
795 | } | 794 | } |
796 | 795 | ||
@@ -1037,7 +1036,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) | |||
1037 | struct ccw_device_private *priv; | 1036 | struct ccw_device_private *priv; |
1038 | 1037 | ||
1039 | sch_set_cdev(sch, cdev); | 1038 | sch_set_cdev(sch, cdev); |
1040 | sch->driver = &io_subchannel_driver; | ||
1041 | cdev->ccwlock = sch->lock; | 1039 | cdev->ccwlock = sch->lock; |
1042 | 1040 | ||
1043 | /* Init private data. */ | 1041 | /* Init private data. */ |
@@ -1122,8 +1120,33 @@ static void io_subchannel_irq(struct subchannel *sch) | |||
1122 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); | 1120 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); |
1123 | } | 1121 | } |
1124 | 1122 | ||
1125 | static int | 1123 | static void io_subchannel_init_fields(struct subchannel *sch) |
1126 | io_subchannel_probe (struct subchannel *sch) | 1124 | { |
1125 | if (cio_is_console(sch->schid)) | ||
1126 | sch->opm = 0xff; | ||
1127 | else | ||
1128 | sch->opm = chp_get_sch_opm(sch); | ||
1129 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | ||
1130 | sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC; | ||
1131 | |||
1132 | CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X" | ||
1133 | " - PIM = %02X, PAM = %02X, POM = %02X\n", | ||
1134 | sch->schib.pmcw.dev, sch->schid.ssid, | ||
1135 | sch->schid.sch_no, sch->schib.pmcw.pim, | ||
1136 | sch->schib.pmcw.pam, sch->schib.pmcw.pom); | ||
1137 | /* Initially set up some fields in the pmcw. */ | ||
1138 | sch->schib.pmcw.ena = 0; | ||
1139 | sch->schib.pmcw.csense = 1; /* concurrent sense */ | ||
1140 | if ((sch->lpm & (sch->lpm - 1)) != 0) | ||
1141 | sch->schib.pmcw.mp = 1; /* multipath mode */ | ||
1142 | /* clean up possible residual cmf stuff */ | ||
1143 | sch->schib.pmcw.mme = 0; | ||
1144 | sch->schib.pmcw.mbfc = 0; | ||
1145 | sch->schib.pmcw.mbi = 0; | ||
1146 | sch->schib.mba = 0; | ||
1147 | } | ||
1148 | |||
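One detail in io_subchannel_init_fields() worth spelling out: lpm is a bit mask of logical paths, and (lpm & (lpm - 1)) != 0 is the usual test for "more than one bit set", i.e. multipath. A tiny stand-alone check (plain C, not driver code):

	#include <stdio.h>

	int main(void)
	{
		/* x & (x - 1) clears the lowest set bit, so the result is
		 * non-zero exactly when at least two path bits are set. */
		unsigned char masks[] = { 0x00, 0x80, 0xc0, 0xff };
		for (int i = 0; i < 4; i++)
			printf("lpm=%02x multipath=%d\n", masks[i],
			       (masks[i] & (masks[i] - 1)) != 0);
		return 0;
	}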
1149 | static int io_subchannel_probe(struct subchannel *sch) | ||
1127 | { | 1150 | { |
1128 | struct ccw_device *cdev; | 1151 | struct ccw_device *cdev; |
1129 | int rc; | 1152 | int rc; |
@@ -1132,11 +1155,21 @@ io_subchannel_probe (struct subchannel *sch) | |||
1132 | 1155 | ||
1133 | cdev = sch_get_cdev(sch); | 1156 | cdev = sch_get_cdev(sch); |
1134 | if (cdev) { | 1157 | if (cdev) { |
1158 | rc = sysfs_create_group(&sch->dev.kobj, | ||
1159 | &io_subchannel_attr_group); | ||
1160 | if (rc) | ||
1161 | CIO_MSG_EVENT(0, "Failed to create io subchannel " | ||
1162 | "attributes for subchannel " | ||
1163 | "0.%x.%04x (rc=%d)\n", | ||
1164 | sch->schid.ssid, sch->schid.sch_no, rc); | ||
1135 | /* | 1165 | /* |
1136 | * This subchannel already has an associated ccw_device. | 1166 | * This subchannel already has an associated ccw_device. |
1137 | * Register it and exit. This happens for all early | 1167 | * Throw the delayed uevent for the subchannel, register |
1138 | * device, e.g. the console. | 1168 | * the ccw_device and exit. This happens for all early |
1169 | * devices, e.g. the console. | ||
1139 | */ | 1170 | */ |
1171 | sch->dev.uevent_suppress = 0; | ||
1172 | kobject_uevent(&sch->dev.kobj, KOBJ_ADD); | ||
1140 | cdev->dev.groups = ccwdev_attr_groups; | 1173 | cdev->dev.groups = ccwdev_attr_groups; |
1141 | device_initialize(&cdev->dev); | 1174 | device_initialize(&cdev->dev); |
1142 | ccw_device_register(cdev); | 1175 | ccw_device_register(cdev); |
@@ -1152,17 +1185,24 @@ io_subchannel_probe (struct subchannel *sch) | |||
1152 | get_device(&cdev->dev); | 1185 | get_device(&cdev->dev); |
1153 | return 0; | 1186 | return 0; |
1154 | } | 1187 | } |
1188 | io_subchannel_init_fields(sch); | ||
1155 | /* | 1189 | /* |
1156 | * First check if a fitting device may be found amongst the | 1190 | * First check if a fitting device may be found amongst the |
1157 | * disconnected devices or in the orphanage. | 1191 | * disconnected devices or in the orphanage. |
1158 | */ | 1192 | */ |
1159 | dev_id.devno = sch->schib.pmcw.dev; | 1193 | dev_id.devno = sch->schib.pmcw.dev; |
1160 | dev_id.ssid = sch->schid.ssid; | 1194 | dev_id.ssid = sch->schid.ssid; |
1195 | rc = sysfs_create_group(&sch->dev.kobj, | ||
1196 | &io_subchannel_attr_group); | ||
1197 | if (rc) | ||
1198 | return rc; | ||
1161 | /* Allocate I/O subchannel private data. */ | 1199 | /* Allocate I/O subchannel private data. */ |
1162 | sch->private = kzalloc(sizeof(struct io_subchannel_private), | 1200 | sch->private = kzalloc(sizeof(struct io_subchannel_private), |
1163 | GFP_KERNEL | GFP_DMA); | 1201 | GFP_KERNEL | GFP_DMA); |
1164 | if (!sch->private) | 1202 | if (!sch->private) { |
1165 | return -ENOMEM; | 1203 | rc = -ENOMEM; |
1204 | goto out_err; | ||
1205 | } | ||
1166 | cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); | 1206 | cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); |
1167 | if (!cdev) | 1207 | if (!cdev) |
1168 | cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), | 1208 | cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), |
@@ -1181,8 +1221,8 @@ io_subchannel_probe (struct subchannel *sch) | |||
1181 | } | 1221 | } |
1182 | cdev = io_subchannel_create_ccwdev(sch); | 1222 | cdev = io_subchannel_create_ccwdev(sch); |
1183 | if (IS_ERR(cdev)) { | 1223 | if (IS_ERR(cdev)) { |
1184 | kfree(sch->private); | 1224 | rc = PTR_ERR(cdev); |
1185 | return PTR_ERR(cdev); | 1225 | goto out_err; |
1186 | } | 1226 | } |
1187 | rc = io_subchannel_recog(cdev, sch); | 1227 | rc = io_subchannel_recog(cdev, sch); |
1188 | if (rc) { | 1228 | if (rc) { |
@@ -1191,9 +1231,12 @@ io_subchannel_probe (struct subchannel *sch) | |||
1191 | spin_unlock_irqrestore(sch->lock, flags); | 1231 | spin_unlock_irqrestore(sch->lock, flags); |
1192 | if (cdev->dev.release) | 1232 | if (cdev->dev.release) |
1193 | cdev->dev.release(&cdev->dev); | 1233 | cdev->dev.release(&cdev->dev); |
1194 | kfree(sch->private); | 1234 | goto out_err; |
1195 | } | 1235 | } |
1196 | 1236 | return 0; | |
1237 | out_err: | ||
1238 | kfree(sch->private); | ||
1239 | sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); | ||
1197 | return rc; | 1240 | return rc; |
1198 | } | 1241 | } |
1199 | 1242 | ||
@@ -1214,6 +1257,7 @@ io_subchannel_remove (struct subchannel *sch) | |||
1214 | ccw_device_unregister(cdev); | 1257 | ccw_device_unregister(cdev); |
1215 | put_device(&cdev->dev); | 1258 | put_device(&cdev->dev); |
1216 | kfree(sch->private); | 1259 | kfree(sch->private); |
1260 | sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); | ||
1217 | return 0; | 1261 | return 0; |
1218 | } | 1262 | } |
1219 | 1263 | ||
@@ -1224,11 +1268,7 @@ static int io_subchannel_notify(struct subchannel *sch, int event) | |||
1224 | cdev = sch_get_cdev(sch); | 1268 | cdev = sch_get_cdev(sch); |
1225 | if (!cdev) | 1269 | if (!cdev) |
1226 | return 0; | 1270 | return 0; |
1227 | if (!cdev->drv) | 1271 | return ccw_device_notify(cdev, event); |
1228 | return 0; | ||
1229 | if (!cdev->online) | ||
1230 | return 0; | ||
1231 | return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; | ||
1232 | } | 1272 | } |
1233 | 1273 | ||
1234 | static void io_subchannel_verify(struct subchannel *sch) | 1274 | static void io_subchannel_verify(struct subchannel *sch) |
@@ -1240,22 +1280,96 @@ static void io_subchannel_verify(struct subchannel *sch) | |||
1240 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | 1280 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); |
1241 | } | 1281 | } |
1242 | 1282 | ||
1243 | static void io_subchannel_ioterm(struct subchannel *sch) | 1283 | static int check_for_io_on_path(struct subchannel *sch, int mask) |
1244 | { | 1284 | { |
1245 | struct ccw_device *cdev; | 1285 | int cc; |
1246 | 1286 | ||
1247 | cdev = sch_get_cdev(sch); | 1287 | cc = stsch(sch->schid, &sch->schib); |
1248 | if (!cdev) | 1288 | if (cc) |
1249 | return; | 1289 | return 0; |
1250 | /* Internal I/O will be retried by the interrupt handler. */ | 1290 | if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask) |
1251 | if (cdev->private->flags.intretry) | 1291 | return 1; |
1292 | return 0; | ||
1293 | } | ||
1294 | |||
1295 | static void terminate_internal_io(struct subchannel *sch, | ||
1296 | struct ccw_device *cdev) | ||
1297 | { | ||
1298 | if (cio_clear(sch)) { | ||
1299 | /* Recheck device in case clear failed. */ | ||
1300 | sch->lpm = 0; | ||
1301 | if (cdev->online) | ||
1302 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | ||
1303 | else | ||
1304 | css_schedule_eval(sch->schid); | ||
1252 | return; | 1305 | return; |
1306 | } | ||
1253 | cdev->private->state = DEV_STATE_CLEAR_VERIFY; | 1307 | cdev->private->state = DEV_STATE_CLEAR_VERIFY; |
1308 | /* Request retry of internal operation. */ | ||
1309 | cdev->private->flags.intretry = 1; | ||
1310 | /* Call handler. */ | ||
1254 | if (cdev->handler) | 1311 | if (cdev->handler) |
1255 | cdev->handler(cdev, cdev->private->intparm, | 1312 | cdev->handler(cdev, cdev->private->intparm, |
1256 | ERR_PTR(-EIO)); | 1313 | ERR_PTR(-EIO)); |
1257 | } | 1314 | } |
1258 | 1315 | ||
1316 | static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) | ||
1317 | { | ||
1318 | struct ccw_device *cdev; | ||
1319 | |||
1320 | cdev = sch_get_cdev(sch); | ||
1321 | if (!cdev) | ||
1322 | return; | ||
1323 | if (check_for_io_on_path(sch, mask)) { | ||
1324 | if (cdev->private->state == DEV_STATE_ONLINE) | ||
1325 | ccw_device_kill_io(cdev); | ||
1326 | else { | ||
1327 | terminate_internal_io(sch, cdev); | ||
1328 | /* Re-start path verification. */ | ||
1329 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | ||
1330 | } | ||
1331 | } else | ||
1332 | /* trigger path verification. */ | ||
1333 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | ||
1334 | |||
1335 | } | ||
1336 | |||
1337 | static int io_subchannel_chp_event(struct subchannel *sch, | ||
1338 | struct chp_link *link, int event) | ||
1339 | { | ||
1340 | int mask; | ||
1341 | |||
1342 | mask = chp_ssd_get_mask(&sch->ssd_info, link); | ||
1343 | if (!mask) | ||
1344 | return 0; | ||
1345 | switch (event) { | ||
1346 | case CHP_VARY_OFF: | ||
1347 | sch->opm &= ~mask; | ||
1348 | sch->lpm &= ~mask; | ||
1349 | io_subchannel_terminate_path(sch, mask); | ||
1350 | break; | ||
1351 | case CHP_VARY_ON: | ||
1352 | sch->opm |= mask; | ||
1353 | sch->lpm |= mask; | ||
1354 | io_subchannel_verify(sch); | ||
1355 | break; | ||
1356 | case CHP_OFFLINE: | ||
1357 | if (stsch(sch->schid, &sch->schib)) | ||
1358 | return -ENXIO; | ||
1359 | if (!css_sch_is_valid(&sch->schib)) | ||
1360 | return -ENODEV; | ||
1361 | io_subchannel_terminate_path(sch, mask); | ||
1362 | break; | ||
1363 | case CHP_ONLINE: | ||
1364 | if (stsch(sch->schid, &sch->schib)) | ||
1365 | return -ENXIO; | ||
1366 | sch->lpm |= mask & sch->opm; | ||
1367 | io_subchannel_verify(sch); | ||
1368 | break; | ||
1369 | } | ||
1370 | return 0; | ||
1371 | } | ||
1372 | |||
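The mask handling in io_subchannel_chp_event() comes down to bit arithmetic on the operational (opm) and logical (lpm) path masks. A stand-alone model of the interesting transitions, with values invented for the demonstration; note that CHP_ONLINE alone cannot bring back a path that was varied off:

	#include <stdio.h>

	int main(void)
	{
		unsigned char opm = 0xc0, lpm = 0xc0, mask = 0x40;

		opm &= ~mask; lpm &= ~mask;	/* CHP_VARY_OFF: drop the path */
		printf("vary off: opm=%02x lpm=%02x\n", opm, lpm);

		lpm |= mask & opm;	/* CHP_ONLINE: only regain operational bits */
		printf("online:   opm=%02x lpm=%02x (still varied off)\n", opm, lpm);

		opm |= mask; lpm |= mask;	/* CHP_VARY_ON: path usable again */
		printf("vary on:  opm=%02x lpm=%02x\n", opm, lpm);
		return 0;
	}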
1259 | static void | 1373 | static void |
1260 | io_subchannel_shutdown(struct subchannel *sch) | 1374 | io_subchannel_shutdown(struct subchannel *sch) |
1261 | { | 1375 | { |
@@ -1285,6 +1399,195 @@ io_subchannel_shutdown(struct subchannel *sch) | |||
1285 | cio_disable_subchannel(sch); | 1399 | cio_disable_subchannel(sch); |
1286 | } | 1400 | } |
1287 | 1401 | ||
1402 | static int io_subchannel_get_status(struct subchannel *sch) | ||
1403 | { | ||
1404 | struct schib schib; | ||
1405 | |||
1406 | if (stsch(sch->schid, &schib) || !schib.pmcw.dnv) | ||
1407 | return CIO_GONE; | ||
1408 | if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev)) | ||
1409 | return CIO_REVALIDATE; | ||
1410 | if (!sch->lpm) | ||
1411 | return CIO_NO_PATH; | ||
1412 | return CIO_OPER; | ||
1413 | } | ||
1414 | |||
1415 | static int device_is_disconnected(struct ccw_device *cdev) | ||
1416 | { | ||
1417 | if (!cdev) | ||
1418 | return 0; | ||
1419 | return (cdev->private->state == DEV_STATE_DISCONNECTED || | ||
1420 | cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); | ||
1421 | } | ||
1422 | |||
1423 | static int recovery_check(struct device *dev, void *data) | ||
1424 | { | ||
1425 | struct ccw_device *cdev = to_ccwdev(dev); | ||
1426 | int *redo = data; | ||
1427 | |||
1428 | spin_lock_irq(cdev->ccwlock); | ||
1429 | switch (cdev->private->state) { | ||
1430 | case DEV_STATE_DISCONNECTED: | ||
1431 | CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", | ||
1432 | cdev->private->dev_id.ssid, | ||
1433 | cdev->private->dev_id.devno); | ||
1434 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | ||
1435 | *redo = 1; | ||
1436 | break; | ||
1437 | case DEV_STATE_DISCONNECTED_SENSE_ID: | ||
1438 | *redo = 1; | ||
1439 | break; | ||
1440 | } | ||
1441 | spin_unlock_irq(cdev->ccwlock); | ||
1442 | |||
1443 | return 0; | ||
1444 | } | ||
1445 | |||
1446 | static void recovery_work_func(struct work_struct *unused) | ||
1447 | { | ||
1448 | int redo = 0; | ||
1449 | |||
1450 | bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check); | ||
1451 | if (redo) { | ||
1452 | spin_lock_irq(&recovery_lock); | ||
1453 | if (!timer_pending(&recovery_timer)) { | ||
1454 | if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1) | ||
1455 | recovery_phase++; | ||
1456 | mod_timer(&recovery_timer, jiffies + | ||
1457 | recovery_delay[recovery_phase] * HZ); | ||
1458 | } | ||
1459 | spin_unlock_irq(&recovery_lock); | ||
1460 | } else | ||
1461 | CIO_MSG_EVENT(4, "recovery: end\n"); | ||
1462 | } | ||
1463 | |||
1464 | static DECLARE_WORK(recovery_work, recovery_work_func); | ||
1465 | |||
1466 | static void recovery_func(unsigned long data) | ||
1467 | { | ||
1468 | /* | ||
1469 | * We can't do our recovery in softirq context and it's not | ||
1470 | * performance critical, so we schedule it. | ||
1471 | */ | ||
1472 | schedule_work(&recovery_work); | ||
1473 | } | ||
1474 | |||
1475 | static void ccw_device_schedule_recovery(void) | ||
1476 | { | ||
1477 | unsigned long flags; | ||
1478 | |||
1479 | CIO_MSG_EVENT(4, "recovery: schedule\n"); | ||
1480 | spin_lock_irqsave(&recovery_lock, flags); | ||
1481 | if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { | ||
1482 | recovery_phase = 0; | ||
1483 | mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ); | ||
1484 | } | ||
1485 | spin_unlock_irqrestore(&recovery_lock, flags); | ||
1486 | } | ||
1487 | |||
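The recovery machinery moved here is a staged retry: each pass of recovery_work_func() that still finds disconnected devices re-arms the timer with the next entry of recovery_delay[], capping at the last one, while ccw_device_schedule_recovery() resets the phase to zero. The array itself is defined elsewhere in device.c; the delay values below are only assumed for this stand-alone demonstration.

	#include <stdio.h>

	int main(void)
	{
		const int recovery_delay[] = { 3, 30, 300 };	/* seconds, assumed */
		const int n = sizeof(recovery_delay) / sizeof(recovery_delay[0]);
		int recovery_phase = 0;

		for (int pass = 0; pass < 5; pass++) {
			printf("pass %d: next retry in %d s\n", pass,
			       recovery_delay[recovery_phase]);
			if (recovery_phase < n - 1)
				recovery_phase++;
		}
		return 0;
	}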
1488 | static void device_set_disconnected(struct ccw_device *cdev) | ||
1489 | { | ||
1490 | if (!cdev) | ||
1491 | return; | ||
1492 | ccw_device_set_timeout(cdev, 0); | ||
1493 | cdev->private->flags.fake_irb = 0; | ||
1494 | cdev->private->state = DEV_STATE_DISCONNECTED; | ||
1495 | if (cdev->online) | ||
1496 | ccw_device_schedule_recovery(); | ||
1497 | } | ||
1498 | |||
1499 | static int io_subchannel_sch_event(struct subchannel *sch, int slow) | ||
1500 | { | ||
1501 | int event, ret, disc; | ||
1502 | unsigned long flags; | ||
1503 | enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action; | ||
1504 | struct ccw_device *cdev; | ||
1505 | |||
1506 | spin_lock_irqsave(sch->lock, flags); | ||
1507 | cdev = sch_get_cdev(sch); | ||
1508 | disc = device_is_disconnected(cdev); | ||
1509 | if (disc && slow) { | ||
1510 | /* Disconnected devices are evaluated directly only.*/ | ||
1511 | spin_unlock_irqrestore(sch->lock, flags); | ||
1512 | return 0; | ||
1513 | } | ||
1514 | /* No interrupt after machine check - kill pending timers. */ | ||
1515 | if (cdev) | ||
1516 | ccw_device_set_timeout(cdev, 0); | ||
1517 | if (!disc && !slow) { | ||
1518 | /* Non-disconnected devices are evaluated on the slow path. */ | ||
1519 | spin_unlock_irqrestore(sch->lock, flags); | ||
1520 | return -EAGAIN; | ||
1521 | } | ||
1522 | event = io_subchannel_get_status(sch); | ||
1523 | CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", | ||
1524 | sch->schid.ssid, sch->schid.sch_no, event, | ||
1525 | disc ? "disconnected" : "normal", | ||
1526 | slow ? "slow" : "fast"); | ||
1527 | /* Analyze subchannel status. */ | ||
1528 | action = NONE; | ||
1529 | switch (event) { | ||
1530 | case CIO_NO_PATH: | ||
1531 | if (disc) { | ||
1532 | /* Check if paths have become available. */ | ||
1533 | action = REPROBE; | ||
1534 | break; | ||
1535 | } | ||
1536 | /* fall through */ | ||
1537 | case CIO_GONE: | ||
1538 | /* Prevent unwanted effects when opening lock. */ | ||
1539 | cio_disable_subchannel(sch); | ||
1540 | device_set_disconnected(cdev); | ||
1541 | /* Ask driver what to do with device. */ | ||
1542 | action = UNREGISTER; | ||
1543 | spin_unlock_irqrestore(sch->lock, flags); | ||
1544 | ret = io_subchannel_notify(sch, event); | ||
1545 | spin_lock_irqsave(sch->lock, flags); | ||
1546 | if (ret) | ||
1547 | action = NONE; | ||
1548 | break; | ||
1549 | case CIO_REVALIDATE: | ||
1550 | /* Device will be removed, so no notify necessary. */ | ||
1551 | if (disc) | ||
1552 | /* Reprobe because immediate unregister might block. */ | ||
1553 | action = REPROBE; | ||
1554 | else | ||
1555 | action = UNREGISTER_PROBE; | ||
1556 | break; | ||
1557 | case CIO_OPER: | ||
1558 | if (disc) | ||
1559 | /* Get device operational again. */ | ||
1560 | action = REPROBE; | ||
1561 | break; | ||
1562 | } | ||
1563 | /* Perform action. */ | ||
1564 | ret = 0; | ||
1565 | switch (action) { | ||
1566 | case UNREGISTER: | ||
1567 | case UNREGISTER_PROBE: | ||
1568 | /* Unregister device (will use subchannel lock). */ | ||
1569 | spin_unlock_irqrestore(sch->lock, flags); | ||
1570 | css_sch_device_unregister(sch); | ||
1571 | spin_lock_irqsave(sch->lock, flags); | ||
1572 | |||
1573 | /* Reset intparm to zeroes. */ | ||
1574 | sch->schib.pmcw.intparm = 0; | ||
1575 | cio_modify(sch); | ||
1576 | break; | ||
1577 | case REPROBE: | ||
1578 | ccw_device_trigger_reprobe(cdev); | ||
1579 | break; | ||
1580 | default: | ||
1581 | break; | ||
1582 | } | ||
1583 | spin_unlock_irqrestore(sch->lock, flags); | ||
1584 | /* Probe if necessary. */ | ||
1585 | if (action == UNREGISTER_PROBE) | ||
1586 | ret = css_probe_device(sch->schid); | ||
1587 | |||
1588 | return ret; | ||
1589 | } | ||
1590 | |||
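The event handling in io_subchannel_sch_event() reduces to a small decision table: the subchannel status from io_subchannel_get_status() and the "disconnected" flag select the action, and for CIO_GONE/CIO_NO_PATH the driver's notify callback can veto unregistration. A stand-alone condensation of that table; the notify interplay is collapsed into a single flag here:

	#include <stdio.h>

	enum status { CIO_GONE, CIO_NO_PATH, CIO_REVALIDATE, CIO_OPER };
	enum action { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE };

	static enum action pick_action(enum status ev, int disc, int drv_keeps_it)
	{
		switch (ev) {
		case CIO_NO_PATH:
			if (disc)
				return REPROBE;	/* paths may have come back */
			/* fall through */
		case CIO_GONE:
			return drv_keeps_it ? NONE : UNREGISTER;
		case CIO_REVALIDATE:
			return disc ? REPROBE : UNREGISTER_PROBE;
		case CIO_OPER:
			return disc ? REPROBE : NONE;
		}
		return NONE;
	}

	int main(void)
	{
		printf("revalidate/normal -> %d, oper/disconnected -> %d\n",
		       pick_action(CIO_REVALIDATE, 0, 0),
		       pick_action(CIO_OPER, 1, 0));
		return 0;
	}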
1288 | #ifdef CONFIG_CCW_CONSOLE | 1591 | #ifdef CONFIG_CCW_CONSOLE |
1289 | static struct ccw_device console_cdev; | 1592 | static struct ccw_device console_cdev; |
1290 | static struct ccw_device_private console_private; | 1593 | static struct ccw_device_private console_private; |
@@ -1297,14 +1600,16 @@ spinlock_t * cio_get_console_lock(void) | |||
1297 | return &ccw_console_lock; | 1600 | return &ccw_console_lock; |
1298 | } | 1601 | } |
1299 | 1602 | ||
1300 | static int | 1603 | static int ccw_device_console_enable(struct ccw_device *cdev, |
1301 | ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) | 1604 | struct subchannel *sch) |
1302 | { | 1605 | { |
1303 | int rc; | 1606 | int rc; |
1304 | 1607 | ||
1305 | /* Attach subchannel private data. */ | 1608 | /* Attach subchannel private data. */ |
1306 | sch->private = cio_get_console_priv(); | 1609 | sch->private = cio_get_console_priv(); |
1307 | memset(sch->private, 0, sizeof(struct io_subchannel_private)); | 1610 | memset(sch->private, 0, sizeof(struct io_subchannel_private)); |
1611 | io_subchannel_init_fields(sch); | ||
1612 | sch->driver = &io_subchannel_driver; | ||
1308 | /* Initialize the ccw_device structure. */ | 1613 | /* Initialize the ccw_device structure. */ |
1309 | cdev->dev.parent= &sch->dev; | 1614 | cdev->dev.parent= &sch->dev; |
1310 | rc = io_subchannel_recog(cdev, sch); | 1615 | rc = io_subchannel_recog(cdev, sch); |
@@ -1515,71 +1820,6 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev) | |||
1515 | return sch->schid; | 1820 | return sch->schid; |
1516 | } | 1821 | } |
1517 | 1822 | ||
1518 | static int recovery_check(struct device *dev, void *data) | ||
1519 | { | ||
1520 | struct ccw_device *cdev = to_ccwdev(dev); | ||
1521 | int *redo = data; | ||
1522 | |||
1523 | spin_lock_irq(cdev->ccwlock); | ||
1524 | switch (cdev->private->state) { | ||
1525 | case DEV_STATE_DISCONNECTED: | ||
1526 | CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n", | ||
1527 | cdev->private->dev_id.ssid, | ||
1528 | cdev->private->dev_id.devno); | ||
1529 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | ||
1530 | *redo = 1; | ||
1531 | break; | ||
1532 | case DEV_STATE_DISCONNECTED_SENSE_ID: | ||
1533 | *redo = 1; | ||
1534 | break; | ||
1535 | } | ||
1536 | spin_unlock_irq(cdev->ccwlock); | ||
1537 | |||
1538 | return 0; | ||
1539 | } | ||
1540 | |||
1541 | static void recovery_work_func(struct work_struct *unused) | ||
1542 | { | ||
1543 | int redo = 0; | ||
1544 | |||
1545 | bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check); | ||
1546 | if (redo) { | ||
1547 | spin_lock_irq(&recovery_lock); | ||
1548 | if (!timer_pending(&recovery_timer)) { | ||
1549 | if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1) | ||
1550 | recovery_phase++; | ||
1551 | mod_timer(&recovery_timer, jiffies + | ||
1552 | recovery_delay[recovery_phase] * HZ); | ||
1553 | } | ||
1554 | spin_unlock_irq(&recovery_lock); | ||
1555 | } else | ||
1556 | CIO_MSG_EVENT(4, "recovery: end\n"); | ||
1557 | } | ||
1558 | |||
1559 | static DECLARE_WORK(recovery_work, recovery_work_func); | ||
1560 | |||
1561 | static void recovery_func(unsigned long data) | ||
1562 | { | ||
1563 | /* | ||
1564 | * We can't do our recovery in softirq context and it's not | ||
1565 | * performance critical, so we schedule it. | ||
1566 | */ | ||
1567 | schedule_work(&recovery_work); | ||
1568 | } | ||
1569 | |||
1570 | void ccw_device_schedule_recovery(void) | ||
1571 | { | ||
1572 | unsigned long flags; | ||
1573 | |||
1574 | CIO_MSG_EVENT(4, "recovery: schedule\n"); | ||
1575 | spin_lock_irqsave(&recovery_lock, flags); | ||
1576 | if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { | ||
1577 | recovery_phase = 0; | ||
1578 | mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ); | ||
1579 | } | ||
1580 | spin_unlock_irqrestore(&recovery_lock, flags); | ||
1581 | } | ||
1582 | |||
1583 | MODULE_LICENSE("GPL"); | 1823 | MODULE_LICENSE("GPL"); |
1584 | EXPORT_SYMBOL(ccw_device_set_online); | 1824 | EXPORT_SYMBOL(ccw_device_set_online); |
1585 | EXPORT_SYMBOL(ccw_device_set_offline); | 1825 | EXPORT_SYMBOL(ccw_device_set_offline); |
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index cb08092be39f..9800a8335a3f 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
@@ -88,8 +88,6 @@ int ccw_device_recognition(struct ccw_device *); | |||
88 | int ccw_device_online(struct ccw_device *); | 88 | int ccw_device_online(struct ccw_device *); |
89 | int ccw_device_offline(struct ccw_device *); | 89 | int ccw_device_offline(struct ccw_device *); |
90 | 90 | ||
91 | void ccw_device_schedule_recovery(void); | ||
92 | |||
93 | /* Function prototypes for device status and basic sense stuff. */ | 91 | /* Function prototypes for device status and basic sense stuff. */ |
94 | void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); | 92 | void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); |
95 | void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); | 93 | void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); |
@@ -118,6 +116,11 @@ int ccw_device_call_handler(struct ccw_device *); | |||
118 | 116 | ||
119 | int ccw_device_stlck(struct ccw_device *); | 117 | int ccw_device_stlck(struct ccw_device *); |
120 | 118 | ||
119 | /* Helper function for machine check handling. */ | ||
120 | void ccw_device_trigger_reprobe(struct ccw_device *); | ||
121 | void ccw_device_kill_io(struct ccw_device *); | ||
122 | int ccw_device_notify(struct ccw_device *, int); | ||
123 | |||
121 | /* qdio needs this. */ | 124 | /* qdio needs this. */ |
122 | void ccw_device_set_timeout(struct ccw_device *, int); | 125 | void ccw_device_set_timeout(struct ccw_device *, int); |
123 | extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); | 126 | extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index e268d5a77c12..8b5fe57fb2f3 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -2,8 +2,7 @@ | |||
2 | * drivers/s390/cio/device_fsm.c | 2 | * drivers/s390/cio/device_fsm.c |
3 | * finite state machine for device handling | 3 | * finite state machine for device handling |
4 | * | 4 | * |
5 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | 5 | * Copyright IBM Corp. 2002,2008 |
6 | * IBM Corporation | ||
7 | * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) | 6 | * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) |
8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
9 | */ | 8 | */ |
@@ -27,65 +26,6 @@ | |||
27 | 26 | ||
28 | static int timeout_log_enabled; | 27 | static int timeout_log_enabled; |
29 | 28 | ||
30 | int | ||
31 | device_is_online(struct subchannel *sch) | ||
32 | { | ||
33 | struct ccw_device *cdev; | ||
34 | |||
35 | cdev = sch_get_cdev(sch); | ||
36 | if (!cdev) | ||
37 | return 0; | ||
38 | return (cdev->private->state == DEV_STATE_ONLINE); | ||
39 | } | ||
40 | |||
41 | int | ||
42 | device_is_disconnected(struct subchannel *sch) | ||
43 | { | ||
44 | struct ccw_device *cdev; | ||
45 | |||
46 | cdev = sch_get_cdev(sch); | ||
47 | if (!cdev) | ||
48 | return 0; | ||
49 | return (cdev->private->state == DEV_STATE_DISCONNECTED || | ||
50 | cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); | ||
51 | } | ||
52 | |||
53 | void | ||
54 | device_set_disconnected(struct subchannel *sch) | ||
55 | { | ||
56 | struct ccw_device *cdev; | ||
57 | |||
58 | cdev = sch_get_cdev(sch); | ||
59 | if (!cdev) | ||
60 | return; | ||
61 | ccw_device_set_timeout(cdev, 0); | ||
62 | cdev->private->flags.fake_irb = 0; | ||
63 | cdev->private->state = DEV_STATE_DISCONNECTED; | ||
64 | if (cdev->online) | ||
65 | ccw_device_schedule_recovery(); | ||
66 | } | ||
67 | |||
68 | void device_set_intretry(struct subchannel *sch) | ||
69 | { | ||
70 | struct ccw_device *cdev; | ||
71 | |||
72 | cdev = sch_get_cdev(sch); | ||
73 | if (!cdev) | ||
74 | return; | ||
75 | cdev->private->flags.intretry = 1; | ||
76 | } | ||
77 | |||
78 | int device_trigger_verify(struct subchannel *sch) | ||
79 | { | ||
80 | struct ccw_device *cdev; | ||
81 | |||
82 | cdev = sch_get_cdev(sch); | ||
83 | if (!cdev || !cdev->online) | ||
84 | return -EINVAL; | ||
85 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | ||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | static int __init ccw_timeout_log_setup(char *unused) | 29 | static int __init ccw_timeout_log_setup(char *unused) |
90 | { | 30 | { |
91 | timeout_log_enabled = 1; | 31 | timeout_log_enabled = 1; |
@@ -99,31 +39,43 @@ static void ccw_timeout_log(struct ccw_device *cdev) | |||
99 | struct schib schib; | 39 | struct schib schib; |
100 | struct subchannel *sch; | 40 | struct subchannel *sch; |
101 | struct io_subchannel_private *private; | 41 | struct io_subchannel_private *private; |
42 | union orb *orb; | ||
102 | int cc; | 43 | int cc; |
103 | 44 | ||
104 | sch = to_subchannel(cdev->dev.parent); | 45 | sch = to_subchannel(cdev->dev.parent); |
105 | private = to_io_private(sch); | 46 | private = to_io_private(sch); |
47 | orb = &private->orb; | ||
106 | cc = stsch(sch->schid, &schib); | 48 | cc = stsch(sch->schid, &schib); |
107 | 49 | ||
108 | printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " | 50 | printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " |
109 | "device information:\n", get_clock()); | 51 | "device information:\n", get_clock()); |
110 | printk(KERN_WARNING "cio: orb:\n"); | 52 | printk(KERN_WARNING "cio: orb:\n"); |
111 | print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, | 53 | print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, |
112 | &private->orb, sizeof(private->orb), 0); | 54 | orb, sizeof(*orb), 0); |
113 | printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id); | 55 | printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id); |
114 | printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id); | 56 | printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id); |
115 | printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " | 57 | printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " |
116 | "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); | 58 | "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); |
117 | 59 | ||
118 | if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw || | 60 | if (orb->tm.b) { |
119 | (void *)(addr_t)private->orb.cpa == cdev->private->iccws) | 61 | printk(KERN_WARNING "cio: orb indicates transport mode\n"); |
120 | printk(KERN_WARNING "cio: last channel program (intern):\n"); | 62 | printk(KERN_WARNING "cio: last tcw:\n"); |
121 | else | 63 | print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, |
122 | printk(KERN_WARNING "cio: last channel program:\n"); | 64 | (void *)(addr_t)orb->tm.tcw, |
123 | 65 | sizeof(struct tcw), 0); | |
124 | print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, | 66 | } else { |
125 | (void *)(addr_t)private->orb.cpa, | 67 | printk(KERN_WARNING "cio: orb indicates command mode\n"); |
126 | sizeof(struct ccw1), 0); | 68 | if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw || |
69 | (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws) | ||
70 | printk(KERN_WARNING "cio: last channel program " | ||
71 | "(intern):\n"); | ||
72 | else | ||
73 | printk(KERN_WARNING "cio: last channel program:\n"); | ||
74 | |||
75 | print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, | ||
76 | (void *)(addr_t)orb->cmd.cpa, | ||
77 | sizeof(struct ccw1), 0); | ||
78 | } | ||
127 | printk(KERN_WARNING "cio: ccw device state: %d\n", | 79 | printk(KERN_WARNING "cio: ccw device state: %d\n", |
128 | cdev->private->state); | 80 | cdev->private->state); |
129 | printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc); | 81 | printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc); |
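The reworked timeout log first checks the ORB format bit (orb->tm.b) to decide whether the stalled request was started in transport mode (dump the TCW) or in command mode (dump the CCW channel program). A simplified stand-alone sketch of that dispatch, with invented stand-in types:

	#include <stdio.h>

	struct fake_orb {
		int transport_mode;	/* stands in for orb->tm.b */
		unsigned long tcw;	/* transport-command word address */
		unsigned long cpa;	/* command-mode channel program address */
	};

	static void log_request(const struct fake_orb *orb)
	{
		if (orb->transport_mode)
			printf("orb indicates transport mode, tcw at %#lx\n",
			       orb->tcw);
		else
			printf("orb indicates command mode, cpa at %#lx\n",
			       orb->cpa);
	}

	int main(void)
	{
		struct fake_orb cmd = { 0, 0, 0x2000 }, tm = { 1, 0x1000, 0 };

		log_request(&cmd);
		log_request(&tm);
		return 0;
	}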
@@ -171,18 +123,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires) | |||
171 | add_timer(&cdev->private->timer); | 123 | add_timer(&cdev->private->timer); |
172 | } | 124 | } |
173 | 125 | ||
174 | /* Kill any pending timers after machine check. */ | ||
175 | void | ||
176 | device_kill_pending_timer(struct subchannel *sch) | ||
177 | { | ||
178 | struct ccw_device *cdev; | ||
179 | |||
180 | cdev = sch_get_cdev(sch); | ||
181 | if (!cdev) | ||
182 | return; | ||
183 | ccw_device_set_timeout(cdev, 0); | ||
184 | } | ||
185 | |||
186 | /* | 126 | /* |
187 | * Cancel running i/o. This is called repeatedly since halt/clear are | 127 | * Cancel running i/o. This is called repeatedly since halt/clear are |
188 | * asynchronous operations. We do one try with cio_cancel, two tries | 128 | * asynchronous operations. We do one try with cio_cancel, two tries |
@@ -205,15 +145,18 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) | |||
205 | /* Not operational -> done. */ | 145 | /* Not operational -> done. */ |
206 | return 0; | 146 | return 0; |
207 | /* Stage 1: cancel io. */ | 147 | /* Stage 1: cancel io. */ |
208 | if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) && | 148 | if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) && |
209 | !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { | 149 | !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { |
210 | ret = cio_cancel(sch); | 150 | if (!scsw_is_tm(&sch->schib.scsw)) { |
211 | if (ret != -EINVAL) | 151 | ret = cio_cancel(sch); |
212 | return ret; | 152 | if (ret != -EINVAL) |
213 | /* cancel io unsuccessful. From now on it is asynchronous. */ | 153 | return ret; |
154 | } | ||
155 | /* cancel io unsuccessful or not applicable (transport mode). | ||
156 | * Continue with asynchronous instructions. */ | ||
214 | cdev->private->iretry = 3; /* 3 halt retries. */ | 157 | cdev->private->iretry = 3; /* 3 halt retries. */ |
215 | } | 158 | } |
216 | if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { | 159 | if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { |
217 | /* Stage 2: halt io. */ | 160 | /* Stage 2: halt io. */ |
218 | if (cdev->private->iretry) { | 161 | if (cdev->private->iretry) { |
219 | cdev->private->iretry--; | 162 | cdev->private->iretry--; |
@@ -388,34 +331,30 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err) | |||
388 | } | 331 | } |
389 | } | 332 | } |
390 | 333 | ||
334 | int ccw_device_notify(struct ccw_device *cdev, int event) | ||
335 | { | ||
336 | if (!cdev->drv) | ||
337 | return 0; | ||
338 | if (!cdev->online) | ||
339 | return 0; | ||
340 | return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; | ||
341 | } | ||
342 | |||
391 | static void | 343 | static void |
392 | ccw_device_oper_notify(struct work_struct *work) | 344 | ccw_device_oper_notify(struct work_struct *work) |
393 | { | 345 | { |
394 | struct ccw_device_private *priv; | 346 | struct ccw_device_private *priv; |
395 | struct ccw_device *cdev; | 347 | struct ccw_device *cdev; |
396 | struct subchannel *sch; | ||
397 | int ret; | 348 | int ret; |
398 | unsigned long flags; | ||
399 | 349 | ||
400 | priv = container_of(work, struct ccw_device_private, kick_work); | 350 | priv = container_of(work, struct ccw_device_private, kick_work); |
401 | cdev = priv->cdev; | 351 | cdev = priv->cdev; |
402 | spin_lock_irqsave(cdev->ccwlock, flags); | 352 | ret = ccw_device_notify(cdev, CIO_OPER); |
403 | sch = to_subchannel(cdev->dev.parent); | ||
404 | if (sch->driver && sch->driver->notify) { | ||
405 | spin_unlock_irqrestore(cdev->ccwlock, flags); | ||
406 | ret = sch->driver->notify(sch, CIO_OPER); | ||
407 | spin_lock_irqsave(cdev->ccwlock, flags); | ||
408 | } else | ||
409 | ret = 0; | ||
410 | if (ret) { | 353 | if (ret) { |
411 | /* Reenable channel measurements, if needed. */ | 354 | /* Reenable channel measurements, if needed. */ |
412 | spin_unlock_irqrestore(cdev->ccwlock, flags); | ||
413 | cmf_reenable(cdev); | 355 | cmf_reenable(cdev); |
414 | spin_lock_irqsave(cdev->ccwlock, flags); | ||
415 | wake_up(&cdev->private->wait_q); | 356 | wake_up(&cdev->private->wait_q); |
416 | } | 357 | } else |
417 | spin_unlock_irqrestore(cdev->ccwlock, flags); | ||
418 | if (!ret) | ||
419 | /* Driver doesn't want device back. */ | 358 | /* Driver doesn't want device back. */ |
420 | ccw_device_do_unreg_rereg(work); | 359 | ccw_device_do_unreg_rereg(work); |
421 | } | 360 | } |
@@ -621,10 +560,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err) | |||
621 | /* Deliver fake irb to device driver, if needed. */ | 560 | /* Deliver fake irb to device driver, if needed. */ |
622 | if (cdev->private->flags.fake_irb) { | 561 | if (cdev->private->flags.fake_irb) { |
623 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | 562 | memset(&cdev->private->irb, 0, sizeof(struct irb)); |
624 | cdev->private->irb.scsw.cc = 1; | 563 | cdev->private->irb.scsw.cmd.cc = 1; |
625 | cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC; | 564 | cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC; |
626 | cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND; | 565 | cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND; |
627 | cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND; | 566 | cdev->private->irb.scsw.cmd.stctl = |
567 | SCSW_STCTL_STATUS_PEND; | ||
628 | cdev->private->flags.fake_irb = 0; | 568 | cdev->private->flags.fake_irb = 0; |
629 | if (cdev->handler) | 569 | if (cdev->handler) |
630 | cdev->handler(cdev, cdev->private->intparm, | 570 | cdev->handler(cdev, cdev->private->intparm, |
@@ -718,13 +658,10 @@ ccw_device_offline(struct ccw_device *cdev) | |||
718 | sch = to_subchannel(cdev->dev.parent); | 658 | sch = to_subchannel(cdev->dev.parent); |
719 | if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) | 659 | if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) |
720 | return -ENODEV; | 660 | return -ENODEV; |
721 | if (cdev->private->state != DEV_STATE_ONLINE) { | 661 | if (scsw_actl(&sch->schib.scsw) != 0) |
722 | if (sch->schib.scsw.actl != 0) | ||
723 | return -EBUSY; | ||
724 | return -EINVAL; | ||
725 | } | ||
726 | if (sch->schib.scsw.actl != 0) | ||
727 | return -EBUSY; | 662 | return -EBUSY; |
663 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
664 | return -EINVAL; | ||
728 | /* Are we doing path grouping? */ | 665 | /* Are we doing path grouping? */ |
729 | if (!cdev->private->options.pgroup) { | 666 | if (!cdev->private->options.pgroup) { |
730 | /* No, set state offline immediately. */ | 667 | /* No, set state offline immediately. */ |
@@ -799,9 +736,9 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) | |||
799 | */ | 736 | */ |
800 | stsch(sch->schid, &sch->schib); | 737 | stsch(sch->schid, &sch->schib); |
801 | 738 | ||
802 | if (sch->schib.scsw.actl != 0 || | 739 | if (scsw_actl(&sch->schib.scsw) != 0 || |
803 | (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) || | 740 | (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || |
804 | (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { | 741 | (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) { |
805 | /* | 742 | /* |
806 | * No final status yet or final status not yet delivered | 743 | * No final status yet or final status not yet delivered |
807 | * to the device driver. Can't do path verification now, | 744 |
@@ -823,13 +760,13 @@ static void | |||
823 | ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) | 760 | ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) |
824 | { | 761 | { |
825 | struct irb *irb; | 762 | struct irb *irb; |
763 | int is_cmd; | ||
826 | 764 | ||
827 | irb = (struct irb *) __LC_IRB; | 765 | irb = (struct irb *) __LC_IRB; |
766 | is_cmd = !scsw_is_tm(&irb->scsw); | ||
828 | /* Check for unsolicited interrupt. */ | 767 | /* Check for unsolicited interrupt. */ |
829 | if ((irb->scsw.stctl == | 768 | if (!scsw_is_solicited(&irb->scsw)) { |
830 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) | 769 | if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && |
831 | && (!irb->scsw.cc)) { | ||
832 | if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && | ||
833 | !irb->esw.esw0.erw.cons) { | 770 | !irb->esw.esw0.erw.cons) { |
834 | /* Unit check but no sense data. Need basic sense. */ | 771 | /* Unit check but no sense data. Need basic sense. */ |
835 | if (ccw_device_do_sense(cdev, irb) != 0) | 772 | if (ccw_device_do_sense(cdev, irb) != 0) |
@@ -848,7 +785,7 @@ call_handler_unsol: | |||
848 | } | 785 | } |
849 | /* Accumulate status and find out if a basic sense is needed. */ | 786 | /* Accumulate status and find out if a basic sense is needed. */ |
850 | ccw_device_accumulate_irb(cdev, irb); | 787 | ccw_device_accumulate_irb(cdev, irb); |
851 | if (cdev->private->flags.dosense) { | 788 | if (is_cmd && cdev->private->flags.dosense) { |
852 | if (ccw_device_do_sense(cdev, irb) == 0) { | 789 | if (ccw_device_do_sense(cdev, irb) == 0) { |
853 | cdev->private->state = DEV_STATE_W4SENSE; | 790 | cdev->private->state = DEV_STATE_W4SENSE; |
854 | } | 791 | } |
@@ -892,9 +829,9 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) | |||
892 | 829 | ||
893 | irb = (struct irb *) __LC_IRB; | 830 | irb = (struct irb *) __LC_IRB; |
894 | /* Check for unsolicited interrupt. */ | 831 | /* Check for unsolicited interrupt. */ |
895 | if (irb->scsw.stctl == | 832 | if (scsw_stctl(&irb->scsw) == |
896 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 833 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
897 | if (irb->scsw.cc == 1) | 834 | if (scsw_cc(&irb->scsw) == 1) |
898 | /* Basic sense hasn't started. Try again. */ | 835 | /* Basic sense hasn't started. Try again. */ |
899 | ccw_device_do_sense(cdev, irb); | 836 | ccw_device_do_sense(cdev, irb); |
900 | else { | 837 | else { |
@@ -912,7 +849,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) | |||
912 | * only deliver the halt/clear interrupt to the device driver as if it | 849 | * only deliver the halt/clear interrupt to the device driver as if it |
913 | * had killed the original request. | 850 | * had killed the original request. |
914 | */ | 851 | */ |
915 | if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { | 852 | if (scsw_fctl(&irb->scsw) & |
853 | (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { | ||
916 | /* Retry Basic Sense if requested. */ | 854 | /* Retry Basic Sense if requested. */ |
917 | if (cdev->private->flags.intretry) { | 855 | if (cdev->private->flags.intretry) { |
918 | cdev->private->flags.intretry = 0; | 856 | cdev->private->flags.intretry = 0; |
@@ -986,12 +924,10 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) | |||
986 | ERR_PTR(-EIO)); | 924 | ERR_PTR(-EIO)); |
987 | } | 925 | } |
988 | 926 | ||
989 | void device_kill_io(struct subchannel *sch) | 927 | void ccw_device_kill_io(struct ccw_device *cdev) |
990 | { | 928 | { |
991 | int ret; | 929 | int ret; |
992 | struct ccw_device *cdev; | ||
993 | 930 | ||
994 | cdev = sch_get_cdev(sch); | ||
995 | ret = ccw_device_cancel_halt_clear(cdev); | 931 | ret = ccw_device_cancel_halt_clear(cdev); |
996 | if (ret == -EBUSY) { | 932 | if (ret == -EBUSY) { |
997 | ccw_device_set_timeout(cdev, 3*HZ); | 933 | ccw_device_set_timeout(cdev, 3*HZ); |
@@ -1021,9 +957,9 @@ ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event) | |||
1021 | case DEV_EVENT_INTERRUPT: | 957 | case DEV_EVENT_INTERRUPT: |
1022 | irb = (struct irb *) __LC_IRB; | 958 | irb = (struct irb *) __LC_IRB; |
1023 | /* Check for unsolicited interrupt. */ | 959 | /* Check for unsolicited interrupt. */ |
1024 | if ((irb->scsw.stctl == | 960 | if ((scsw_stctl(&irb->scsw) == |
1025 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && | 961 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && |
1026 | (!irb->scsw.cc)) | 962 | (!scsw_cc(&irb->scsw))) |
1027 | /* FIXME: we should restart stlck here, but this | 963 | /* FIXME: we should restart stlck here, but this |
1028 | * is extremely unlikely ... */ | 964 | * is extremely unlikely ... */ |
1029 | goto out_wakeup; | 965 | goto out_wakeup; |
@@ -1055,17 +991,14 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event) | |||
1055 | ccw_device_sense_id_start(cdev); | 991 | ccw_device_sense_id_start(cdev); |
1056 | } | 992 | } |
1057 | 993 | ||
1058 | void | 994 | void ccw_device_trigger_reprobe(struct ccw_device *cdev) |
1059 | device_trigger_reprobe(struct subchannel *sch) | ||
1060 | { | 995 | { |
1061 | struct ccw_device *cdev; | 996 | struct subchannel *sch; |
1062 | 997 | ||
1063 | cdev = sch_get_cdev(sch); | ||
1064 | if (!cdev) | ||
1065 | return; | ||
1066 | if (cdev->private->state != DEV_STATE_DISCONNECTED) | 998 | if (cdev->private->state != DEV_STATE_DISCONNECTED) |
1067 | return; | 999 | return; |
1068 | 1000 | ||
1001 | sch = to_subchannel(cdev->dev.parent); | ||
1069 | /* Update some values. */ | 1002 | /* Update some values. */ |
1070 | if (stsch(sch->schid, &sch->schib)) | 1003 | if (stsch(sch->schid, &sch->schib)) |
1071 | return; | 1004 | return; |
@@ -1081,7 +1014,6 @@ device_trigger_reprobe(struct subchannel *sch) | |||
1081 | sch->schib.pmcw.ena = 0; | 1014 | sch->schib.pmcw.ena = 0; |
1082 | if ((sch->lpm & (sch->lpm - 1)) != 0) | 1015 | if ((sch->lpm & (sch->lpm - 1)) != 0) |
1083 | sch->schib.pmcw.mp = 1; | 1016 | sch->schib.pmcw.mp = 1; |
1084 | sch->schib.pmcw.intparm = (u32)(addr_t)sch; | ||
1085 | /* We should also update ssd info, but this has to wait. */ | 1017 |
1086 | /* Check if this is another device which appeared on the same sch. */ | 1018 | /* Check if this is another device which appeared on the same sch. */ |
1087 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { | 1019 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { |
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index cba7020517ed..1bdaa614e34f 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c | |||
@@ -196,7 +196,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev) | |||
196 | irb = &cdev->private->irb; | 196 | irb = &cdev->private->irb; |
197 | 197 | ||
198 | /* Check the error cases. */ | 198 | /* Check the error cases. */ |
199 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { | 199 | if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { |
200 | /* Retry Sense ID if requested. */ | 200 | /* Retry Sense ID if requested. */ |
201 | if (cdev->private->flags.intretry) { | 201 | if (cdev->private->flags.intretry) { |
202 | cdev->private->flags.intretry = 0; | 202 | cdev->private->flags.intretry = 0; |
@@ -234,10 +234,10 @@ ccw_device_check_sense_id(struct ccw_device *cdev) | |||
234 | irb->ecw[6], irb->ecw[7]); | 234 | irb->ecw[6], irb->ecw[7]); |
235 | return -EAGAIN; | 235 | return -EAGAIN; |
236 | } | 236 | } |
237 | if (irb->scsw.cc == 3) { | 237 | if (irb->scsw.cmd.cc == 3) { |
238 | u8 lpm; | 238 | u8 lpm; |
239 | 239 | ||
240 | lpm = to_io_private(sch)->orb.lpm; | 240 | lpm = to_io_private(sch)->orb.cmd.lpm; |
241 | if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) | 241 | if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) |
242 | CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x " | 242 | CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x " |
243 | "on subchannel 0.%x.%04x is " | 243 | "on subchannel 0.%x.%04x is " |
@@ -248,9 +248,9 @@ ccw_device_check_sense_id(struct ccw_device *cdev) | |||
248 | } | 248 | } |
249 | 249 | ||
250 | /* Did we get a proper answer ? */ | 250 | /* Did we get a proper answer ? */ |
251 | if (irb->scsw.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF && | 251 | if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF && |
252 | cdev->private->senseid.reserved == 0xFF) { | 252 | cdev->private->senseid.reserved == 0xFF) { |
253 | if (irb->scsw.count < sizeof(struct senseid) - 8) | 253 | if (irb->scsw.cmd.count < sizeof(struct senseid) - 8) |
254 | cdev->private->flags.esid = 1; | 254 | cdev->private->flags.esid = 1; |
255 | return 0; /* Success */ | 255 | return 0; /* Success */ |
256 | } | 256 | } |
@@ -260,7 +260,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev) | |||
260 | "subchannel 0.%x.%04x returns status %02X%02X\n", | 260 | "subchannel 0.%x.%04x returns status %02X%02X\n", |
261 | cdev->private->dev_id.devno, sch->schid.ssid, | 261 | cdev->private->dev_id.devno, sch->schid.ssid, |
262 | sch->schid.sch_no, | 262 | sch->schid.sch_no, |
263 | irb->scsw.dstat, irb->scsw.cstat); | 263 | irb->scsw.cmd.dstat, irb->scsw.cmd.cstat); |
264 | return -EAGAIN; | 264 | return -EAGAIN; |
265 | } | 265 | } |
266 | 266 | ||
@@ -277,9 +277,9 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
277 | sch = to_subchannel(cdev->dev.parent); | 277 | sch = to_subchannel(cdev->dev.parent); |
278 | irb = (struct irb *) __LC_IRB; | 278 | irb = (struct irb *) __LC_IRB; |
279 | /* Retry sense id, if needed. */ | 279 | /* Retry sense id, if needed. */ |
280 | if (irb->scsw.stctl == | 280 | if (irb->scsw.cmd.stctl == |
281 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 281 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
282 | if ((irb->scsw.cc == 1) || !irb->scsw.actl) { | 282 | if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) { |
283 | ret = __ccw_device_sense_id_start(cdev); | 283 | ret = __ccw_device_sense_id_start(cdev); |
284 | if (ret && ret != -EBUSY) | 284 | if (ret && ret != -EBUSY) |
285 | ccw_device_sense_id_done(cdev, ret); | 285 | ccw_device_sense_id_done(cdev, ret); |
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index f308ad55a6d5..ee1a28310fbb 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/ccwdev.h> | 17 | #include <asm/ccwdev.h> |
18 | #include <asm/idals.h> | 18 | #include <asm/idals.h> |
19 | #include <asm/chpid.h> | 19 | #include <asm/chpid.h> |
20 | #include <asm/fcx.h> | ||
20 | 21 | ||
21 | #include "cio.h" | 22 | #include "cio.h" |
22 | #include "cio_debug.h" | 23 | #include "cio_debug.h" |
@@ -179,8 +180,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, | |||
179 | return -EBUSY; | 180 | return -EBUSY; |
180 | } | 181 | } |
181 | if (cdev->private->state != DEV_STATE_ONLINE || | 182 | if (cdev->private->state != DEV_STATE_ONLINE || |
182 | ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) && | 183 | ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) && |
183 | !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) || | 184 | !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) || |
184 | cdev->private->flags.doverify) | 185 | cdev->private->flags.doverify) |
185 | return -EBUSY; | 186 | return -EBUSY; |
186 | ret = cio_set_options (sch, flags); | 187 | ret = cio_set_options (sch, flags); |
@@ -379,7 +380,7 @@ int ccw_device_resume(struct ccw_device *cdev) | |||
379 | if (cdev->private->state == DEV_STATE_NOT_OPER) | 380 | if (cdev->private->state == DEV_STATE_NOT_OPER) |
380 | return -ENODEV; | 381 | return -ENODEV; |
381 | if (cdev->private->state != DEV_STATE_ONLINE || | 382 | if (cdev->private->state != DEV_STATE_ONLINE || |
382 | !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) | 383 | !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) |
383 | return -EINVAL; | 384 | return -EINVAL; |
384 | return cio_resume(sch); | 385 | return cio_resume(sch); |
385 | } | 386 | } |
@@ -404,7 +405,7 @@ ccw_device_call_handler(struct ccw_device *cdev) | |||
404 | * - fast notification was requested (primary status) | 405 | * - fast notification was requested (primary status) |
405 | * - unsolicited interrupts | 406 | * - unsolicited interrupts |
406 | */ | 407 | */ |
407 | stctl = cdev->private->irb.scsw.stctl; | 408 | stctl = scsw_stctl(&cdev->private->irb.scsw); |
408 | ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || | 409 | ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || |
409 | (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || | 410 | (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || |
410 | (stctl == SCSW_STCTL_STATUS_PEND); | 411 | (stctl == SCSW_STCTL_STATUS_PEND); |
@@ -528,14 +529,15 @@ ccw_device_stlck(struct ccw_device *cdev) | |||
528 | cio_disable_subchannel(sch); //FIXME: return code? | 529 | cio_disable_subchannel(sch); //FIXME: return code? |
529 | goto out_unlock; | 530 | goto out_unlock; |
530 | } | 531 | } |
531 | cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND; | 532 | cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND; |
532 | spin_unlock_irqrestore(sch->lock, flags); | 533 | spin_unlock_irqrestore(sch->lock, flags); |
533 | wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0); | 534 | wait_event(cdev->private->wait_q, |
535 | cdev->private->irb.scsw.cmd.actl == 0); | ||
534 | spin_lock_irqsave(sch->lock, flags); | 536 | spin_lock_irqsave(sch->lock, flags); |
535 | cio_disable_subchannel(sch); //FIXME: return code? | 537 | cio_disable_subchannel(sch); //FIXME: return code? |
536 | if ((cdev->private->irb.scsw.dstat != | 538 | if ((cdev->private->irb.scsw.cmd.dstat != |
537 | (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || | 539 | (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || |
538 | (cdev->private->irb.scsw.cstat != 0)) | 540 | (cdev->private->irb.scsw.cmd.cstat != 0)) |
539 | ret = -EIO; | 541 | ret = -EIO; |
540 | /* Clear irb. */ | 542 | /* Clear irb. */ |
541 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | 543 | memset(&cdev->private->irb, 0, sizeof(struct irb)); |
@@ -568,6 +570,122 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id) | |||
568 | } | 570 | } |
569 | EXPORT_SYMBOL(ccw_device_get_id); | 571 | EXPORT_SYMBOL(ccw_device_get_id); |
570 | 572 | ||
573 | /** | ||
574 | * ccw_device_tm_start_key - perform start function | ||
575 | * @cdev: ccw device on which to perform the start function | ||
576 | * @tcw: transport-command word to be started | ||
577 | * @intparm: user defined parameter to be passed to the interrupt handler | ||
578 | * @lpm: mask of paths to use | ||
579 | * @key: storage key to use for storage access | ||
580 | * | ||
581 | * Start the tcw on the given ccw device. Return zero on success, non-zero | ||
582 | * otherwise. | ||
583 | */ | ||
584 | int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, | ||
585 | unsigned long intparm, u8 lpm, u8 key) | ||
586 | { | ||
587 | struct subchannel *sch; | ||
588 | int rc; | ||
589 | |||
590 | sch = to_subchannel(cdev->dev.parent); | ||
591 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
592 | return -EIO; | ||
593 | /* Adjust requested path mask to exclude varied off paths. */ | ||
594 | if (lpm) { | ||
595 | lpm &= sch->opm; | ||
596 | if (lpm == 0) | ||
597 | return -EACCES; | ||
598 | } | ||
599 | rc = cio_tm_start_key(sch, tcw, lpm, key); | ||
600 | if (rc == 0) | ||
601 | cdev->private->intparm = intparm; | ||
602 | return rc; | ||
603 | } | ||
604 | EXPORT_SYMBOL(ccw_device_tm_start_key); | ||
605 | |||
606 | /** | ||
607 | * ccw_device_tm_start_timeout_key - perform start function | ||
608 | * @cdev: ccw device on which to perform the start function | ||
609 | * @tcw: transport-command word to be started | ||
610 | * @intparm: user defined parameter to be passed to the interrupt handler | ||
611 | * @lpm: mask of paths to use | ||
612 | * @key: storage key to use for storage access | ||
613 | * @expires: time span in jiffies after which to abort request | ||
614 | * | ||
615 | * Start the tcw on the given ccw device. Return zero on success, non-zero | ||
616 | * otherwise. | ||
617 | */ | ||
618 | int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, | ||
619 | unsigned long intparm, u8 lpm, u8 key, | ||
620 | int expires) | ||
621 | { | ||
622 | int ret; | ||
623 | |||
624 | ccw_device_set_timeout(cdev, expires); | ||
625 | ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key); | ||
626 | if (ret != 0) | ||
627 | ccw_device_set_timeout(cdev, 0); | ||
628 | return ret; | ||
629 | } | ||
630 | EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); | ||
631 | |||
632 | /** | ||
633 | * ccw_device_tm_start - perform start function | ||
634 | * @cdev: ccw device on which to perform the start function | ||
635 | * @tcw: transport-command word to be started | ||
636 | * @intparm: user defined parameter to be passed to the interrupt handler | ||
637 | * @lpm: mask of paths to use | ||
638 | * | ||
639 | * Start the tcw on the given ccw device. Return zero on success, non-zero | ||
640 | * otherwise. | ||
641 | */ | ||
642 | int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw, | ||
643 | unsigned long intparm, u8 lpm) | ||
644 | { | ||
645 | return ccw_device_tm_start_key(cdev, tcw, intparm, lpm, | ||
646 | PAGE_DEFAULT_KEY); | ||
647 | } | ||
648 | EXPORT_SYMBOL(ccw_device_tm_start); | ||
649 | |||
650 | /** | ||
651 | * ccw_device_tm_start_timeout - perform start function | ||
652 | * @cdev: ccw device on which to perform the start function | ||
653 | * @tcw: transport-command word to be started | ||
654 | * @intparm: user defined parameter to be passed to the interrupt handler | ||
655 | * @lpm: mask of paths to use | ||
656 | * @expires: time span in jiffies after which to abort request | ||
657 | * | ||
658 | * Start the tcw on the given ccw device. Return zero on success, non-zero | ||
659 | * otherwise. | ||
660 | */ | ||
661 | int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw, | ||
662 | unsigned long intparm, u8 lpm, int expires) | ||
663 | { | ||
664 | return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, | ||
665 | PAGE_DEFAULT_KEY, expires); | ||
666 | } | ||
667 | EXPORT_SYMBOL(ccw_device_tm_start_timeout); | ||
668 | |||
669 | /** | ||
670 | * ccw_device_tm_intrg - perform interrogate function | ||
671 | * @cdev: ccw device on which to perform the interrogate function | ||
672 | * | ||
673 | * Perform an interrogate function on the given ccw device. Return zero on | ||
674 | * success, non-zero otherwise. | ||
675 | */ | ||
676 | int ccw_device_tm_intrg(struct ccw_device *cdev) | ||
677 | { | ||
678 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
679 | |||
680 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
681 | return -EIO; | ||
682 | if (!scsw_is_tm(&sch->schib.scsw) || | ||
683 | !(scsw_actl(&sch->schib.scsw) | SCSW_ACTL_START_PEND)) | ||
684 | return -EINVAL; | ||
685 | return cio_tm_intrg(sch); | ||
686 | } | ||
687 | EXPORT_SYMBOL(ccw_device_tm_intrg); | ||
688 | |||
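For orientation, a minimal sketch of how a driver might use the transport-mode start interface added above. The tcw itself is assumed to have been assembled with the fcx helpers introduced later in this patch; the locking convention and the choice of intparm are illustrative only, not part of this change.

	#include <asm/ccwdev.h>
	#include <asm/fcx.h>

	/* Sketch only: start a previously assembled tcw on a ccw device.
	 * The caller is assumed to hold the ccw device lock, as for
	 * ccw_device_start(); intparm is echoed back to the irq handler. */
	static int example_start_tm_request(struct ccw_device *cdev, struct tcw *tcw)
	{
		return ccw_device_tm_start(cdev, tcw, (unsigned long) tcw, 0 /* lpm: no path restriction */);
	}

A timeout variant would call ccw_device_tm_start_timeout() instead, and a request that does not complete can be interrogated with ccw_device_tm_intrg().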
571 | // FIXME: these have to go: | 689 | // FIXME: these have to go: |
572 | 690 | ||
573 | int | 691 | int |
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 5cf7be008e98..86bc94eb607f 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c | |||
@@ -28,13 +28,13 @@ | |||
28 | * Helper function called from interrupt context to decide whether an | 28 | * Helper function called from interrupt context to decide whether an |
29 | * operation should be tried again. | 29 | * operation should be tried again. |
30 | */ | 30 | */ |
31 | static int __ccw_device_should_retry(struct scsw *scsw) | 31 | static int __ccw_device_should_retry(union scsw *scsw) |
32 | { | 32 | { |
33 | /* CC is only valid if start function bit is set. */ | 33 | /* CC is only valid if start function bit is set. */ |
34 | if ((scsw->fctl & SCSW_FCTL_START_FUNC) && scsw->cc == 1) | 34 | if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1) |
35 | return 1; | 35 | return 1; |
36 | /* No more activity. For sense and set PGID we stubbornly try again. */ | 36 | /* No more activity. For sense and set PGID we stubbornly try again. */ |
37 | if (!scsw->actl) | 37 | if (!scsw->cmd.actl) |
38 | return 1; | 38 | return 1; |
39 | return 0; | 39 | return 0; |
40 | } | 40 | } |
@@ -125,7 +125,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) | |||
125 | 125 | ||
126 | sch = to_subchannel(cdev->dev.parent); | 126 | sch = to_subchannel(cdev->dev.parent); |
127 | irb = &cdev->private->irb; | 127 | irb = &cdev->private->irb; |
128 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { | 128 | if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { |
129 | /* Retry Sense PGID if requested. */ | 129 | /* Retry Sense PGID if requested. */ |
130 | if (cdev->private->flags.intretry) { | 130 | if (cdev->private->flags.intretry) { |
131 | cdev->private->flags.intretry = 0; | 131 | cdev->private->flags.intretry = 0; |
@@ -155,10 +155,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) | |||
155 | irb->ecw[6], irb->ecw[7]); | 155 | irb->ecw[6], irb->ecw[7]); |
156 | return -EAGAIN; | 156 | return -EAGAIN; |
157 | } | 157 | } |
158 | if (irb->scsw.cc == 3) { | 158 | if (irb->scsw.cmd.cc == 3) { |
159 | u8 lpm; | 159 | u8 lpm; |
160 | 160 | ||
161 | lpm = to_io_private(sch)->orb.lpm; | 161 | lpm = to_io_private(sch)->orb.cmd.lpm; |
162 | CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x," | 162 | CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x," |
163 | " lpm %02X, became 'not operational'\n", | 163 | " lpm %02X, became 'not operational'\n", |
164 | cdev->private->dev_id.devno, sch->schid.ssid, | 164 | cdev->private->dev_id.devno, sch->schid.ssid, |
@@ -188,7 +188,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
188 | 188 | ||
189 | irb = (struct irb *) __LC_IRB; | 189 | irb = (struct irb *) __LC_IRB; |
190 | 190 | ||
191 | if (irb->scsw.stctl == | 191 | if (irb->scsw.cmd.stctl == |
192 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 192 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
193 | if (__ccw_device_should_retry(&irb->scsw)) { | 193 | if (__ccw_device_should_retry(&irb->scsw)) { |
194 | ret = __ccw_device_sense_pgid_start(cdev); | 194 | ret = __ccw_device_sense_pgid_start(cdev); |
@@ -331,7 +331,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev) | |||
331 | 331 | ||
332 | sch = to_subchannel(cdev->dev.parent); | 332 | sch = to_subchannel(cdev->dev.parent); |
333 | irb = &cdev->private->irb; | 333 | irb = &cdev->private->irb; |
334 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { | 334 | if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { |
335 | /* Retry Set PGID if requested. */ | 335 | /* Retry Set PGID if requested. */ |
336 | if (cdev->private->flags.intretry) { | 336 | if (cdev->private->flags.intretry) { |
337 | cdev->private->flags.intretry = 0; | 337 | cdev->private->flags.intretry = 0; |
@@ -355,7 +355,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev) | |||
355 | irb->ecw[6], irb->ecw[7]); | 355 | irb->ecw[6], irb->ecw[7]); |
356 | return -EAGAIN; | 356 | return -EAGAIN; |
357 | } | 357 | } |
358 | if (irb->scsw.cc == 3) { | 358 | if (irb->scsw.cmd.cc == 3) { |
359 | CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x," | 359 | CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x," |
360 | " lpm %02X, became 'not operational'\n", | 360 | " lpm %02X, became 'not operational'\n", |
361 | cdev->private->dev_id.devno, sch->schid.ssid, | 361 | cdev->private->dev_id.devno, sch->schid.ssid, |
@@ -376,7 +376,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev) | |||
376 | 376 | ||
377 | sch = to_subchannel(cdev->dev.parent); | 377 | sch = to_subchannel(cdev->dev.parent); |
378 | irb = &cdev->private->irb; | 378 | irb = &cdev->private->irb; |
379 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { | 379 | if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { |
380 | /* Retry NOP if requested. */ | 380 | /* Retry NOP if requested. */ |
381 | if (cdev->private->flags.intretry) { | 381 | if (cdev->private->flags.intretry) { |
382 | cdev->private->flags.intretry = 0; | 382 | cdev->private->flags.intretry = 0; |
@@ -384,7 +384,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev) | |||
384 | } | 384 | } |
385 | return -ETIME; | 385 | return -ETIME; |
386 | } | 386 | } |
387 | if (irb->scsw.cc == 3) { | 387 | if (irb->scsw.cmd.cc == 3) { |
388 | CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x," | 388 | CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x," |
389 | " lpm %02X, became 'not operational'\n", | 389 | " lpm %02X, became 'not operational'\n", |
390 | cdev->private->dev_id.devno, sch->schid.ssid, | 390 | cdev->private->dev_id.devno, sch->schid.ssid, |
@@ -438,7 +438,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
438 | 438 | ||
439 | irb = (struct irb *) __LC_IRB; | 439 | irb = (struct irb *) __LC_IRB; |
440 | 440 | ||
441 | if (irb->scsw.stctl == | 441 | if (irb->scsw.cmd.stctl == |
442 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 442 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
443 | if (__ccw_device_should_retry(&irb->scsw)) | 443 | if (__ccw_device_should_retry(&irb->scsw)) |
444 | __ccw_device_verify_start(cdev); | 444 | __ccw_device_verify_start(cdev); |
@@ -544,7 +544,7 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
544 | 544 | ||
545 | irb = (struct irb *) __LC_IRB; | 545 | irb = (struct irb *) __LC_IRB; |
546 | 546 | ||
547 | if (irb->scsw.stctl == | 547 | if (irb->scsw.cmd.stctl == |
548 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 548 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
549 | if (__ccw_device_should_retry(&irb->scsw)) | 549 | if (__ccw_device_should_retry(&irb->scsw)) |
550 | __ccw_device_disband_start(cdev); | 550 | __ccw_device_disband_start(cdev); |
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 4a38993000f2..1b03c5423be2 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c | |||
@@ -29,9 +29,11 @@ | |||
29 | static void | 29 | static void |
30 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) | 30 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) |
31 | { | 31 | { |
32 | if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | | 32 | char dbf_text[15]; |
33 | SCHN_STAT_CHN_CTRL_CHK | | 33 | |
34 | SCHN_STAT_INTF_CTRL_CHK))) | 34 | if (!scsw_is_valid_cstat(&irb->scsw) || |
35 | !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK | | ||
36 | SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK))) | ||
35 | return; | 37 | return; |
36 | CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " | 38 | CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " |
37 | "received" | 39 | "received" |
@@ -39,15 +41,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) | |||
39 | ": %02X sch_stat : %02X\n", | 41 | ": %02X sch_stat : %02X\n", |
40 | cdev->private->dev_id.devno, cdev->private->schid.ssid, | 42 | cdev->private->dev_id.devno, cdev->private->schid.ssid, |
41 | cdev->private->schid.sch_no, | 43 | cdev->private->schid.sch_no, |
42 | irb->scsw.dstat, irb->scsw.cstat); | 44 | scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); |
43 | 45 | sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); | |
44 | if (irb->scsw.cc != 3) { | 46 | CIO_TRACE_EVENT(0, dbf_text); |
45 | char dbf_text[15]; | 47 | CIO_HEX_EVENT(0, irb, sizeof(struct irb)); |
46 | |||
47 | sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); | ||
48 | CIO_TRACE_EVENT(0, dbf_text); | ||
49 | CIO_HEX_EVENT(0, irb, sizeof (struct irb)); | ||
50 | } | ||
51 | } | 48 | } |
52 | 49 | ||
53 | /* | 50 | /* |
@@ -81,12 +78,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) | |||
81 | * are conditions that have to be met for the extended control | 78 | * are conditions that have to be met for the extended control |
82 | * bit to have meaning. Sick. | 79 | * bit to have meaning. Sick. |
83 | */ | 80 | */ |
84 | cdev->private->irb.scsw.ectl = 0; | 81 | cdev->private->irb.scsw.cmd.ectl = 0; |
85 | if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) && | 82 | if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) && |
86 | !(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS)) | 83 | !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS)) |
87 | cdev->private->irb.scsw.ectl = irb->scsw.ectl; | 84 | cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl; |
88 | /* Check if extended control word is valid. */ | 85 | /* Check if extended control word is valid. */ |
89 | if (!cdev->private->irb.scsw.ectl) | 86 | if (!cdev->private->irb.scsw.cmd.ectl) |
90 | return; | 87 | return; |
91 | /* Copy concurrent sense / model dependent information. */ | 88 | /* Copy concurrent sense / model dependent information. */ |
92 | memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw)); | 89 | memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw)); |
@@ -98,11 +95,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) | |||
98 | static int | 95 | static int |
99 | ccw_device_accumulate_esw_valid(struct irb *irb) | 96 | ccw_device_accumulate_esw_valid(struct irb *irb) |
100 | { | 97 | { |
101 | if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) | 98 | if (!irb->scsw.cmd.eswf && |
99 | (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND)) | ||
102 | return 0; | 100 | return 0; |
103 | if (irb->scsw.stctl == | 101 | if (irb->scsw.cmd.stctl == |
104 | (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && | 102 | (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && |
105 | !(irb->scsw.actl & SCSW_ACTL_SUSPENDED)) | 103 | !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) |
106 | return 0; | 104 | return 0; |
107 | return 1; | 105 | return 1; |
108 | } | 106 | } |
@@ -125,7 +123,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) | |||
125 | cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum; | 123 | cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum; |
126 | 124 | ||
127 | /* Copy subchannel logout information if esw is of format 0. */ | 125 | /* Copy subchannel logout information if esw is of format 0. */ |
128 | if (irb->scsw.eswf) { | 126 | if (irb->scsw.cmd.eswf) { |
129 | cdev_sublog = &cdev_irb->esw.esw0.sublog; | 127 | cdev_sublog = &cdev_irb->esw.esw0.sublog; |
130 | sublog = &irb->esw.esw0.sublog; | 128 | sublog = &irb->esw.esw0.sublog; |
131 | /* Copy extended status flags. */ | 129 | /* Copy extended status flags. */ |
@@ -134,7 +132,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) | |||
134 | * Copy fields that have a meaning for channel data check | 132 | * Copy fields that have a meaning for channel data check |
135 | * channel control check and interface control check. | 133 | * channel control check and interface control check. |
136 | */ | 134 | */ |
137 | if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | | 135 | if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK | |
138 | SCHN_STAT_CHN_CTRL_CHK | | 136 | SCHN_STAT_CHN_CTRL_CHK | |
139 | SCHN_STAT_INTF_CTRL_CHK)) { | 137 | SCHN_STAT_INTF_CTRL_CHK)) { |
140 | /* Copy ancillary report bit. */ | 138 | /* Copy ancillary report bit. */ |
@@ -155,7 +153,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) | |||
155 | /* Copy i/o-error alert. */ | 153 | /* Copy i/o-error alert. */ |
156 | cdev_sublog->ioerr = sublog->ioerr; | 154 | cdev_sublog->ioerr = sublog->ioerr; |
157 | /* Copy channel path timeout bit. */ | 155 | /* Copy channel path timeout bit. */ |
158 | if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK) | 156 | if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK) |
159 | cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt; | 157 | cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt; |
160 | /* Copy failing storage address validity flag. */ | 158 | /* Copy failing storage address validity flag. */ |
161 | cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf; | 159 | cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf; |
@@ -200,24 +198,24 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) | |||
200 | * If not, the remaining bits have no meaning and we must ignore them. | 198 | * If not, the remaining bits have no meaning and we must ignore them. |
201 | * The esw is not meaningful either... | 199 | * The esw is not meaningful either... |
202 | */ | 200 | */ |
203 | if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) | 201 | if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) |
204 | return; | 202 | return; |
205 | 203 | ||
206 | /* Check for channel checks and interface control checks. */ | 204 | /* Check for channel checks and interface control checks. */ |
207 | ccw_device_msg_control_check(cdev, irb); | 205 | ccw_device_msg_control_check(cdev, irb); |
208 | 206 | ||
209 | /* Check for path not operational. */ | 207 | /* Check for path not operational. */ |
210 | if (irb->scsw.pno && irb->scsw.fctl != 0 && | 208 | if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw)) |
211 | (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) || | ||
212 | (irb->scsw.actl & SCSW_ACTL_SUSPENDED))) | ||
213 | ccw_device_path_notoper(cdev); | 209 | ccw_device_path_notoper(cdev); |
214 | 210 | /* No irb accumulation for transport mode irbs. */ | |
211 | if (scsw_is_tm(&irb->scsw)) { | ||
212 | memcpy(&cdev->private->irb, irb, sizeof(struct irb)); | ||
213 | return; | ||
214 | } | ||
215 | /* | 215 | /* |
216 | * Don't accumulate unsolicited interrupts. | 216 | * Don't accumulate unsolicited interrupts. |
217 | */ | 217 | */ |
218 | if ((irb->scsw.stctl == | 218 | if (!scsw_is_solicited(&irb->scsw)) |
219 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && | ||
220 | (!irb->scsw.cc)) | ||
221 | return; | 219 | return; |
222 | 220 | ||
223 | cdev_irb = &cdev->private->irb; | 221 | cdev_irb = &cdev->private->irb; |
@@ -227,62 +225,63 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) | |||
227 | * status at the subchannel has been cleared and we must not pass | 225 | * status at the subchannel has been cleared and we must not pass |
228 | * intermediate accumulated status to the device driver. | 226 | * intermediate accumulated status to the device driver. |
229 | */ | 227 | */ |
230 | if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) | 228 | if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) |
231 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | 229 | memset(&cdev->private->irb, 0, sizeof(struct irb)); |
232 | 230 | ||
233 | /* Copy bits which are valid only for the start function. */ | 231 | /* Copy bits which are valid only for the start function. */ |
234 | if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) { | 232 | if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) { |
235 | /* Copy key. */ | 233 | /* Copy key. */ |
236 | cdev_irb->scsw.key = irb->scsw.key; | 234 | cdev_irb->scsw.cmd.key = irb->scsw.cmd.key; |
237 | /* Copy suspend control bit. */ | 235 | /* Copy suspend control bit. */ |
238 | cdev_irb->scsw.sctl = irb->scsw.sctl; | 236 | cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl; |
239 | /* Accumulate deferred condition code. */ | 237 | /* Accumulate deferred condition code. */ |
240 | cdev_irb->scsw.cc |= irb->scsw.cc; | 238 | cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc; |
241 | /* Copy ccw format bit. */ | 239 | /* Copy ccw format bit. */ |
242 | cdev_irb->scsw.fmt = irb->scsw.fmt; | 240 | cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt; |
243 | /* Copy prefetch bit. */ | 241 | /* Copy prefetch bit. */ |
244 | cdev_irb->scsw.pfch = irb->scsw.pfch; | 242 | cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch; |
245 | /* Copy initial-status-interruption-control. */ | 243 | /* Copy initial-status-interruption-control. */ |
246 | cdev_irb->scsw.isic = irb->scsw.isic; | 244 | cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic; |
247 | /* Copy address limit checking control. */ | 245 | /* Copy address limit checking control. */ |
248 | cdev_irb->scsw.alcc = irb->scsw.alcc; | 246 | cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc; |
249 | /* Copy suppress suspend bit. */ | 247 | /* Copy suppress suspend bit. */ |
250 | cdev_irb->scsw.ssi = irb->scsw.ssi; | 248 | cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi; |
251 | } | 249 | } |
252 | 250 | ||
253 | /* Take care of the extended control bit and extended control word. */ | 251 | /* Take care of the extended control bit and extended control word. */ |
254 | ccw_device_accumulate_ecw(cdev, irb); | 252 | ccw_device_accumulate_ecw(cdev, irb); |
255 | 253 | ||
256 | /* Accumulate function control. */ | 254 | /* Accumulate function control. */ |
257 | cdev_irb->scsw.fctl |= irb->scsw.fctl; | 255 | cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl; |
258 | /* Copy activity control. */ | 256 | /* Copy activity control. */ |
259 | cdev_irb->scsw.actl= irb->scsw.actl; | 257 | cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl; |
260 | /* Accumulate status control. */ | 258 | /* Accumulate status control. */ |
261 | cdev_irb->scsw.stctl |= irb->scsw.stctl; | 259 | cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl; |
262 | /* | 260 | /* |
263 | * Copy ccw address if it is valid. This is a bit simplified | 261 | * Copy ccw address if it is valid. This is a bit simplified |
264 | * but should be close enough for all practical purposes. | 262 | * but should be close enough for all practical purposes. |
265 | */ | 263 | */ |
266 | if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) || | 264 | if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) || |
267 | ((irb->scsw.stctl == | 265 | ((irb->scsw.cmd.stctl == |
268 | (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) && | 266 | (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) && |
269 | (irb->scsw.actl & SCSW_ACTL_DEVACT) && | 267 | (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) && |
270 | (irb->scsw.actl & SCSW_ACTL_SCHACT)) || | 268 | (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) || |
271 | (irb->scsw.actl & SCSW_ACTL_SUSPENDED)) | 269 | (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) |
272 | cdev_irb->scsw.cpa = irb->scsw.cpa; | 270 | cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa; |
273 | /* Accumulate device status, but not the device busy flag. */ | 271 | /* Accumulate device status, but not the device busy flag. */ |
274 | cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY; | 272 | cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY; |
275 | /* dstat is not always valid. */ | 273 | /* dstat is not always valid. */ |
276 | if (irb->scsw.stctl & | 274 | if (irb->scsw.cmd.stctl & |
277 | (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS | 275 | (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS |
278 | | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS)) | 276 | | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS)) |
279 | cdev_irb->scsw.dstat |= irb->scsw.dstat; | 277 | cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat; |
280 | /* Accumulate subchannel status. */ | 278 | /* Accumulate subchannel status. */ |
281 | cdev_irb->scsw.cstat |= irb->scsw.cstat; | 279 | cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat; |
282 | /* Copy residual count if it is valid. */ | 280 | /* Copy residual count if it is valid. */ |
283 | if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) && | 281 | if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) && |
284 | (irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0) | 282 | (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) |
285 | cdev_irb->scsw.count = irb->scsw.count; | 283 | == 0) |
284 | cdev_irb->scsw.cmd.count = irb->scsw.cmd.count; | ||
286 | 285 | ||
287 | /* Take care of bits in the extended status word. */ | 286 | /* Take care of bits in the extended status word. */ |
288 | ccw_device_accumulate_esw(cdev, irb); | 287 | ccw_device_accumulate_esw(cdev, irb); |
@@ -299,7 +298,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) | |||
299 | * sense facility available/supported when enabling the | 298 | * sense facility available/supported when enabling the |
300 | * concurrent sense facility. | 299 | * concurrent sense facility. |
301 | */ | 300 | */ |
302 | if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && | 301 | if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && |
303 | !(cdev_irb->esw.esw0.erw.cons)) | 302 | !(cdev_irb->esw.esw0.erw.cons)) |
304 | cdev->private->flags.dosense = 1; | 303 | cdev->private->flags.dosense = 1; |
305 | } | 304 | } |
@@ -317,7 +316,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) | |||
317 | sch = to_subchannel(cdev->dev.parent); | 316 | sch = to_subchannel(cdev->dev.parent); |
318 | 317 | ||
319 | /* A sense is required, can we do it now ? */ | 318 | /* A sense is required, can we do it now ? */ |
320 | if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) | 319 | if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) |
321 | /* | 320 | /* |
322 | * we received an Unit Check but we have no final | 321 | * we received an Unit Check but we have no final |
323 | * status yet, therefore we must delay the SENSE | 322 | * status yet, therefore we must delay the SENSE |
@@ -355,20 +354,18 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb) | |||
355 | * If not, the remaining bits have no meaning and we must ignore them. | 354 | * If not, the remaining bits have no meaning and we must ignore them. |
356 | * The esw is not meaningful either... | 355 | * The esw is not meaningful either... |
357 | */ | 356 | */ |
358 | if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) | 357 | if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) |
359 | return; | 358 | return; |
360 | 359 | ||
361 | /* Check for channel checks and interface control checks. */ | 360 | /* Check for channel checks and interface control checks. */ |
362 | ccw_device_msg_control_check(cdev, irb); | 361 | ccw_device_msg_control_check(cdev, irb); |
363 | 362 | ||
364 | /* Check for path not operational. */ | 363 | /* Check for path not operational. */ |
365 | if (irb->scsw.pno && irb->scsw.fctl != 0 && | 364 | if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw)) |
366 | (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) || | ||
367 | (irb->scsw.actl & SCSW_ACTL_SUSPENDED))) | ||
368 | ccw_device_path_notoper(cdev); | 365 | ccw_device_path_notoper(cdev); |
369 | 366 | ||
370 | if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && | 367 | if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && |
371 | (irb->scsw.dstat & DEV_STAT_CHN_END)) { | 368 | (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) { |
372 | cdev->private->irb.esw.esw0.erw.cons = 1; | 369 | cdev->private->irb.esw.esw0.erw.cons = 1; |
373 | cdev->private->flags.dosense = 0; | 370 | cdev->private->flags.dosense = 0; |
374 | } | 371 | } |
@@ -386,11 +383,11 @@ int | |||
386 | ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb) | 383 | ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb) |
387 | { | 384 | { |
388 | ccw_device_accumulate_irb(cdev, irb); | 385 | ccw_device_accumulate_irb(cdev, irb); |
389 | if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) | 386 | if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) |
390 | return -EBUSY; | 387 | return -EBUSY; |
391 | /* Check for basic sense. */ | 388 | /* Check for basic sense. */ |
392 | if (cdev->private->flags.dosense && | 389 | if (cdev->private->flags.dosense && |
393 | !(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) { | 390 | !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) { |
394 | cdev->private->irb.esw.esw0.erw.cons = 1; | 391 | cdev->private->irb.esw.esw0.erw.cons = 1; |
395 | cdev->private->flags.dosense = 0; | 392 | cdev->private->flags.dosense = 0; |
396 | return 0; | 393 | return 0; |
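The accumulation code above now reads the scsw through accessor helpers (scsw_stctl(), scsw_cstat(), scsw_actl(), scsw_is_tm(), ...) instead of dereferencing irb->scsw fields directly, because the scsw has become a union of command-mode and transport-mode layouts. As a rough sketch of the assumed dispatch pattern (not the actual helper source):

	/* Assumed shape of the accessors: pick the layout that matches the
	 * scsw format before reading a field. */
	static inline u32 example_stctl(union scsw *scsw)
	{
		return scsw_is_tm(scsw) ? scsw->tm.stctl : scsw->cmd.stctl;
	}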
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c new file mode 100644 index 000000000000..61677dfbdc9b --- /dev/null +++ b/drivers/s390/cio/fcx.c | |||
@@ -0,0 +1,350 @@ | |||
1 | /* | ||
2 | * Functions for assembling fcx enabled I/O control blocks. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/string.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/err.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <asm/fcx.h> | ||
15 | #include "cio.h" | ||
16 | |||
17 | /** | ||
18 | * tcw_get_intrg - return pointer to associated interrogate tcw | ||
19 | * @tcw: pointer to the original tcw | ||
20 | * | ||
21 | * Return a pointer to the interrogate tcw associated with the specified tcw | ||
22 | * or %NULL if there is no associated interrogate tcw. | ||
23 | */ | ||
24 | struct tcw *tcw_get_intrg(struct tcw *tcw) | ||
25 | { | ||
26 | return (struct tcw *) ((addr_t) tcw->intrg); | ||
27 | } | ||
28 | EXPORT_SYMBOL(tcw_get_intrg); | ||
29 | |||
30 | /** | ||
31 | * tcw_get_data - return pointer to input/output data associated with tcw | ||
32 | * @tcw: pointer to the tcw | ||
33 | * | ||
34 | * Return the input or output data address specified in the tcw depending | ||
35 | * on whether the r-bit or the w-bit is set. If neither bit is set, return | ||
36 | * %NULL. | ||
37 | */ | ||
38 | void *tcw_get_data(struct tcw *tcw) | ||
39 | { | ||
40 | if (tcw->r) | ||
41 | return (void *) ((addr_t) tcw->input); | ||
42 | if (tcw->w) | ||
43 | return (void *) ((addr_t) tcw->output); | ||
44 | return NULL; | ||
45 | } | ||
46 | EXPORT_SYMBOL(tcw_get_data); | ||
47 | |||
48 | /** | ||
49 | * tcw_get_tccb - return pointer to tccb associated with tcw | ||
50 | * @tcw: pointer to the tcw | ||
51 | * | ||
52 | * Return pointer to the tccb associated with this tcw. | ||
53 | */ | ||
54 | struct tccb *tcw_get_tccb(struct tcw *tcw) | ||
55 | { | ||
56 | return (struct tccb *) ((addr_t) tcw->tccb); | ||
57 | } | ||
58 | EXPORT_SYMBOL(tcw_get_tccb); | ||
59 | |||
60 | /** | ||
61 | * tcw_get_tsb - return pointer to tsb associated with tcw | ||
62 | * @tcw: pointer to the tcw | ||
63 | * | ||
64 | * Return pointer to the tsb associated with this tcw. | ||
65 | */ | ||
66 | struct tsb *tcw_get_tsb(struct tcw *tcw) | ||
67 | { | ||
68 | return (struct tsb *) ((addr_t) tcw->tsb); | ||
69 | } | ||
70 | EXPORT_SYMBOL(tcw_get_tsb); | ||
71 | |||
72 | /** | ||
73 | * tcw_init - initialize tcw data structure | ||
74 | * @tcw: pointer to the tcw to be initialized | ||
75 | * @r: initial value of the r-bit | ||
76 | * @w: initial value of the w-bit | ||
77 | * | ||
78 | * Initialize all fields of the specified tcw data structure with zero and | ||
79 | * fill in the format, flags, r and w fields. | ||
80 | */ | ||
81 | void tcw_init(struct tcw *tcw, int r, int w) | ||
82 | { | ||
83 | memset(tcw, 0, sizeof(struct tcw)); | ||
84 | tcw->format = TCW_FORMAT_DEFAULT; | ||
85 | tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT); | ||
86 | if (r) | ||
87 | tcw->r = 1; | ||
88 | if (w) | ||
89 | tcw->w = 1; | ||
90 | } | ||
91 | EXPORT_SYMBOL(tcw_init); | ||
92 | |||
93 | static inline size_t tca_size(struct tccb *tccb) | ||
94 | { | ||
95 | return tccb->tcah.tcal - 12; | ||
96 | } | ||
97 | |||
98 | static u32 calc_dcw_count(struct tccb *tccb) | ||
99 | { | ||
100 | int offset; | ||
101 | struct dcw *dcw; | ||
102 | u32 count = 0; | ||
103 | size_t size; | ||
104 | |||
105 | size = tca_size(tccb); | ||
106 | for (offset = 0; offset < size;) { | ||
107 | dcw = (struct dcw *) &tccb->tca[offset]; | ||
108 | count += dcw->count; | ||
109 | if (!(dcw->flags & DCW_FLAGS_CC)) | ||
110 | break; | ||
111 | offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4); | ||
112 | } | ||
113 | return count; | ||
114 | } | ||
115 | |||
116 | static u32 calc_cbc_size(struct tidaw *tidaw, int num) | ||
117 | { | ||
118 | int i; | ||
119 | u32 cbc_data; | ||
120 | u32 cbc_count = 0; | ||
121 | u64 data_count = 0; | ||
122 | |||
123 | for (i = 0; i < num; i++) { | ||
124 | if (tidaw[i].flags & TIDAW_FLAGS_LAST) | ||
125 | break; | ||
126 | /* TODO: find out if padding applies to total of data | ||
127 | * transferred or data transferred by this tidaw. Assumption: | ||
128 | * applies to total. */ | ||
129 | data_count += tidaw[i].count; | ||
130 | if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) { | ||
131 | cbc_data = 4 + ALIGN(data_count, 4) - data_count; | ||
132 | cbc_count += cbc_data; | ||
133 | data_count += cbc_data; | ||
134 | } | ||
135 | } | ||
136 | return cbc_count; | ||
137 | } | ||
138 | |||
139 | /** | ||
140 | * tcw_finalize - finalize tcw length fields and tidaw list | ||
141 | * @tcw: pointer to the tcw | ||
142 | * @num_tidaws: the number of tidaws used to address input/output data or zero | ||
143 | * if no tida is used | ||
144 | * | ||
145 | * Calculate the input-/output-count and tccbl field in the tcw, add a | ||
146 | * tcat to the tccb and terminate the data tidaw list if used. | ||
147 | * | ||
148 | * Note: in case input- or output-tida is used, the tidaw-list must be stored | ||
149 | * in contiguous storage (no ttic). The tcal field in the tccb must be | ||
150 | * up-to-date. | ||
151 | */ | ||
152 | void tcw_finalize(struct tcw *tcw, int num_tidaws) | ||
153 | { | ||
154 | struct tidaw *tidaw; | ||
155 | struct tccb *tccb; | ||
156 | struct tccb_tcat *tcat; | ||
157 | u32 count; | ||
158 | |||
159 | /* Terminate tidaw list. */ | ||
160 | tidaw = tcw_get_data(tcw); | ||
161 | if (num_tidaws > 0) | ||
162 | tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST; | ||
163 | /* Add tcat to tccb. */ | ||
164 | tccb = tcw_get_tccb(tcw); | ||
165 | tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)]; | ||
166 | memset(tcat, 0, sizeof(*tcat)); | ||
167 | /* Calculate tcw input/output count and tcat transport count. */ | ||
168 | count = calc_dcw_count(tccb); | ||
169 | if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA)) | ||
170 | count += calc_cbc_size(tidaw, num_tidaws); | ||
171 | if (tcw->r) | ||
172 | tcw->input_count = count; | ||
173 | else if (tcw->w) | ||
174 | tcw->output_count = count; | ||
175 | tcat->count = ALIGN(count, 4) + 4; | ||
176 | /* Calculate tccbl. */ | ||
177 | tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) + | ||
178 | sizeof(struct tccb_tcat) - 20) >> 2; | ||
179 | } | ||
180 | EXPORT_SYMBOL(tcw_finalize); | ||
181 | |||
182 | /** | ||
183 | * tcw_set_intrg - set the interrogate tcw address of a tcw | ||
184 | * @tcw: the tcw address | ||
185 | * @intrg_tcw: the address of the interrogate tcw | ||
186 | * | ||
187 | * Set the address of the interrogate tcw in the specified tcw. | ||
188 | */ | ||
189 | void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw) | ||
190 | { | ||
191 | tcw->intrg = (u32) ((addr_t) intrg_tcw); | ||
192 | } | ||
193 | EXPORT_SYMBOL(tcw_set_intrg); | ||
194 | |||
195 | /** | ||
196 | * tcw_set_data - set data address and tida flag of a tcw | ||
197 | * @tcw: the tcw address | ||
198 | * @data: the data address | ||
199 | * @use_tidal: zero if the data address specifies a contiguous block of data, | ||
200 | * non-zero if it specifies a list of tidaws. | ||
201 | * | ||
202 | * Set the input/output data address of a tcw (depending on the value of the | ||
203 | * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag | ||
204 | * is set as well. | ||
205 | */ | ||
206 | void tcw_set_data(struct tcw *tcw, void *data, int use_tidal) | ||
207 | { | ||
208 | if (tcw->r) { | ||
209 | tcw->input = (u64) ((addr_t) data); | ||
210 | if (use_tidal) | ||
211 | tcw->flags |= TCW_FLAGS_INPUT_TIDA; | ||
212 | } else if (tcw->w) { | ||
213 | tcw->output = (u64) ((addr_t) data); | ||
214 | if (use_tidal) | ||
215 | tcw->flags |= TCW_FLAGS_OUTPUT_TIDA; | ||
216 | } | ||
217 | } | ||
218 | EXPORT_SYMBOL(tcw_set_data); | ||
219 | |||
220 | /** | ||
221 | * tcw_set_tccb - set tccb address of a tcw | ||
222 | * @tcw: the tcw address | ||
223 | * @tccb: the tccb address | ||
224 | * | ||
225 | * Set the address of the tccb in the specified tcw. | ||
226 | */ | ||
227 | void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb) | ||
228 | { | ||
229 | tcw->tccb = (u64) ((addr_t) tccb); | ||
230 | } | ||
231 | EXPORT_SYMBOL(tcw_set_tccb); | ||
232 | |||
233 | /** | ||
234 | * tcw_set_tsb - set tsb address of a tcw | ||
235 | * @tcw: the tcw address | ||
236 | * @tsb: the tsb address | ||
237 | * | ||
238 | * Set the address of the tsb in the specified tcw. | ||
239 | */ | ||
240 | void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb) | ||
241 | { | ||
242 | tcw->tsb = (u64) ((addr_t) tsb); | ||
243 | } | ||
244 | EXPORT_SYMBOL(tcw_set_tsb); | ||
245 | |||
246 | /** | ||
247 | * tccb_init - initialize tccb | ||
248 | * @tccb: the tccb address | ||
249 | * @size: the maximum size of the tccb | ||
250 | * @sac: the service-action-code to be used | ||
251 | * | ||
252 | * Initialize the header of the specified tccb by resetting all values to zero | ||
253 | * and filling in defaults for format, sac and initial tcal fields. | ||
254 | */ | ||
255 | void tccb_init(struct tccb *tccb, size_t size, u32 sac) | ||
256 | { | ||
257 | memset(tccb, 0, size); | ||
258 | tccb->tcah.format = TCCB_FORMAT_DEFAULT; | ||
259 | tccb->tcah.sac = sac; | ||
260 | tccb->tcah.tcal = 12; | ||
261 | } | ||
262 | EXPORT_SYMBOL(tccb_init); | ||
263 | |||
264 | /** | ||
265 | * tsb_init - initialize tsb | ||
266 | * @tsb: the tsb address | ||
267 | * | ||
268 | * Initialize the specified tsb by resetting all values to zero. | ||
269 | */ | ||
270 | void tsb_init(struct tsb *tsb) | ||
271 | { | ||
272 | memset(tsb, 0, sizeof(*tsb)); | ||
273 | } | ||
274 | EXPORT_SYMBOL(tsb_init); | ||
275 | |||
276 | /** | ||
277 | * tccb_add_dcw - add a dcw to the tccb | ||
278 | * @tccb: the tccb address | ||
279 | * @tccb_size: the maximum tccb size | ||
280 | * @cmd: the dcw command | ||
281 | * @flags: flags for the dcw | ||
282 | * @cd: pointer to control data for this dcw or NULL if none is required | ||
283 | * @cd_count: number of control data bytes for this dcw | ||
284 | * @count: number of data bytes for this dcw | ||
285 | * | ||
286 | * Add a new dcw to the specified tccb by writing the dcw information specified | ||
287 | * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return | ||
288 | * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw | ||
289 | * would exceed the available space as defined by @tccb_size. | ||
290 | * | ||
291 | * Note: the tcal field of the tccb header will be updated to reflect added | ||
292 | * content. | ||
293 | */ | ||
294 | struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags, | ||
295 | void *cd, u8 cd_count, u32 count) | ||
296 | { | ||
297 | struct dcw *dcw; | ||
298 | int size; | ||
299 | int tca_offset; | ||
300 | |||
301 | /* Check for space. */ | ||
302 | tca_offset = tca_size(tccb); | ||
303 | size = ALIGN(sizeof(struct dcw) + cd_count, 4); | ||
304 | if (sizeof(struct tccb_tcah) + tca_offset + size + | ||
305 | sizeof(struct tccb_tcat) > tccb_size) | ||
306 | return ERR_PTR(-ENOSPC); | ||
307 | /* Add dcw to tca. */ | ||
308 | dcw = (struct dcw *) &tccb->tca[tca_offset]; | ||
309 | memset(dcw, 0, size); | ||
310 | dcw->cmd = cmd; | ||
311 | dcw->flags = flags; | ||
312 | dcw->count = count; | ||
313 | dcw->cd_count = cd_count; | ||
314 | if (cd) | ||
315 | memcpy(&dcw->cd[0], cd, cd_count); | ||
316 | tccb->tcah.tcal += size; | ||
317 | return dcw; | ||
318 | } | ||
319 | EXPORT_SYMBOL(tccb_add_dcw); | ||
320 | |||
321 | /** | ||
322 | * tcw_add_tidaw - add a tidaw to a tcw | ||
323 | * @tcw: the tcw address | ||
324 | * @num_tidaws: the current number of tidaws | ||
325 | * @flags: flags for the new tidaw | ||
326 | * @addr: address value for the new tidaw | ||
327 | * @count: count value for the new tidaw | ||
328 | * | ||
329 | * Add a new tidaw to the input/output data tidaw-list of the specified tcw | ||
330 | * (depending on the value of the r-flag and w-flag) and return a pointer to | ||
331 | * the new tidaw. | ||
332 | * | ||
333 | * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller | ||
334 | * must ensure that there is enough space for the new tidaw. The last-tidaw | ||
335 | * flag for the last tidaw in the list will be set by tcw_finalize. | ||
336 | */ | ||
337 | struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags, | ||
338 | void *addr, u32 count) | ||
339 | { | ||
340 | struct tidaw *tidaw; | ||
341 | |||
342 | /* Add tidaw to tidaw-list. */ | ||
343 | tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws; | ||
344 | memset(tidaw, 0, sizeof(struct tidaw)); | ||
345 | tidaw->flags = flags; | ||
346 | tidaw->count = count; | ||
347 | tidaw->addr = (u64) ((addr_t) addr); | ||
348 | return tidaw; | ||
349 | } | ||
350 | EXPORT_SYMBOL(tcw_add_tidaw); | ||
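Taken together, the helpers above are meant to be called in a fixed order: initialize tcw and tccb, attach tccb/tsb/data, add dcws and tidaws, then finalize. A hedged sketch follows; the command code 0x02 and the one-dcw/one-tidaw layout are illustrative only, and the caller is assumed to provide buffers below 2G with the documented alignment (in practice via the itcw helpers further down in this patch).

	#include <asm/fcx.h>

	/* Sketch only: assemble a one-dcw, one-tidaw read request into
	 * caller-provided, suitably aligned buffers. */
	static void example_build_read_tcw(struct tcw *tcw, struct tccb *tccb,
					   struct tsb *tsb, struct tidaw *tidal,
					   void *data, u32 len)
	{
		tcw_init(tcw, 1, 0);			/* r=1: read operation */
		tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
		tsb_init(tsb);
		tcw_set_tccb(tcw, tccb);
		tcw_set_tsb(tcw, tsb);
		tcw_set_data(tcw, tidal, 1);		/* input data via tidaw list */
		tccb_add_dcw(tccb, TCCB_MAX_SIZE, 0x02, 0, NULL, 0, len);
		tcw_add_tidaw(tcw, 0, 0, data, len);
		tcw_finalize(tcw, 1);			/* one tidaw in the list */
	}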
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h index 144466ab8c15..528065cb5021 100644 --- a/drivers/s390/cio/idset.h +++ b/drivers/s390/cio/idset.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #ifndef S390_IDSET_H | 8 | #ifndef S390_IDSET_H |
9 | #define S390_IDSET_H S390_IDSET_H | 9 | #define S390_IDSET_H S390_IDSET_H |
10 | 10 | ||
11 | #include "schid.h" | 11 | #include <asm/schid.h> |
12 | 12 | ||
13 | struct idset; | 13 | struct idset; |
14 | 14 | ||
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 8c613160bfce..3f8f1cf69c76 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h | |||
@@ -1,12 +1,12 @@ | |||
1 | #ifndef S390_IO_SCH_H | 1 | #ifndef S390_IO_SCH_H |
2 | #define S390_IO_SCH_H | 2 | #define S390_IO_SCH_H |
3 | 3 | ||
4 | #include "schid.h" | 4 | #include <asm/schid.h> |
5 | 5 | ||
6 | /* | 6 | /* |
7 | * operation request block | 7 | * command-mode operation request block |
8 | */ | 8 | */ |
9 | struct orb { | 9 | struct cmd_orb { |
10 | u32 intparm; /* interruption parameter */ | 10 | u32 intparm; /* interruption parameter */ |
11 | u32 key : 4; /* flags, like key, suspend control, etc. */ | 11 | u32 key : 4; /* flags, like key, suspend control, etc. */ |
12 | u32 spnd : 1; /* suspend control */ | 12 | u32 spnd : 1; /* suspend control */ |
@@ -28,8 +28,36 @@ struct orb { | |||
28 | u32 cpa; /* channel program address */ | 28 | u32 cpa; /* channel program address */ |
29 | } __attribute__ ((packed, aligned(4))); | 29 | } __attribute__ ((packed, aligned(4))); |
30 | 30 | ||
31 | /* | ||
32 | * transport-mode operation request block | ||
33 | */ | ||
34 | struct tm_orb { | ||
35 | u32 intparm; | ||
36 | u32 key:4; | ||
37 | u32 :9; | ||
38 | u32 b:1; | ||
39 | u32 :2; | ||
40 | u32 lpm:8; | ||
41 | u32 :7; | ||
42 | u32 x:1; | ||
43 | u32 tcw; | ||
44 | u32 prio:8; | ||
45 | u32 :8; | ||
46 | u32 rsvpgm:8; | ||
47 | u32 :8; | ||
48 | u32 :32; | ||
49 | u32 :32; | ||
50 | u32 :32; | ||
51 | u32 :32; | ||
52 | } __attribute__ ((packed, aligned(4))); | ||
53 | |||
54 | union orb { | ||
55 | struct cmd_orb cmd; | ||
56 | struct tm_orb tm; | ||
57 | } __attribute__ ((packed, aligned(4))); | ||
58 | |||
31 | struct io_subchannel_private { | 59 | struct io_subchannel_private { |
32 | struct orb orb; /* operation request block */ | 60 | union orb orb; /* operation request block */ |
33 | struct ccw1 sense_ccw; /* static ccw for sense command */ | 61 | struct ccw1 sense_ccw; /* static ccw for sense command */ |
34 | } __attribute__ ((aligned(8))); | 62 | } __attribute__ ((aligned(8))); |
35 | 63 | ||
@@ -95,16 +123,18 @@ struct ccw_device_private { | |||
95 | void *cmb_wait; /* deferred cmb enable/disable */ | 123 | void *cmb_wait; /* deferred cmb enable/disable */ |
96 | }; | 124 | }; |
97 | 125 | ||
98 | static inline int ssch(struct subchannel_id schid, volatile struct orb *addr) | 126 | static inline int ssch(struct subchannel_id schid, volatile union orb *addr) |
99 | { | 127 | { |
100 | register struct subchannel_id reg1 asm("1") = schid; | 128 | register struct subchannel_id reg1 asm("1") = schid; |
101 | int ccode; | 129 | int ccode = -EIO; |
102 | 130 | ||
103 | asm volatile( | 131 | asm volatile( |
104 | " ssch 0(%2)\n" | 132 | " ssch 0(%2)\n" |
105 | " ipm %0\n" | 133 | "0: ipm %0\n" |
106 | " srl %0,28" | 134 | " srl %0,28\n" |
107 | : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); | 135 | "1:\n" |
136 | EX_TABLE(0b, 1b) | ||
137 | : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); | ||
108 | return ccode; | 138 | return ccode; |
109 | } | 139 | } |
110 | 140 | ||
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 652ea3625f9d..9fa2ac13ac85 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define S390_CIO_IOASM_H | 2 | #define S390_CIO_IOASM_H |
3 | 3 | ||
4 | #include <asm/chpid.h> | 4 | #include <asm/chpid.h> |
5 | #include "schid.h" | 5 | #include <asm/schid.h> |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * TPI info structure | 8 | * TPI info structure |
diff --git a/drivers/s390/cio/isc.c b/drivers/s390/cio/isc.c new file mode 100644 index 000000000000..c592087be0f1 --- /dev/null +++ b/drivers/s390/cio/isc.c | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Functions for registration of I/O interruption subclasses on s390. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * Authors: Sebastian Ott <sebott@linux.vnet.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <asm/isc.h> | ||
11 | |||
12 | static unsigned int isc_refs[MAX_ISC + 1]; | ||
13 | static DEFINE_SPINLOCK(isc_ref_lock); | ||
14 | |||
15 | |||
16 | /** | ||
17 | * isc_register - register an I/O interruption subclass. | ||
18 | * @isc: I/O interruption subclass to register | ||
19 | * | ||
20 | * The number of users for @isc is increased. If this is the first user to | ||
21 | * register @isc, the corresponding I/O interruption subclass mask is enabled. | ||
22 | * | ||
23 | * Context: | ||
24 | * This function must not be called in interrupt context. | ||
25 | */ | ||
26 | void isc_register(unsigned int isc) | ||
27 | { | ||
28 | if (isc > MAX_ISC) { | ||
29 | WARN_ON(1); | ||
30 | return; | ||
31 | } | ||
32 | |||
33 | spin_lock(&isc_ref_lock); | ||
34 | if (isc_refs[isc] == 0) | ||
35 | ctl_set_bit(6, 31 - isc); | ||
36 | isc_refs[isc]++; | ||
37 | spin_unlock(&isc_ref_lock); | ||
38 | } | ||
39 | EXPORT_SYMBOL_GPL(isc_register); | ||
40 | |||
41 | /** | ||
42 | * isc_unregister - unregister an I/O interruption subclass. | ||
43 | * @isc: I/O interruption subclass to unregister | ||
44 | * | ||
45 | * The number of users for @isc is decreased. If this is the last user to | ||
46 | * unregister @isc, the corresponding I/O interruption subclass mask is | ||
47 | * disabled. | ||
48 | * Note: This function must not be called if isc_register() hasn't been called | ||
49 | * before by the driver for @isc. | ||
50 | * | ||
51 | * Context: | ||
52 | * This function must not be called in interrupt context. | ||
53 | */ | ||
54 | void isc_unregister(unsigned int isc) | ||
55 | { | ||
56 | spin_lock(&isc_ref_lock); | ||
57 | /* check for misuse */ | ||
58 | if (isc > MAX_ISC || isc_refs[isc] == 0) { | ||
59 | WARN_ON(1); | ||
60 | goto out_unlock; | ||
61 | } | ||
62 | if (isc_refs[isc] == 1) | ||
63 | ctl_clear_bit(6, 31 - isc); | ||
64 | isc_refs[isc]--; | ||
65 | out_unlock: | ||
66 | spin_unlock(&isc_ref_lock); | ||
67 | } | ||
68 | EXPORT_SYMBOL_GPL(isc_unregister); | ||
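A short usage sketch for the reference counting above; the isc value 3 is illustrative only (a driver is expected to use the isc its subchannels actually deliver interrupts on).

	#include <asm/isc.h>

	/* Sketch only: enable the isc before interrupts are expected on it,
	 * and drop the reference once the driver no longer needs it. */
	static void example_driver_init(void)
	{
		isc_register(3);
		/* ... set up subchannels / interrupt handling for isc 3 ... */
	}

	static void example_driver_exit(void)
	{
		/* ... tear down ... */
		isc_unregister(3);
	}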
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c new file mode 100644 index 000000000000..17da9ab932ed --- /dev/null +++ b/drivers/s390/cio/itcw.c | |||
@@ -0,0 +1,327 @@ | |||
1 | /* | ||
2 | * Functions for incremental construction of fcx enabled I/O control blocks. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/string.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/err.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <asm/fcx.h> | ||
15 | #include <asm/itcw.h> | ||
16 | |||
17 | /** | ||
18 | * struct itcw - incremental tcw helper data type | ||
19 | * | ||
20 | * This structure serves as a handle for the incremental construction of a | ||
21 | * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate | ||
22 | * tcw and associated data. The data structures are contained inside a single | ||
23 | * contiguous buffer provided by the user. | ||
24 | * | ||
25 | * The itcw construction functions take care of overall data integrity: | ||
26 | * - reset unused fields to zero | ||
27 | * - fill in required pointers | ||
28 | * - ensure required alignment for data structures | ||
29 | * - prevent data structures from crossing a 4k-byte boundary where required | ||
30 | * - calculate tccb-related length fields | ||
31 | * - optionally provide ready-made interrogate tcw and associated structures | ||
32 | * | ||
33 | * Restrictions apply to the itcws created with these construction functions: | ||
34 | * - tida only supported for data address, not for tccb | ||
35 | * - only contiguous tidaw-lists (no ttic) | ||
36 | * - total number of bytes required per itcw may not exceed 4k bytes | ||
37 | * - either read or write operation (may not work with r=0 and w=0) | ||
38 | * | ||
39 | * Example: | ||
40 | * struct itcw *itcw; | ||
41 | * void *buffer; | ||
42 | * size_t size; | ||
43 | * | ||
44 | * size = itcw_calc_size(1, 2, 0); | ||
45 | * buffer = kmalloc(size, GFP_DMA); | ||
46 | * if (!buffer) | ||
47 | * return -ENOMEM; | ||
48 | * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0); | ||
49 | * if (IS_ERR(itcw)) | ||
50 | * return PTR_ERR(itcw); | ||
51 | * itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72); | ||
52 | * itcw_add_tidaw(itcw, 0, 0x30000, 20); | ||
53 | * itcw_add_tidaw(itcw, 0, 0x40000, 52); | ||
54 | * itcw_finalize(itcw); | ||
55 | * | ||
56 | */ | ||
57 | struct itcw { | ||
58 | struct tcw *tcw; | ||
59 | struct tcw *intrg_tcw; | ||
60 | int num_tidaws; | ||
61 | int max_tidaws; | ||
62 | int intrg_num_tidaws; | ||
63 | int intrg_max_tidaws; | ||
64 | }; | ||
65 | |||
66 | /** | ||
67 | * itcw_get_tcw - return pointer to tcw associated with the itcw | ||
68 | * @itcw: address of the itcw | ||
69 | * | ||
70 | * Return pointer to the tcw associated with the itcw. | ||
71 | */ | ||
72 | struct tcw *itcw_get_tcw(struct itcw *itcw) | ||
73 | { | ||
74 | return itcw->tcw; | ||
75 | } | ||
76 | EXPORT_SYMBOL(itcw_get_tcw); | ||
77 | |||
78 | /** | ||
79 | * itcw_calc_size - return the size of an itcw with the given parameters | ||
80 | * @intrg: if non-zero, add an interrogate tcw | ||
81 | * @max_tidaws: maximum number of tidaws to be used for data addressing or zero | ||
82 | * if no tida is to be used. | ||
83 | * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing | ||
84 | * by the interrogate tcw, if specified | ||
85 | * | ||
86 | * Calculate and return the number of bytes required to hold an itcw with the | ||
87 | * given parameters and assuming tccbs with maximum size. | ||
88 | * | ||
89 | * Note that the resulting size also contains bytes needed for alignment | ||
90 | * padding as well as padding to ensure that data structures don't cross a | ||
91 | * 4k-boundary where required. | ||
92 | */ | ||
93 | size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws) | ||
94 | { | ||
95 | size_t len; | ||
96 | |||
97 | /* Main data. */ | ||
98 | len = sizeof(struct itcw); | ||
99 | len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE + | ||
100 | /* TSB */ sizeof(struct tsb) + | ||
101 | /* TIDAL */ max_tidaws * sizeof(struct tidaw); | ||
102 | /* Interrogate data. */ | ||
103 | if (intrg) { | ||
104 | len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE + | ||
105 | /* TSB */ sizeof(struct tsb) + | ||
106 | /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw); | ||
107 | } | ||
108 | /* Maximum required alignment padding. */ | ||
109 | len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7; | ||
110 | /* Maximum padding for structures that may not cross 4k boundary. */ | ||
111 | if ((max_tidaws > 0) || (intrg_max_tidaws > 0)) | ||
112 | len += max(max_tidaws, intrg_max_tidaws) * | ||
113 | sizeof(struct tidaw) - 1; | ||
114 | return len; | ||
115 | } | ||
116 | EXPORT_SYMBOL(itcw_calc_size); | ||
117 | |||
118 | #define CROSS4K(x, l) (((x) & ~4095) != ((x + l) & ~4095)) | ||
119 | |||
120 | static inline void *fit_chunk(addr_t *start, addr_t end, size_t len, | ||
121 | int align, int check_4k) | ||
122 | { | ||
123 | addr_t addr; | ||
124 | |||
125 | addr = ALIGN(*start, align); | ||
126 | if (check_4k && CROSS4K(addr, len)) { | ||
127 | addr = ALIGN(addr, 4096); | ||
128 | addr = ALIGN(addr, align); | ||
129 | } | ||
130 | if (addr + len > end) | ||
131 | return ERR_PTR(-ENOSPC); | ||
132 | *start = addr + len; | ||
133 | return (void *) addr; | ||
134 | } | ||
135 | |||
136 | /** | ||
137 | * itcw_init - initialize incremental tcw data structure | ||
138 | * @buffer: address of buffer to use for data structures | ||
139 | * @size: number of bytes in buffer | ||
140 | * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write | ||
141 | * operation tcw | ||
142 | * @intrg: if non-zero, add and initialize an interrogate tcw | ||
143 | * @max_tidaws: maximum number of tidaws to be used for data addressing or zero | ||
144 | * if no tida is to be used. | ||
145 | * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing | ||
146 | * by the interrogate tcw, if specified | ||
147 | * | ||
148 | * Prepare the specified buffer to be used as an incremental tcw, i.e. a | ||
149 | * helper data structure that can be used to construct a valid tcw by | ||
150 | * successive calls to other helper functions. Note: the buffer needs to be | ||
151 | * located below the 2G address limit. The resulting tcw has the following | ||
152 | * restrictions: | ||
153 | * - no tccb tidal | ||
154 | * - input/output tidal is contiguous (no ttic) | ||
155 | * - total data should not exceed 4k | ||
156 | * - tcw specifies either read or write operation | ||
157 | * | ||
158 | * On success, return pointer to the resulting incremental tcw data structure, | ||
159 | * ERR_PTR otherwise. | ||
160 | */ | ||
161 | struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg, | ||
162 | int max_tidaws, int intrg_max_tidaws) | ||
163 | { | ||
164 | struct itcw *itcw; | ||
165 | void *chunk; | ||
166 | addr_t start; | ||
167 | addr_t end; | ||
168 | |||
169 | /* Check for 2G limit. */ | ||
170 | start = (addr_t) buffer; | ||
171 | end = start + size; | ||
172 | if (end > (1 << 31)) | ||
173 | return ERR_PTR(-EINVAL); | ||
174 | memset(buffer, 0, size); | ||
175 | /* ITCW. */ | ||
176 | chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0); | ||
177 | if (IS_ERR(chunk)) | ||
178 | return chunk; | ||
179 | itcw = chunk; | ||
180 | itcw->max_tidaws = max_tidaws; | ||
181 | itcw->intrg_max_tidaws = intrg_max_tidaws; | ||
182 | /* Main TCW. */ | ||
183 | chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); | ||
184 | if (IS_ERR(chunk)) | ||
185 | return chunk; | ||
186 | itcw->tcw = chunk; | ||
187 | tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0, | ||
188 | (op == ITCW_OP_WRITE) ? 1 : 0); | ||
189 | /* Interrogate TCW. */ | ||
190 | if (intrg) { | ||
191 | chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); | ||
192 | if (IS_ERR(chunk)) | ||
193 | return chunk; | ||
194 | itcw->intrg_tcw = chunk; | ||
195 | tcw_init(itcw->intrg_tcw, 1, 0); | ||
196 | tcw_set_intrg(itcw->tcw, itcw->intrg_tcw); | ||
197 | } | ||
198 | /* Data TIDAL. */ | ||
199 | if (max_tidaws > 0) { | ||
200 | chunk = fit_chunk(&start, end, sizeof(struct tidaw) * | ||
201 | max_tidaws, 16, 1); | ||
202 | if (IS_ERR(chunk)) | ||
203 | return chunk; | ||
204 | tcw_set_data(itcw->tcw, chunk, 1); | ||
205 | } | ||
206 | /* Interrogate data TIDAL. */ | ||
207 | if (intrg && (intrg_max_tidaws > 0)) { | ||
208 | chunk = fit_chunk(&start, end, sizeof(struct tidaw) * | ||
209 | intrg_max_tidaws, 16, 1); | ||
210 | if (IS_ERR(chunk)) | ||
211 | return chunk; | ||
212 | tcw_set_data(itcw->intrg_tcw, chunk, 1); | ||
213 | } | ||
214 | /* TSB. */ | ||
215 | chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); | ||
216 | if (IS_ERR(chunk)) | ||
217 | return chunk; | ||
218 | tsb_init(chunk); | ||
219 | tcw_set_tsb(itcw->tcw, chunk); | ||
220 | /* Interrogate TSB. */ | ||
221 | if (intrg) { | ||
222 | chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); | ||
223 | if (IS_ERR(chunk)) | ||
224 | return chunk; | ||
225 | tsb_init(chunk); | ||
226 | tcw_set_tsb(itcw->intrg_tcw, chunk); | ||
227 | } | ||
228 | /* TCCB. */ | ||
229 | chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); | ||
230 | if (IS_ERR(chunk)) | ||
231 | return chunk; | ||
232 | tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT); | ||
233 | tcw_set_tccb(itcw->tcw, chunk); | ||
234 | /* Interrogate TCCB. */ | ||
235 | if (intrg) { | ||
236 | chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); | ||
237 | if (IS_ERR(chunk)) | ||
238 | return chunk; | ||
239 | tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG); | ||
240 | tcw_set_tccb(itcw->intrg_tcw, chunk); | ||
241 | tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL, | ||
242 | sizeof(struct dcw_intrg_data), 0); | ||
243 | tcw_finalize(itcw->intrg_tcw, 0); | ||
244 | } | ||
245 | return itcw; | ||
246 | } | ||
247 | EXPORT_SYMBOL(itcw_init); | ||
248 | |||
249 | /** | ||
250 | * itcw_add_dcw - add a dcw to the itcw | ||
251 | * @itcw: address of the itcw | ||
252 | * @cmd: the dcw command | ||
253 | * @flags: flags for the dcw | ||
254 | * @cd: address of control data for this dcw or NULL if none is required | ||
255 | * @cd_count: number of control data bytes for this dcw | ||
256 | * @count: number of data bytes for this dcw | ||
257 | * | ||
258 | * Add a new dcw to the specified itcw by writing the dcw information specified | ||
259 | * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return | ||
260 | * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw | ||
261 | * would exceed the available space. | ||
262 | * | ||
263 | * Note: the tcal field of the tccb header will be updated to reflect added | ||
264 | * content. | ||
265 | */ | ||
266 | struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd, | ||
267 | u8 cd_count, u32 count) | ||
268 | { | ||
269 | return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd, | ||
270 | flags, cd, cd_count, count); | ||
271 | } | ||
272 | EXPORT_SYMBOL(itcw_add_dcw); | ||
273 | |||
274 | /** | ||
275 | * itcw_add_tidaw - add a tidaw to the itcw | ||
276 | * @itcw: address of the itcw | ||
277 | * @flags: flags for the new tidaw | ||
278 | * @addr: address value for the new tidaw | ||
279 | * @count: count value for the new tidaw | ||
280 | * | ||
281 | * Add a new tidaw to the input/output data tidaw-list of the specified itcw | ||
282 | * (depending on the value of the r-flag and w-flag). Return a pointer to | ||
283 | * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the | ||
284 | * available space. | ||
285 | * | ||
286 | * Note: the tidaw-list is assumed to be contiguous with no ttics. The | ||
287 | * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize. | ||
288 | */ | ||
289 | struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count) | ||
290 | { | ||
291 | if (itcw->num_tidaws >= itcw->max_tidaws) | ||
292 | return ERR_PTR(-ENOSPC); | ||
293 | return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count); | ||
294 | } | ||
295 | EXPORT_SYMBOL(itcw_add_tidaw); | ||
296 | |||
297 | /** | ||
298 | * itcw_set_data - set data address and tida flag of the itcw | ||
299 | * @itcw: address of the itcw | ||
300 | * @addr: the data address | ||
301 | * @use_tidal: zero if the data address specifies a contiguous block of data, | ||
302 | * non-zero if it specifies a list of tidaws. | ||
303 | * | ||
304 | * Set the input/output data address of the itcw (depending on the value of the | ||
305 | * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag | ||
306 | * is set as well. | ||
307 | */ | ||
308 | void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal) | ||
309 | { | ||
310 | tcw_set_data(itcw->tcw, addr, use_tidal); | ||
311 | } | ||
312 | EXPORT_SYMBOL(itcw_set_data); | ||
313 | |||
314 | /** | ||
315 | * itcw_finalize - calculate length and count fields of the itcw | ||
316 | * @itcw: address of the itcw | ||
317 | * | ||
318 | * Calculate tcw input-/output-count and tccbl fields and add a tcat to the tccb. | ||
319 | * In case input- or output-tida is used, the tidaw-list must be stored in | ||
320 | * contiguous storage (no ttic). The tcal field in the tccb must be | ||
321 | * up-to-date. | ||
322 | */ | ||
323 | void itcw_finalize(struct itcw *itcw) | ||
324 | { | ||
325 | tcw_finalize(itcw->tcw, itcw->num_tidaws); | ||
326 | } | ||
327 | EXPORT_SYMBOL(itcw_finalize); | ||
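
Taken together, the itcw helpers above are meant to be called in a fixed order: size the buffer with itcw_calc_size(), initialize it with itcw_init(), add dcws and tidaws, then finalize. The following is a minimal usage sketch, not code from any driver in this patch; the command code 0x3c, the GFP_DMA allocation (used here to keep the buffer below the 2G limit) and the declaration header <asm/itcw.h> are assumptions for illustration only.

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <asm/itcw.h>

/* Build a read tcw with room for two tidaws and no interrogate support. */
static struct itcw *build_read_itcw(void *data, u32 count)
{
	struct itcw *itcw;
	size_t size;
	void *buf;

	size = itcw_calc_size(0, 2, 0);
	/* GFP_DMA keeps the buffer below the 2G limit required by itcw_init(). */
	buf = kmalloc(size, GFP_KERNEL | GFP_DMA);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	itcw = itcw_init(buf, size, ITCW_OP_READ, 0, 2, 0);
	if (IS_ERR(itcw)) {
		kfree(buf);
		return itcw;
	}
	/* Hypothetical command code; error checks on the add calls omitted. */
	itcw_add_dcw(itcw, 0x3c, 0, NULL, 0, count);
	itcw_add_tidaw(itcw, 0, data, count);
	itcw_finalize(itcw);
	return itcw;
}

The tcw to be handed to the channel subsystem would then be obtained from the result via itcw_get_tcw().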
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 445cf364e461..2bf36e14b102 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c | |||
@@ -2082,7 +2082,6 @@ qdio_timeout_handler(struct ccw_device *cdev) | |||
2082 | default: | 2082 | default: |
2083 | BUG(); | 2083 | BUG(); |
2084 | } | 2084 | } |
2085 | ccw_device_set_timeout(cdev, 0); | ||
2086 | wake_up(&cdev->private->wait_q); | 2085 | wake_up(&cdev->private->wait_q); |
2087 | } | 2086 | } |
2088 | 2087 | ||
@@ -2121,6 +2120,8 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
2121 | case -EIO: | 2120 | case -EIO: |
2122 | QDIO_PRINT_ERR("i/o error on device %s\n", | 2121 | QDIO_PRINT_ERR("i/o error on device %s\n", |
2123 | cdev->dev.bus_id); | 2122 | cdev->dev.bus_id); |
2123 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); | ||
2124 | wake_up(&cdev->private->wait_q); | ||
2124 | return; | 2125 | return; |
2125 | case -ETIMEDOUT: | 2126 | case -ETIMEDOUT: |
2126 | qdio_timeout_handler(cdev); | 2127 | qdio_timeout_handler(cdev); |
@@ -2139,8 +2140,8 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
2139 | QDIO_DBF_TEXT4(0, trace, dbf_text); | 2140 | QDIO_DBF_TEXT4(0, trace, dbf_text); |
2140 | #endif /* CONFIG_QDIO_DEBUG */ | 2141 | #endif /* CONFIG_QDIO_DEBUG */ |
2141 | 2142 | ||
2142 | cstat = irb->scsw.cstat; | 2143 | cstat = irb->scsw.cmd.cstat; |
2143 | dstat = irb->scsw.dstat; | 2144 | dstat = irb->scsw.cmd.dstat; |
2144 | 2145 | ||
2145 | switch (irq_ptr->state) { | 2146 | switch (irq_ptr->state) { |
2146 | case QDIO_IRQ_STATE_INACTIVE: | 2147 | case QDIO_IRQ_STATE_INACTIVE: |
@@ -2353,9 +2354,6 @@ tiqdio_check_chsc_availability(void) | |||
2353 | { | 2354 | { |
2354 | char dbf_text[15]; | 2355 | char dbf_text[15]; |
2355 | 2356 | ||
2356 | if (!css_characteristics_avail) | ||
2357 | return -EIO; | ||
2358 | |||
2359 | /* Check for bit 41. */ | 2357 | /* Check for bit 41. */ |
2360 | if (!css_general_characteristics.aif) { | 2358 | if (!css_general_characteristics.aif) { |
2361 | QDIO_PRINT_WARN("Adapter interruption facility not " \ | 2359 | QDIO_PRINT_WARN("Adapter interruption facility not " \ |
@@ -2667,12 +2665,12 @@ qdio_shutdown(struct ccw_device *cdev, int how) | |||
2667 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | 2665 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); |
2668 | } else if (rc == 0) { | 2666 | } else if (rc == 0) { |
2669 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); | 2667 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); |
2670 | ccw_device_set_timeout(cdev, timeout); | ||
2671 | spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags); | 2668 | spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags); |
2672 | 2669 | ||
2673 | wait_event(cdev->private->wait_q, | 2670 | wait_event_interruptible_timeout(cdev->private->wait_q, |
2674 | irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || | 2671 | irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || |
2675 | irq_ptr->state == QDIO_IRQ_STATE_ERR); | 2672 | irq_ptr->state == QDIO_IRQ_STATE_ERR, |
2673 | timeout); | ||
2676 | } else { | 2674 | } else { |
2677 | QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for " | 2675 | QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for " |
2678 | "device %s\n", result, cdev->dev.bus_id); | 2676 | "device %s\n", result, cdev->dev.bus_id); |
@@ -2692,7 +2690,6 @@ qdio_shutdown(struct ccw_device *cdev, int how) | |||
2692 | 2690 | ||
2693 | /* Ignore errors. */ | 2691 | /* Ignore errors. */ |
2694 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); | 2692 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); |
2695 | ccw_device_set_timeout(cdev, 0); | ||
2696 | out: | 2693 | out: |
2697 | up(&irq_ptr->setting_up_sema); | 2694 | up(&irq_ptr->setting_up_sema); |
2698 | return result; | 2695 | return result; |
@@ -2907,13 +2904,10 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat) | |||
2907 | QDIO_DBF_TEXT0(0,setup,dbf_text); | 2904 | QDIO_DBF_TEXT0(0,setup,dbf_text); |
2908 | QDIO_DBF_TEXT0(0,trace,dbf_text); | 2905 | QDIO_DBF_TEXT0(0,trace,dbf_text); |
2909 | 2906 | ||
2910 | if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) { | 2907 | if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) |
2911 | ccw_device_set_timeout(cdev, 0); | ||
2912 | return; | 2908 | return; |
2913 | } | ||
2914 | 2909 | ||
2915 | qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED); | 2910 | qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED); |
2916 | ccw_device_set_timeout(cdev, 0); | ||
2917 | } | 2911 | } |
2918 | 2912 | ||
2919 | int | 2913 | int |
@@ -3196,8 +3190,6 @@ qdio_establish(struct qdio_initialize *init_data) | |||
3196 | irq_ptr->schid.ssid, irq_ptr->schid.sch_no, | 3190 | irq_ptr->schid.ssid, irq_ptr->schid.sch_no, |
3197 | result, result2); | 3191 | result, result2); |
3198 | result=result2; | 3192 | result=result2; |
3199 | if (result) | ||
3200 | ccw_device_set_timeout(cdev, 0); | ||
3201 | } | 3193 | } |
3202 | 3194 | ||
3203 | spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); | 3195 | spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); |
@@ -3279,7 +3271,6 @@ qdio_activate(struct ccw_device *cdev, int flags) | |||
3279 | 3271 | ||
3280 | spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); | 3272 | spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); |
3281 | 3273 | ||
3282 | ccw_device_set_timeout(cdev, 0); | ||
3283 | ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); | 3274 | ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); |
3284 | result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE, | 3275 | result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE, |
3285 | 0, DOIO_DENY_PREFETCH); | 3276 | 0, DOIO_DENY_PREFETCH); |
@@ -3722,7 +3713,8 @@ tiqdio_register_thinints(void) | |||
3722 | char dbf_text[20]; | 3713 | char dbf_text[20]; |
3723 | 3714 | ||
3724 | tiqdio_ind = | 3715 | tiqdio_ind = |
3725 | s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL); | 3716 | s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL, |
3717 | TIQDIO_THININT_ISC); | ||
3726 | if (IS_ERR(tiqdio_ind)) { | 3718 | if (IS_ERR(tiqdio_ind)) { |
3727 | sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind)); | 3719 | sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind)); |
3728 | QDIO_DBF_TEXT0(0,setup,dbf_text); | 3720 | QDIO_DBF_TEXT0(0,setup,dbf_text); |
@@ -3738,7 +3730,8 @@ static void | |||
3738 | tiqdio_unregister_thinints(void) | 3730 | tiqdio_unregister_thinints(void) |
3739 | { | 3731 | { |
3740 | if (tiqdio_ind) | 3732 | if (tiqdio_ind) |
3741 | s390_unregister_adapter_interrupt(tiqdio_ind); | 3733 | s390_unregister_adapter_interrupt(tiqdio_ind, |
3734 | TIQDIO_THININT_ISC); | ||
3742 | } | 3735 | } |
3743 | 3736 | ||
3744 | static int | 3737 | static int |
@@ -3899,6 +3892,7 @@ init_QDIO(void) | |||
3899 | qdio_mempool_alloc, | 3892 | qdio_mempool_alloc, |
3900 | qdio_mempool_free, NULL); | 3893 | qdio_mempool_free, NULL); |
3901 | 3894 | ||
3895 | isc_register(QDIO_AIRQ_ISC); | ||
3902 | if (tiqdio_check_chsc_availability()) | 3896 | if (tiqdio_check_chsc_availability()) |
3903 | QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n"); | 3897 | QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n"); |
3904 | 3898 | ||
@@ -3911,6 +3905,7 @@ static void __exit | |||
3911 | cleanup_QDIO(void) | 3905 | cleanup_QDIO(void) |
3912 | { | 3906 | { |
3913 | tiqdio_unregister_thinints(); | 3907 | tiqdio_unregister_thinints(); |
3908 | isc_unregister(QDIO_AIRQ_ISC); | ||
3914 | qdio_remove_procfs_entry(); | 3909 | qdio_remove_procfs_entry(); |
3915 | qdio_release_qdio_memory(); | 3910 | qdio_release_qdio_memory(); |
3916 | qdio_unregister_dbf_views(); | 3911 | qdio_unregister_dbf_views(); |
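
One effect of dropping the ccw_device_set_timeout() calls in qdio_shutdown() above is that the timeout now lives in the wait itself. Below is a minimal sketch of that bounded-wait pattern; the wait queue, the integer state values and the return-code handling are placeholders for the real irq_ptr fields, not qdio code.

#include <linux/wait.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(shutdown_wq);
static int shutdown_state;	/* stand-in for irq_ptr->state */

static int wait_for_shutdown(unsigned long timeout)
{
	long rc;

	/* 1 and 2 stand in for QDIO_IRQ_STATE_INACTIVE / QDIO_IRQ_STATE_ERR. */
	rc = wait_event_interruptible_timeout(shutdown_wq,
			shutdown_state == 1 || shutdown_state == 2, timeout);
	if (rc == 0)
		return -ETIMEDOUT;	/* timeout elapsed, state not reached */
	if (rc < 0)
		return rc;		/* interrupted by a signal */
	return 0;			/* state reached within the timeout */
}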
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index c3df6b2c38b7..7656081a24d2 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h | |||
@@ -2,8 +2,8 @@ | |||
2 | #define _CIO_QDIO_H | 2 | #define _CIO_QDIO_H |
3 | 3 | ||
4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
5 | 5 | #include <asm/isc.h> | |
6 | #include "schid.h" | 6 | #include <asm/schid.h> |
7 | 7 | ||
8 | #ifdef CONFIG_QDIO_DEBUG | 8 | #ifdef CONFIG_QDIO_DEBUG |
9 | #define QDIO_VERBOSE_LEVEL 9 | 9 | #define QDIO_VERBOSE_LEVEL 9 |
@@ -26,7 +26,7 @@ | |||
26 | */ | 26 | */ |
27 | #define IQDIO_FILL_LEVEL_TO_POLL 4 | 27 | #define IQDIO_FILL_LEVEL_TO_POLL 4 |
28 | 28 | ||
29 | #define TIQDIO_THININT_ISC 3 | 29 | #define TIQDIO_THININT_ISC QDIO_AIRQ_ISC |
30 | #define TIQDIO_DELAY_TARGET 0 | 30 | #define TIQDIO_DELAY_TARGET 0 |
31 | #define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */ | 31 | #define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */ |
32 | #define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */ | 32 | #define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */ |
diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h deleted file mode 100644 index 54328fec5ade..000000000000 --- a/drivers/s390/cio/schid.h +++ /dev/null | |||
@@ -1,26 +0,0 @@ | |||
1 | #ifndef S390_SCHID_H | ||
2 | #define S390_SCHID_H | ||
3 | |||
4 | struct subchannel_id { | ||
5 | __u32 reserved:13; | ||
6 | __u32 ssid:2; | ||
7 | __u32 one:1; | ||
8 | __u32 sch_no:16; | ||
9 | } __attribute__ ((packed,aligned(4))); | ||
10 | |||
11 | |||
12 | /* Helper function for sane state of pre-allocated subchannel_id. */ | ||
13 | static inline void | ||
14 | init_subchannel_id(struct subchannel_id *schid) | ||
15 | { | ||
16 | memset(schid, 0, sizeof(struct subchannel_id)); | ||
17 | schid->one = 1; | ||
18 | } | ||
19 | |||
20 | static inline int | ||
21 | schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2) | ||
22 | { | ||
23 | return !memcmp(schid1, schid2, sizeof(struct subchannel_id)); | ||
24 | } | ||
25 | |||
26 | #endif /* S390_SCHID_H */ | ||
diff --git a/drivers/s390/cio/scsw.c b/drivers/s390/cio/scsw.c new file mode 100644 index 000000000000..f8da25ab576d --- /dev/null +++ b/drivers/s390/cio/scsw.c | |||
@@ -0,0 +1,843 @@ | |||
1 | /* | ||
2 | * Helper functions for scsw access. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <asm/cio.h> | ||
11 | #include "css.h" | ||
12 | #include "chsc.h" | ||
13 | |||
14 | /** | ||
15 | * scsw_is_tm - check for transport mode scsw | ||
16 | * @scsw: pointer to scsw | ||
17 | * | ||
18 | * Return non-zero if the specified scsw is a transport mode scsw, zero | ||
19 | * otherwise. | ||
20 | */ | ||
21 | int scsw_is_tm(union scsw *scsw) | ||
22 | { | ||
23 | return css_general_characteristics.fcx && (scsw->tm.x == 1); | ||
24 | } | ||
25 | EXPORT_SYMBOL(scsw_is_tm); | ||
26 | |||
27 | /** | ||
28 | * scsw_key - return scsw key field | ||
29 | * @scsw: pointer to scsw | ||
30 | * | ||
31 | * Return the value of the key field of the specified scsw, regardless of | ||
32 | * whether it is a transport mode or command mode scsw. | ||
33 | */ | ||
34 | u32 scsw_key(union scsw *scsw) | ||
35 | { | ||
36 | if (scsw_is_tm(scsw)) | ||
37 | return scsw->tm.key; | ||
38 | else | ||
39 | return scsw->cmd.key; | ||
40 | } | ||
41 | EXPORT_SYMBOL(scsw_key); | ||
42 | |||
43 | /** | ||
44 | * scsw_eswf - return scsw eswf field | ||
45 | * @scsw: pointer to scsw | ||
46 | * | ||
47 | * Return the value of the eswf field of the specified scsw, regardless of | ||
48 | * whether it is a transport mode or command mode scsw. | ||
49 | */ | ||
50 | u32 scsw_eswf(union scsw *scsw) | ||
51 | { | ||
52 | if (scsw_is_tm(scsw)) | ||
53 | return scsw->tm.eswf; | ||
54 | else | ||
55 | return scsw->cmd.eswf; | ||
56 | } | ||
57 | EXPORT_SYMBOL(scsw_eswf); | ||
58 | |||
59 | /** | ||
60 | * scsw_cc - return scsw cc field | ||
61 | * @scsw: pointer to scsw | ||
62 | * | ||
63 | * Return the value of the cc field of the specified scsw, regardless of | ||
64 | * whether it is a transport mode or command mode scsw. | ||
65 | */ | ||
66 | u32 scsw_cc(union scsw *scsw) | ||
67 | { | ||
68 | if (scsw_is_tm(scsw)) | ||
69 | return scsw->tm.cc; | ||
70 | else | ||
71 | return scsw->cmd.cc; | ||
72 | } | ||
73 | EXPORT_SYMBOL(scsw_cc); | ||
74 | |||
75 | /** | ||
76 | * scsw_ectl - return scsw ectl field | ||
77 | * @scsw: pointer to scsw | ||
78 | * | ||
79 | * Return the value of the ectl field of the specified scsw, regardless of | ||
80 | * whether it is a transport mode or command mode scsw. | ||
81 | */ | ||
82 | u32 scsw_ectl(union scsw *scsw) | ||
83 | { | ||
84 | if (scsw_is_tm(scsw)) | ||
85 | return scsw->tm.ectl; | ||
86 | else | ||
87 | return scsw->cmd.ectl; | ||
88 | } | ||
89 | EXPORT_SYMBOL(scsw_ectl); | ||
90 | |||
91 | /** | ||
92 | * scsw_pno - return scsw pno field | ||
93 | * @scsw: pointer to scsw | ||
94 | * | ||
95 | * Return the value of the pno field of the specified scsw, regardless of | ||
96 | * whether it is a transport mode or command mode scsw. | ||
97 | */ | ||
98 | u32 scsw_pno(union scsw *scsw) | ||
99 | { | ||
100 | if (scsw_is_tm(scsw)) | ||
101 | return scsw->tm.pno; | ||
102 | else | ||
103 | return scsw->cmd.pno; | ||
104 | } | ||
105 | EXPORT_SYMBOL(scsw_pno); | ||
106 | |||
107 | /** | ||
108 | * scsw_fctl - return scsw fctl field | ||
109 | * @scsw: pointer to scsw | ||
110 | * | ||
111 | * Return the value of the fctl field of the specified scsw, regardless of | ||
112 | * whether it is a transport mode or command mode scsw. | ||
113 | */ | ||
114 | u32 scsw_fctl(union scsw *scsw) | ||
115 | { | ||
116 | if (scsw_is_tm(scsw)) | ||
117 | return scsw->tm.fctl; | ||
118 | else | ||
119 | return scsw->cmd.fctl; | ||
120 | } | ||
121 | EXPORT_SYMBOL(scsw_fctl); | ||
122 | |||
123 | /** | ||
124 | * scsw_actl - return scsw actl field | ||
125 | * @scsw: pointer to scsw | ||
126 | * | ||
127 | * Return the value of the actl field of the specified scsw, regardless of | ||
128 | * whether it is a transport mode or command mode scsw. | ||
129 | */ | ||
130 | u32 scsw_actl(union scsw *scsw) | ||
131 | { | ||
132 | if (scsw_is_tm(scsw)) | ||
133 | return scsw->tm.actl; | ||
134 | else | ||
135 | return scsw->cmd.actl; | ||
136 | } | ||
137 | EXPORT_SYMBOL(scsw_actl); | ||
138 | |||
139 | /** | ||
140 | * scsw_stctl - return scsw stctl field | ||
141 | * @scsw: pointer to scsw | ||
142 | * | ||
143 | * Return the value of the stctl field of the specified scsw, regardless of | ||
144 | * whether it is a transport mode or command mode scsw. | ||
145 | */ | ||
146 | u32 scsw_stctl(union scsw *scsw) | ||
147 | { | ||
148 | if (scsw_is_tm(scsw)) | ||
149 | return scsw->tm.stctl; | ||
150 | else | ||
151 | return scsw->cmd.stctl; | ||
152 | } | ||
153 | EXPORT_SYMBOL(scsw_stctl); | ||
154 | |||
155 | /** | ||
156 | * scsw_dstat - return scsw dstat field | ||
157 | * @scsw: pointer to scsw | ||
158 | * | ||
159 | * Return the value of the dstat field of the specified scsw, regardless of | ||
160 | * whether it is a transport mode or command mode scsw. | ||
161 | */ | ||
162 | u32 scsw_dstat(union scsw *scsw) | ||
163 | { | ||
164 | if (scsw_is_tm(scsw)) | ||
165 | return scsw->tm.dstat; | ||
166 | else | ||
167 | return scsw->cmd.dstat; | ||
168 | } | ||
169 | EXPORT_SYMBOL(scsw_dstat); | ||
170 | |||
171 | /** | ||
172 | * scsw_cstat - return scsw cstat field | ||
173 | * @scsw: pointer to scsw | ||
174 | * | ||
175 | * Return the value of the cstat field of the specified scsw, regardless of | ||
176 | * whether it is a transport mode or command mode scsw. | ||
177 | */ | ||
178 | u32 scsw_cstat(union scsw *scsw) | ||
179 | { | ||
180 | if (scsw_is_tm(scsw)) | ||
181 | return scsw->tm.cstat; | ||
182 | else | ||
183 | return scsw->cmd.cstat; | ||
184 | } | ||
185 | EXPORT_SYMBOL(scsw_cstat); | ||
186 | |||
187 | /** | ||
188 | * scsw_cmd_is_valid_key - check key field validity | ||
189 | * @scsw: pointer to scsw | ||
190 | * | ||
191 | * Return non-zero if the key field of the specified command mode scsw is | ||
192 | * valid, zero otherwise. | ||
193 | */ | ||
194 | int scsw_cmd_is_valid_key(union scsw *scsw) | ||
195 | { | ||
196 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
197 | } | ||
198 | EXPORT_SYMBOL(scsw_cmd_is_valid_key); | ||
199 | |||
200 | /** | ||
201 | * scsw_cmd_is_valid_sctl - check sctl field validity | ||
202 | * @scsw: pointer to scsw | ||
203 | * | ||
204 | * Return non-zero if the sctl field of the specified command mode scsw is | ||
205 | * valid, zero otherwise. | ||
206 | */ | ||
207 | int scsw_cmd_is_valid_sctl(union scsw *scsw) | ||
208 | { | ||
209 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
210 | } | ||
211 | EXPORT_SYMBOL(scsw_cmd_is_valid_sctl); | ||
212 | |||
213 | /** | ||
214 | * scsw_cmd_is_valid_eswf - check eswf field validity | ||
215 | * @scsw: pointer to scsw | ||
216 | * | ||
217 | * Return non-zero if the eswf field of the specified command mode scsw is | ||
218 | * valid, zero otherwise. | ||
219 | */ | ||
220 | int scsw_cmd_is_valid_eswf(union scsw *scsw) | ||
221 | { | ||
222 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); | ||
223 | } | ||
224 | EXPORT_SYMBOL(scsw_cmd_is_valid_eswf); | ||
225 | |||
226 | /** | ||
227 | * scsw_cmd_is_valid_cc - check cc field validity | ||
228 | * @scsw: pointer to scsw | ||
229 | * | ||
230 | * Return non-zero if the cc field of the specified command mode scsw is | ||
231 | * valid, zero otherwise. | ||
232 | */ | ||
233 | int scsw_cmd_is_valid_cc(union scsw *scsw) | ||
234 | { | ||
235 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && | ||
236 | (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); | ||
237 | } | ||
238 | EXPORT_SYMBOL(scsw_cmd_is_valid_cc); | ||
239 | |||
240 | /** | ||
241 | * scsw_cmd_is_valid_fmt - check fmt field validity | ||
242 | * @scsw: pointer to scsw | ||
243 | * | ||
244 | * Return non-zero if the fmt field of the specified command mode scsw is | ||
245 | * valid, zero otherwise. | ||
246 | */ | ||
247 | int scsw_cmd_is_valid_fmt(union scsw *scsw) | ||
248 | { | ||
249 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
250 | } | ||
251 | EXPORT_SYMBOL(scsw_cmd_is_valid_fmt); | ||
252 | |||
253 | /** | ||
254 | * scsw_cmd_is_valid_pfch - check pfch field validity | ||
255 | * @scsw: pointer to scsw | ||
256 | * | ||
257 | * Return non-zero if the pfch field of the specified command mode scsw is | ||
258 | * valid, zero otherwise. | ||
259 | */ | ||
260 | int scsw_cmd_is_valid_pfch(union scsw *scsw) | ||
261 | { | ||
262 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
263 | } | ||
264 | EXPORT_SYMBOL(scsw_cmd_is_valid_pfch); | ||
265 | |||
266 | /** | ||
267 | * scsw_cmd_is_valid_isic - check isic field validity | ||
268 | * @scsw: pointer to scsw | ||
269 | * | ||
270 | * Return non-zero if the isic field of the specified command mode scsw is | ||
271 | * valid, zero otherwise. | ||
272 | */ | ||
273 | int scsw_cmd_is_valid_isic(union scsw *scsw) | ||
274 | { | ||
275 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
276 | } | ||
277 | EXPORT_SYMBOL(scsw_cmd_is_valid_isic); | ||
278 | |||
279 | /** | ||
280 | * scsw_cmd_is_valid_alcc - check alcc field validity | ||
281 | * @scsw: pointer to scsw | ||
282 | * | ||
283 | * Return non-zero if the alcc field of the specified command mode scsw is | ||
284 | * valid, zero otherwise. | ||
285 | */ | ||
286 | int scsw_cmd_is_valid_alcc(union scsw *scsw) | ||
287 | { | ||
288 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
289 | } | ||
290 | EXPORT_SYMBOL(scsw_cmd_is_valid_alcc); | ||
291 | |||
292 | /** | ||
293 | * scsw_cmd_is_valid_ssi - check ssi field validity | ||
294 | * @scsw: pointer to scsw | ||
295 | * | ||
296 | * Return non-zero if the ssi field of the specified command mode scsw is | ||
297 | * valid, zero otherwise. | ||
298 | */ | ||
299 | int scsw_cmd_is_valid_ssi(union scsw *scsw) | ||
300 | { | ||
301 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
302 | } | ||
303 | EXPORT_SYMBOL(scsw_cmd_is_valid_ssi); | ||
304 | |||
305 | /** | ||
306 | * scsw_cmd_is_valid_zcc - check zcc field validity | ||
307 | * @scsw: pointer to scsw | ||
308 | * | ||
309 | * Return non-zero if the zcc field of the specified command mode scsw is | ||
310 | * valid, zero otherwise. | ||
311 | */ | ||
312 | int scsw_cmd_is_valid_zcc(union scsw *scsw) | ||
313 | { | ||
314 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && | ||
315 | (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS); | ||
316 | } | ||
317 | EXPORT_SYMBOL(scsw_cmd_is_valid_zcc); | ||
318 | |||
319 | /** | ||
320 | * scsw_cmd_is_valid_ectl - check ectl field validity | ||
321 | * @scsw: pointer to scsw | ||
322 | * | ||
323 | * Return non-zero if the ectl field of the specified command mode scsw is | ||
324 | * valid, zero otherwise. | ||
325 | */ | ||
326 | int scsw_cmd_is_valid_ectl(union scsw *scsw) | ||
327 | { | ||
328 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
329 | !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && | ||
330 | (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS); | ||
331 | } | ||
332 | EXPORT_SYMBOL(scsw_cmd_is_valid_ectl); | ||
333 | |||
334 | /** | ||
335 | * scsw_cmd_is_valid_pno - check pno field validity | ||
336 | * @scsw: pointer to scsw | ||
337 | * | ||
338 | * Return non-zero if the pno field of the specified command mode scsw is | ||
339 | * valid, zero otherwise. | ||
340 | */ | ||
341 | int scsw_cmd_is_valid_pno(union scsw *scsw) | ||
342 | { | ||
343 | return (scsw->cmd.fctl != 0) && | ||
344 | (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
345 | (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) || | ||
346 | ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && | ||
347 | (scsw->cmd.actl & SCSW_ACTL_SUSPENDED))); | ||
348 | } | ||
349 | EXPORT_SYMBOL(scsw_cmd_is_valid_pno); | ||
350 | |||
351 | /** | ||
352 | * scsw_cmd_is_valid_fctl - check fctl field validity | ||
353 | * @scsw: pointer to scsw | ||
354 | * | ||
355 | * Return non-zero if the fctl field of the specified command mode scsw is | ||
356 | * valid, zero otherwise. | ||
357 | */ | ||
358 | int scsw_cmd_is_valid_fctl(union scsw *scsw) | ||
359 | { | ||
360 | /* Only valid if pmcw.dnv == 1*/ | ||
361 | return 1; | ||
362 | } | ||
363 | EXPORT_SYMBOL(scsw_cmd_is_valid_fctl); | ||
364 | |||
365 | /** | ||
366 | * scsw_cmd_is_valid_actl - check actl field validity | ||
367 | * @scsw: pointer to scsw | ||
368 | * | ||
369 | * Return non-zero if the actl field of the specified command mode scsw is | ||
370 | * valid, zero otherwise. | ||
371 | */ | ||
372 | int scsw_cmd_is_valid_actl(union scsw *scsw) | ||
373 | { | ||
374 | /* Only valid if pmcw.dnv == 1*/ | ||
375 | return 1; | ||
376 | } | ||
377 | EXPORT_SYMBOL(scsw_cmd_is_valid_actl); | ||
378 | |||
379 | /** | ||
380 | * scsw_cmd_is_valid_stctl - check stctl field validity | ||
381 | * @scsw: pointer to scsw | ||
382 | * | ||
383 | * Return non-zero if the stctl field of the specified command mode scsw is | ||
384 | * valid, zero otherwise. | ||
385 | */ | ||
386 | int scsw_cmd_is_valid_stctl(union scsw *scsw) | ||
387 | { | ||
388 | /* Only valid if pmcw.dnv == 1*/ | ||
389 | return 1; | ||
390 | } | ||
391 | EXPORT_SYMBOL(scsw_cmd_is_valid_stctl); | ||
392 | |||
393 | /** | ||
394 | * scsw_cmd_is_valid_dstat - check dstat field validity | ||
395 | * @scsw: pointer to scsw | ||
396 | * | ||
397 | * Return non-zero if the dstat field of the specified command mode scsw is | ||
398 | * valid, zero otherwise. | ||
399 | */ | ||
400 | int scsw_cmd_is_valid_dstat(union scsw *scsw) | ||
401 | { | ||
402 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
403 | (scsw->cmd.cc != 3); | ||
404 | } | ||
405 | EXPORT_SYMBOL(scsw_cmd_is_valid_dstat); | ||
406 | |||
407 | /** | ||
408 | * scsw_cmd_is_valid_cstat - check cstat field validity | ||
409 | * @scsw: pointer to scsw | ||
410 | * | ||
411 | * Return non-zero if the cstat field of the specified command mode scsw is | ||
412 | * valid, zero otherwise. | ||
413 | */ | ||
414 | int scsw_cmd_is_valid_cstat(union scsw *scsw) | ||
415 | { | ||
416 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
417 | (scsw->cmd.cc != 3); | ||
418 | } | ||
419 | EXPORT_SYMBOL(scsw_cmd_is_valid_cstat); | ||
420 | |||
421 | /** | ||
422 | * scsw_tm_is_valid_key - check key field validity | ||
423 | * @scsw: pointer to scsw | ||
424 | * | ||
425 | * Return non-zero if the key field of the specified transport mode scsw is | ||
426 | * valid, zero otherwise. | ||
427 | */ | ||
428 | int scsw_tm_is_valid_key(union scsw *scsw) | ||
429 | { | ||
430 | return (scsw->tm.fctl & SCSW_FCTL_START_FUNC); | ||
431 | } | ||
432 | EXPORT_SYMBOL(scsw_tm_is_valid_key); | ||
433 | |||
434 | /** | ||
435 | * scsw_tm_is_valid_eswf - check eswf field validity | ||
436 | * @scsw: pointer to scsw | ||
437 | * | ||
438 | * Return non-zero if the eswf field of the specified transport mode scsw is | ||
439 | * valid, zero otherwise. | ||
440 | */ | ||
441 | int scsw_tm_is_valid_eswf(union scsw *scsw) | ||
442 | { | ||
443 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); | ||
444 | } | ||
445 | EXPORT_SYMBOL(scsw_tm_is_valid_eswf); | ||
446 | |||
447 | /** | ||
448 | * scsw_tm_is_valid_cc - check cc field validity | ||
449 | * @scsw: pointer to scsw | ||
450 | * | ||
451 | * Return non-zero if the cc field of the specified transport mode scsw is | ||
452 | * valid, zero otherwise. | ||
453 | */ | ||
454 | int scsw_tm_is_valid_cc(union scsw *scsw) | ||
455 | { | ||
456 | return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) && | ||
457 | (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); | ||
458 | } | ||
459 | EXPORT_SYMBOL(scsw_tm_is_valid_cc); | ||
460 | |||
461 | /** | ||
462 | * scsw_tm_is_valid_fmt - check fmt field validity | ||
463 | * @scsw: pointer to scsw | ||
464 | * | ||
465 | * Return non-zero if the fmt field of the specified transport mode scsw is | ||
466 | * valid, zero otherwise. | ||
467 | */ | ||
468 | int scsw_tm_is_valid_fmt(union scsw *scsw) | ||
469 | { | ||
470 | return 1; | ||
471 | } | ||
472 | EXPORT_SYMBOL(scsw_tm_is_valid_fmt); | ||
473 | |||
474 | /** | ||
475 | * scsw_tm_is_valid_x - check x field validity | ||
476 | * @scsw: pointer to scsw | ||
477 | * | ||
478 | * Return non-zero if the x field of the specified transport mode scsw is | ||
479 | * valid, zero otherwise. | ||
480 | */ | ||
481 | int scsw_tm_is_valid_x(union scsw *scsw) | ||
482 | { | ||
483 | return 1; | ||
484 | } | ||
485 | EXPORT_SYMBOL(scsw_tm_is_valid_x); | ||
486 | |||
487 | /** | ||
488 | * scsw_tm_is_valid_q - check q field validity | ||
489 | * @scsw: pointer to scsw | ||
490 | * | ||
491 | * Return non-zero if the q field of the specified transport mode scsw is | ||
492 | * valid, zero otherwise. | ||
493 | */ | ||
494 | int scsw_tm_is_valid_q(union scsw *scsw) | ||
495 | { | ||
496 | return 1; | ||
497 | } | ||
498 | EXPORT_SYMBOL(scsw_tm_is_valid_q); | ||
499 | |||
500 | /** | ||
501 | * scsw_tm_is_valid_ectl - check ectl field validity | ||
502 | * @scsw: pointer to scsw | ||
503 | * | ||
504 | * Return non-zero if the ectl field of the specified transport mode scsw is | ||
505 | * valid, zero otherwise. | ||
506 | */ | ||
507 | int scsw_tm_is_valid_ectl(union scsw *scsw) | ||
508 | { | ||
509 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
510 | !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && | ||
511 | (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS); | ||
512 | } | ||
513 | EXPORT_SYMBOL(scsw_tm_is_valid_ectl); | ||
514 | |||
515 | /** | ||
516 | * scsw_tm_is_valid_pno - check pno field validity | ||
517 | * @scsw: pointer to scsw | ||
518 | * | ||
519 | * Return non-zero if the pno field of the specified transport mode scsw is | ||
520 | * valid, zero otherwise. | ||
521 | */ | ||
522 | int scsw_tm_is_valid_pno(union scsw *scsw) | ||
523 | { | ||
524 | return (scsw->tm.fctl != 0) && | ||
525 | (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
526 | (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) || | ||
527 | ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && | ||
528 | (scsw->tm.actl & SCSW_ACTL_SUSPENDED))); | ||
529 | } | ||
530 | EXPORT_SYMBOL(scsw_tm_is_valid_pno); | ||
531 | |||
532 | /** | ||
533 | * scsw_tm_is_valid_fctl - check fctl field validity | ||
534 | * @scsw: pointer to scsw | ||
535 | * | ||
536 | * Return non-zero if the fctl field of the specified transport mode scsw is | ||
537 | * valid, zero otherwise. | ||
538 | */ | ||
539 | int scsw_tm_is_valid_fctl(union scsw *scsw) | ||
540 | { | ||
541 | /* Only valid if pmcw.dnv == 1*/ | ||
542 | return 1; | ||
543 | } | ||
544 | EXPORT_SYMBOL(scsw_tm_is_valid_fctl); | ||
545 | |||
546 | /** | ||
547 | * scsw_tm_is_valid_actl - check actl field validity | ||
548 | * @scsw: pointer to scsw | ||
549 | * | ||
550 | * Return non-zero if the actl field of the specified transport mode scsw is | ||
551 | * valid, zero otherwise. | ||
552 | */ | ||
553 | int scsw_tm_is_valid_actl(union scsw *scsw) | ||
554 | { | ||
555 | /* Only valid if pmcw.dnv == 1*/ | ||
556 | return 1; | ||
557 | } | ||
558 | EXPORT_SYMBOL(scsw_tm_is_valid_actl); | ||
559 | |||
560 | /** | ||
561 | * scsw_tm_is_valid_stctl - check stctl field validity | ||
562 | * @scsw: pointer to scsw | ||
563 | * | ||
564 | * Return non-zero if the stctl field of the specified transport mode scsw is | ||
565 | * valid, zero otherwise. | ||
566 | */ | ||
567 | int scsw_tm_is_valid_stctl(union scsw *scsw) | ||
568 | { | ||
569 | /* Only valid if pmcw.dnv == 1*/ | ||
570 | return 1; | ||
571 | } | ||
572 | EXPORT_SYMBOL(scsw_tm_is_valid_stctl); | ||
573 | |||
574 | /** | ||
575 | * scsw_tm_is_valid_dstat - check dstat field validity | ||
576 | * @scsw: pointer to scsw | ||
577 | * | ||
578 | * Return non-zero if the dstat field of the specified transport mode scsw is | ||
579 | * valid, zero otherwise. | ||
580 | */ | ||
581 | int scsw_tm_is_valid_dstat(union scsw *scsw) | ||
582 | { | ||
583 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
584 | (scsw->tm.cc != 3); | ||
585 | } | ||
586 | EXPORT_SYMBOL(scsw_tm_is_valid_dstat); | ||
587 | |||
588 | /** | ||
589 | * scsw_tm_is_valid_cstat - check cstat field validity | ||
590 | * @scsw: pointer to scsw | ||
591 | * | ||
592 | * Return non-zero if the cstat field of the specified transport mode scsw is | ||
593 | * valid, zero otherwise. | ||
594 | */ | ||
595 | int scsw_tm_is_valid_cstat(union scsw *scsw) | ||
596 | { | ||
597 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
598 | (scsw->tm.cc != 3); | ||
599 | } | ||
600 | EXPORT_SYMBOL(scsw_tm_is_valid_cstat); | ||
601 | |||
602 | /** | ||
603 | * scsw_tm_is_valid_fcxs - check fcxs field validity | ||
604 | * @scsw: pointer to scsw | ||
605 | * | ||
606 | * Return non-zero if the fcxs field of the specified transport mode scsw is | ||
607 | * valid, zero otherwise. | ||
608 | */ | ||
609 | int scsw_tm_is_valid_fcxs(union scsw *scsw) | ||
610 | { | ||
611 | return 1; | ||
612 | } | ||
613 | EXPORT_SYMBOL(scsw_tm_is_valid_fcxs); | ||
614 | |||
615 | /** | ||
616 | * scsw_tm_is_valid_schxs - check schxs field validity | ||
617 | * @scsw: pointer to scsw | ||
618 | * | ||
619 | * Return non-zero if the schxs field of the specified transport mode scsw is | ||
620 | * valid, zero otherwise. | ||
621 | */ | ||
622 | int scsw_tm_is_valid_schxs(union scsw *scsw) | ||
623 | { | ||
624 | return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK | | ||
625 | SCHN_STAT_INTF_CTRL_CHK | | ||
626 | SCHN_STAT_PROT_CHECK | | ||
627 | SCHN_STAT_CHN_DATA_CHK)); | ||
628 | } | ||
629 | EXPORT_SYMBOL(scsw_tm_is_valid_schxs); | ||
630 | |||
631 | /** | ||
632 | * scsw_is_valid_actl - check actl field validity | ||
633 | * @scsw: pointer to scsw | ||
634 | * | ||
635 | * Return non-zero if the actl field of the specified scsw is valid, | ||
636 | * regardless of whether it is a transport mode or command mode scsw. | ||
637 | * Return zero if the field does not contain a valid value. | ||
638 | */ | ||
639 | int scsw_is_valid_actl(union scsw *scsw) | ||
640 | { | ||
641 | if (scsw_is_tm(scsw)) | ||
642 | return scsw_tm_is_valid_actl(scsw); | ||
643 | else | ||
644 | return scsw_cmd_is_valid_actl(scsw); | ||
645 | } | ||
646 | EXPORT_SYMBOL(scsw_is_valid_actl); | ||
647 | |||
648 | /** | ||
649 | * scsw_is_valid_cc - check cc field validity | ||
650 | * @scsw: pointer to scsw | ||
651 | * | ||
652 | * Return non-zero if the cc field of the specified scsw is valid, | ||
653 | * regardless of whether it is a transport mode or command mode scsw. | ||
654 | * Return zero if the field does not contain a valid value. | ||
655 | */ | ||
656 | int scsw_is_valid_cc(union scsw *scsw) | ||
657 | { | ||
658 | if (scsw_is_tm(scsw)) | ||
659 | return scsw_tm_is_valid_cc(scsw); | ||
660 | else | ||
661 | return scsw_cmd_is_valid_cc(scsw); | ||
662 | } | ||
663 | EXPORT_SYMBOL(scsw_is_valid_cc); | ||
664 | |||
665 | /** | ||
666 | * scsw_is_valid_cstat - check cstat field validity | ||
667 | * @scsw: pointer to scsw | ||
668 | * | ||
669 | * Return non-zero if the cstat field of the specified scsw is valid, | ||
670 | * regardless of whether it is a transport mode or command mode scsw. | ||
671 | * Return zero if the field does not contain a valid value. | ||
672 | */ | ||
673 | int scsw_is_valid_cstat(union scsw *scsw) | ||
674 | { | ||
675 | if (scsw_is_tm(scsw)) | ||
676 | return scsw_tm_is_valid_cstat(scsw); | ||
677 | else | ||
678 | return scsw_cmd_is_valid_cstat(scsw); | ||
679 | } | ||
680 | EXPORT_SYMBOL(scsw_is_valid_cstat); | ||
681 | |||
682 | /** | ||
683 | * scsw_is_valid_dstat - check dstat field validity | ||
684 | * @scsw: pointer to scsw | ||
685 | * | ||
686 | * Return non-zero if the dstat field of the specified scsw is valid, | ||
687 | * regardless of whether it is a transport mode or command mode scsw. | ||
688 | * Return zero if the field does not contain a valid value. | ||
689 | */ | ||
690 | int scsw_is_valid_dstat(union scsw *scsw) | ||
691 | { | ||
692 | if (scsw_is_tm(scsw)) | ||
693 | return scsw_tm_is_valid_dstat(scsw); | ||
694 | else | ||
695 | return scsw_cmd_is_valid_dstat(scsw); | ||
696 | } | ||
697 | EXPORT_SYMBOL(scsw_is_valid_dstat); | ||
698 | |||
699 | /** | ||
700 | * scsw_is_valid_ectl - check ectl field validity | ||
701 | * @scsw: pointer to scsw | ||
702 | * | ||
703 | * Return non-zero if the ectl field of the specified scsw is valid, | ||
704 | * regardless of whether it is a transport mode or command mode scsw. | ||
705 | * Return zero if the field does not contain a valid value. | ||
706 | */ | ||
707 | int scsw_is_valid_ectl(union scsw *scsw) | ||
708 | { | ||
709 | if (scsw_is_tm(scsw)) | ||
710 | return scsw_tm_is_valid_ectl(scsw); | ||
711 | else | ||
712 | return scsw_cmd_is_valid_ectl(scsw); | ||
713 | } | ||
714 | EXPORT_SYMBOL(scsw_is_valid_ectl); | ||
715 | |||
716 | /** | ||
717 | * scsw_is_valid_eswf - check eswf field validity | ||
718 | * @scsw: pointer to scsw | ||
719 | * | ||
720 | * Return non-zero if the eswf field of the specified scsw is valid, | ||
721 | * regardless of whether it is a transport mode or command mode scsw. | ||
722 | * Return zero if the field does not contain a valid value. | ||
723 | */ | ||
724 | int scsw_is_valid_eswf(union scsw *scsw) | ||
725 | { | ||
726 | if (scsw_is_tm(scsw)) | ||
727 | return scsw_tm_is_valid_eswf(scsw); | ||
728 | else | ||
729 | return scsw_cmd_is_valid_eswf(scsw); | ||
730 | } | ||
731 | EXPORT_SYMBOL(scsw_is_valid_eswf); | ||
732 | |||
733 | /** | ||
734 | * scsw_is_valid_fctl - check fctl field validity | ||
735 | * @scsw: pointer to scsw | ||
736 | * | ||
737 | * Return non-zero if the fctl field of the specified scsw is valid, | ||
738 | * regardless of whether it is a transport mode or command mode scsw. | ||
739 | * Return zero if the field does not contain a valid value. | ||
740 | */ | ||
741 | int scsw_is_valid_fctl(union scsw *scsw) | ||
742 | { | ||
743 | if (scsw_is_tm(scsw)) | ||
744 | return scsw_tm_is_valid_fctl(scsw); | ||
745 | else | ||
746 | return scsw_cmd_is_valid_fctl(scsw); | ||
747 | } | ||
748 | EXPORT_SYMBOL(scsw_is_valid_fctl); | ||
749 | |||
750 | /** | ||
751 | * scsw_is_valid_key - check key field validity | ||
752 | * @scsw: pointer to scsw | ||
753 | * | ||
754 | * Return non-zero if the key field of the specified scsw is valid, | ||
755 | * regardless of whether it is a transport mode or command mode scsw. | ||
756 | * Return zero if the field does not contain a valid value. | ||
757 | */ | ||
758 | int scsw_is_valid_key(union scsw *scsw) | ||
759 | { | ||
760 | if (scsw_is_tm(scsw)) | ||
761 | return scsw_tm_is_valid_key(scsw); | ||
762 | else | ||
763 | return scsw_cmd_is_valid_key(scsw); | ||
764 | } | ||
765 | EXPORT_SYMBOL(scsw_is_valid_key); | ||
766 | |||
767 | /** | ||
768 | * scsw_is_valid_pno - check pno field validity | ||
769 | * @scsw: pointer to scsw | ||
770 | * | ||
771 | * Return non-zero if the pno field of the specified scsw is valid, | ||
772 | * regardless of whether it is a transport mode or command mode scsw. | ||
773 | * Return zero if the field does not contain a valid value. | ||
774 | */ | ||
775 | int scsw_is_valid_pno(union scsw *scsw) | ||
776 | { | ||
777 | if (scsw_is_tm(scsw)) | ||
778 | return scsw_tm_is_valid_pno(scsw); | ||
779 | else | ||
780 | return scsw_cmd_is_valid_pno(scsw); | ||
781 | } | ||
782 | EXPORT_SYMBOL(scsw_is_valid_pno); | ||
783 | |||
784 | /** | ||
785 | * scsw_is_valid_stctl - check stctl field validity | ||
786 | * @scsw: pointer to scsw | ||
787 | * | ||
788 | * Return non-zero if the stctl field of the specified scsw is valid, | ||
789 | * regardless of whether it is a transport mode or command mode scsw. | ||
790 | * Return zero if the field does not contain a valid value. | ||
791 | */ | ||
792 | int scsw_is_valid_stctl(union scsw *scsw) | ||
793 | { | ||
794 | if (scsw_is_tm(scsw)) | ||
795 | return scsw_tm_is_valid_stctl(scsw); | ||
796 | else | ||
797 | return scsw_cmd_is_valid_stctl(scsw); | ||
798 | } | ||
799 | EXPORT_SYMBOL(scsw_is_valid_stctl); | ||
800 | |||
801 | /** | ||
802 | * scsw_cmd_is_solicited - check for solicited scsw | ||
803 | * @scsw: pointer to scsw | ||
804 | * | ||
805 | * Return non-zero if the command mode scsw indicates that the associated | ||
806 | * status condition is solicited, zero if it is unsolicited. | ||
807 | */ | ||
808 | int scsw_cmd_is_solicited(union scsw *scsw) | ||
809 | { | ||
810 | return (scsw->cmd.cc != 0) || (scsw->cmd.stctl != | ||
811 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); | ||
812 | } | ||
813 | EXPORT_SYMBOL(scsw_cmd_is_solicited); | ||
814 | |||
815 | /** | ||
816 | * scsw_tm_is_solicited - check for solicited scsw | ||
817 | * @scsw: pointer to scsw | ||
818 | * | ||
819 | * Return non-zero if the transport mode scsw indicates that the associated | ||
820 | * status condition is solicited, zero if it is unsolicited. | ||
821 | */ | ||
822 | int scsw_tm_is_solicited(union scsw *scsw) | ||
823 | { | ||
824 | return (scsw->tm.cc != 0) || (scsw->tm.stctl != | ||
825 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); | ||
826 | } | ||
827 | EXPORT_SYMBOL(scsw_tm_is_solicited); | ||
828 | |||
829 | /** | ||
830 | * scsw_is_solicited - check for solicited scsw | ||
831 | * @scsw: pointer to scsw | ||
832 | * | ||
833 | * Return non-zero if the transport or command mode scsw indicates that the | ||
834 | * associated status condition is solicited, zero if it is unsolicited. | ||
835 | */ | ||
836 | int scsw_is_solicited(union scsw *scsw) | ||
837 | { | ||
838 | if (scsw_is_tm(scsw)) | ||
839 | return scsw_tm_is_solicited(scsw); | ||
840 | else | ||
841 | return scsw_cmd_is_solicited(scsw); | ||
842 | } | ||
843 | EXPORT_SYMBOL(scsw_is_solicited); | ||
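
The point of these accessors is that callers no longer need to know whether an irb carries a command mode or a transport mode scsw. A minimal sketch of a status check built on them follows; it assumes the prototypes are visible through <asm/cio.h>, and the success condition mirrors the common "channel end + device end, no channel checks" idiom rather than any specific driver in this patch.

#include <asm/cio.h>

/* Return non-zero if the irb reports a cleanly completed solicited I/O. */
static int irb_reports_success(struct irb *irb)
{
	union scsw *scsw = &irb->scsw;

	if (!scsw_is_solicited(scsw))
		return 0;
	if (!scsw_is_valid_dstat(scsw) || !scsw_is_valid_cstat(scsw))
		return 0;
	return scsw_dstat(scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	       scsw_cstat(scsw) == 0;
}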
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index a1ab3e3efd11..62b6b55230d0 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -34,13 +34,15 @@ | |||
34 | #include <linux/mutex.h> | 34 | #include <linux/mutex.h> |
35 | #include <asm/s390_rdev.h> | 35 | #include <asm/s390_rdev.h> |
36 | #include <asm/reset.h> | 36 | #include <asm/reset.h> |
37 | #include <linux/hrtimer.h> | ||
38 | #include <linux/ktime.h> | ||
37 | 39 | ||
38 | #include "ap_bus.h" | 40 | #include "ap_bus.h" |
39 | 41 | ||
40 | /* Some prototypes. */ | 42 | /* Some prototypes. */ |
41 | static void ap_scan_bus(struct work_struct *); | 43 | static void ap_scan_bus(struct work_struct *); |
42 | static void ap_poll_all(unsigned long); | 44 | static void ap_poll_all(unsigned long); |
43 | static void ap_poll_timeout(unsigned long); | 45 | static enum hrtimer_restart ap_poll_timeout(struct hrtimer *); |
44 | static int ap_poll_thread_start(void); | 46 | static int ap_poll_thread_start(void); |
45 | static void ap_poll_thread_stop(void); | 47 | static void ap_poll_thread_stop(void); |
46 | static void ap_request_timeout(unsigned long); | 48 | static void ap_request_timeout(unsigned long); |
@@ -80,12 +82,15 @@ static DECLARE_WORK(ap_config_work, ap_scan_bus); | |||
80 | /* | 82 | /* |
81 | * Tasklet & timer for AP request polling. | 83 | * Tasklet & timer for AP request polling. |
82 | */ | 84 | */ |
83 | static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0); | ||
84 | static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); | 85 | static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); |
85 | static atomic_t ap_poll_requests = ATOMIC_INIT(0); | 86 | static atomic_t ap_poll_requests = ATOMIC_INIT(0); |
86 | static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); | 87 | static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); |
87 | static struct task_struct *ap_poll_kthread = NULL; | 88 | static struct task_struct *ap_poll_kthread = NULL; |
88 | static DEFINE_MUTEX(ap_poll_thread_mutex); | 89 | static DEFINE_MUTEX(ap_poll_thread_mutex); |
90 | static struct hrtimer ap_poll_timer; | ||
91 | /* In LPAR, poll at 4kHz, i.e. every 250000 nanoseconds. | ||
92 | * Under z/VM, change to 1500000 nanoseconds to match the z/VM polling rate. */ | ||
93 | static unsigned long long poll_timeout = 250000; | ||
89 | 94 | ||
90 | /** | 95 | /** |
91 | * ap_intructions_available() - Test if AP instructions are available. | 96 | * ap_intructions_available() - Test if AP instructions are available. |
@@ -636,11 +641,39 @@ static ssize_t ap_poll_thread_store(struct bus_type *bus, | |||
636 | 641 | ||
637 | static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store); | 642 | static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store); |
638 | 643 | ||
644 | static ssize_t poll_timeout_show(struct bus_type *bus, char *buf) | ||
645 | { | ||
646 | return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout); | ||
647 | } | ||
648 | |||
649 | static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf, | ||
650 | size_t count) | ||
651 | { | ||
652 | unsigned long long time; | ||
653 | ktime_t hr_time; | ||
654 | |||
655 | /* 120 seconds = maximum poll interval */ | ||
656 | if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || time > 120000000000) | ||
657 | return -EINVAL; | ||
658 | poll_timeout = time; | ||
659 | hr_time = ktime_set(0, poll_timeout); | ||
660 | |||
661 | if (!hrtimer_is_queued(&ap_poll_timer) || | ||
662 | !hrtimer_forward(&ap_poll_timer, ap_poll_timer.expires, hr_time)) { | ||
663 | ap_poll_timer.expires = hr_time; | ||
664 | hrtimer_start(&ap_poll_timer, hr_time, HRTIMER_MODE_ABS); | ||
665 | } | ||
666 | return count; | ||
667 | } | ||
668 | |||
669 | static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store); | ||
670 | |||
639 | static struct bus_attribute *const ap_bus_attrs[] = { | 671 | static struct bus_attribute *const ap_bus_attrs[] = { |
640 | &bus_attr_ap_domain, | 672 | &bus_attr_ap_domain, |
641 | &bus_attr_config_time, | 673 | &bus_attr_config_time, |
642 | &bus_attr_poll_thread, | 674 | &bus_attr_poll_thread, |
643 | NULL | 675 | &bus_attr_poll_timeout, |
676 | NULL, | ||
644 | }; | 677 | }; |
645 | 678 | ||
646 | /** | 679 | /** |
@@ -895,9 +928,10 @@ ap_config_timeout(unsigned long ptr) | |||
895 | */ | 928 | */ |
896 | static inline void ap_schedule_poll_timer(void) | 929 | static inline void ap_schedule_poll_timer(void) |
897 | { | 930 | { |
898 | if (timer_pending(&ap_poll_timer)) | 931 | if (hrtimer_is_queued(&ap_poll_timer)) |
899 | return; | 932 | return; |
900 | mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME); | 933 | hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), |
934 | HRTIMER_MODE_ABS); | ||
901 | } | 935 | } |
902 | 936 | ||
903 | /** | 937 | /** |
@@ -1115,13 +1149,14 @@ EXPORT_SYMBOL(ap_cancel_message); | |||
1115 | 1149 | ||
1116 | /** | 1150 | /** |
1117 | * ap_poll_timeout(): AP receive polling for finished AP requests. | 1151 | * ap_poll_timeout(): AP receive polling for finished AP requests. |
1118 | * @unused: Unused variable. | 1152 | * @unused: Unused pointer. |
1119 | * | 1153 | * |
1120 | * Schedules the AP tasklet. | 1154 | * Schedules the AP tasklet using a high resolution timer. |
1121 | */ | 1155 | */ |
1122 | static void ap_poll_timeout(unsigned long unused) | 1156 | static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused) |
1123 | { | 1157 | { |
1124 | tasklet_schedule(&ap_tasklet); | 1158 | tasklet_schedule(&ap_tasklet); |
1159 | return HRTIMER_NORESTART; | ||
1125 | } | 1160 | } |
1126 | 1161 | ||
1127 | /** | 1162 | /** |
@@ -1344,6 +1379,14 @@ int __init ap_module_init(void) | |||
1344 | ap_config_timer.expires = jiffies + ap_config_time * HZ; | 1379 | ap_config_timer.expires = jiffies + ap_config_time * HZ; |
1345 | add_timer(&ap_config_timer); | 1380 | add_timer(&ap_config_timer); |
1346 | 1381 | ||
1382 | /* Set up the high resolution poll timer. | ||
1383 | * If we are running under z/VM, adjust polling to the z/VM polling rate. | ||
1384 | */ | ||
1385 | if (MACHINE_IS_VM) | ||
1386 | poll_timeout = 1500000; | ||
1387 | hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
1388 | ap_poll_timer.function = ap_poll_timeout; | ||
1389 | |||
1347 | /* Start the low priority AP bus poll thread. */ | 1390 | /* Start the low priority AP bus poll thread. */ |
1348 | if (ap_thread_flag) { | 1391 | if (ap_thread_flag) { |
1349 | rc = ap_poll_thread_start(); | 1392 | rc = ap_poll_thread_start(); |
@@ -1355,7 +1398,7 @@ int __init ap_module_init(void) | |||
1355 | 1398 | ||
1356 | out_work: | 1399 | out_work: |
1357 | del_timer_sync(&ap_config_timer); | 1400 | del_timer_sync(&ap_config_timer); |
1358 | del_timer_sync(&ap_poll_timer); | 1401 | hrtimer_cancel(&ap_poll_timer); |
1359 | destroy_workqueue(ap_work_queue); | 1402 | destroy_workqueue(ap_work_queue); |
1360 | out_root: | 1403 | out_root: |
1361 | s390_root_dev_unregister(ap_root_device); | 1404 | s390_root_dev_unregister(ap_root_device); |
@@ -1386,7 +1429,7 @@ void ap_module_exit(void) | |||
1386 | ap_reset_domain(); | 1429 | ap_reset_domain(); |
1387 | ap_poll_thread_stop(); | 1430 | ap_poll_thread_stop(); |
1388 | del_timer_sync(&ap_config_timer); | 1431 | del_timer_sync(&ap_config_timer); |
1389 | del_timer_sync(&ap_poll_timer); | 1432 | hrtimer_cancel(&ap_poll_timer); |
1390 | destroy_workqueue(ap_work_queue); | 1433 | destroy_workqueue(ap_work_queue); |
1391 | tasklet_kill(&ap_tasklet); | 1434 | tasklet_kill(&ap_tasklet); |
1392 | s390_root_dev_unregister(ap_root_device); | 1435 | s390_root_dev_unregister(ap_root_device); |
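
The ap_bus changes above swap the jiffies-based poll timer for an hrtimer so the interval can be tuned in nanoseconds (250000 ns corresponds to a 4 kHz poll rate; the 1500000 ns used under z/VM is roughly 667 Hz). The sketch below follows the same hrtimer calls used in the hunks above, but with placeholder names and a stub tasklet; it is an illustration of the pattern, not the ap_bus code itself.

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/interrupt.h>

static struct hrtimer poll_timer;
static unsigned long long poll_ns = 250000;	/* 250 us, i.e. 4 kHz */

static void poll_all(unsigned long data)
{
	/* scan devices for finished requests here */
}
static DECLARE_TASKLET(poll_tasklet, poll_all, 0);

/* Timer callback: defer the actual polling to the tasklet. */
static enum hrtimer_restart poll_timer_fn(struct hrtimer *unused)
{
	tasklet_schedule(&poll_tasklet);
	return HRTIMER_NORESTART;
}

static void poll_timer_setup(void)
{
	hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	poll_timer.function = poll_timer_fn;
}

/* Re-arm the timer unless it is already queued. */
static void schedule_poll_timer(void)
{
	if (hrtimer_is_queued(&poll_timer))
		return;
	hrtimer_start(&poll_timer, ktime_set(0, poll_ns), HRTIMER_MODE_ABS);
}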
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index c1e1200c43fc..446378b308fc 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h | |||
@@ -92,6 +92,8 @@ struct ap_queue_status { | |||
92 | #define AP_DEVICE_TYPE_PCIXCC 5 | 92 | #define AP_DEVICE_TYPE_PCIXCC 5 |
93 | #define AP_DEVICE_TYPE_CEX2A 6 | 93 | #define AP_DEVICE_TYPE_CEX2A 6 |
94 | #define AP_DEVICE_TYPE_CEX2C 7 | 94 | #define AP_DEVICE_TYPE_CEX2C 7 |
95 | #define AP_DEVICE_TYPE_CEX2A2 8 | ||
96 | #define AP_DEVICE_TYPE_CEX2C2 9 | ||
95 | 97 | ||
96 | /* | 98 | /* |
97 | * AP reset flag states | 99 | * AP reset flag states |
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 4d36e805a234..cb22b97944b8 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/fs.h> | 34 | #include <linux/fs.h> |
35 | #include <linux/proc_fs.h> | 35 | #include <linux/proc_fs.h> |
36 | #include <linux/compat.h> | 36 | #include <linux/compat.h> |
37 | #include <linux/smp_lock.h> | ||
37 | #include <asm/atomic.h> | 38 | #include <asm/atomic.h> |
38 | #include <asm/uaccess.h> | 39 | #include <asm/uaccess.h> |
39 | #include <linux/hw_random.h> | 40 | #include <linux/hw_random.h> |
@@ -300,7 +301,9 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf, | |||
300 | */ | 301 | */ |
301 | static int zcrypt_open(struct inode *inode, struct file *filp) | 302 | static int zcrypt_open(struct inode *inode, struct file *filp) |
302 | { | 303 | { |
304 | lock_kernel(); | ||
303 | atomic_inc(&zcrypt_open_count); | 305 | atomic_inc(&zcrypt_open_count); |
306 | unlock_kernel(); | ||
304 | return 0; | 307 | return 0; |
305 | } | 308 | } |
306 | 309 | ||
@@ -1068,10 +1071,8 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer, | |||
1068 | 1071 | ||
1069 | #define LBUFSIZE 1200UL | 1072 | #define LBUFSIZE 1200UL |
1070 | lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); | 1073 | lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); |
1071 | if (!lbuf) { | 1074 | if (!lbuf) |
1072 | PRINTK("kmalloc failed!\n"); | ||
1073 | return 0; | 1075 | return 0; |
1074 | } | ||
1075 | 1076 | ||
1076 | local_count = min(LBUFSIZE - 1, count); | 1077 | local_count = min(LBUFSIZE - 1, count); |
1077 | if (copy_from_user(lbuf, buffer, local_count) != 0) { | 1078 | if (copy_from_user(lbuf, buffer, local_count) != 0) { |
@@ -1081,23 +1082,15 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer, | |||
1081 | lbuf[local_count] = '\0'; | 1082 | lbuf[local_count] = '\0'; |
1082 | 1083 | ||
1083 | ptr = strstr(lbuf, "Online devices"); | 1084 | ptr = strstr(lbuf, "Online devices"); |
1084 | if (!ptr) { | 1085 | if (!ptr) |
1085 | PRINTK("Unable to parse data (missing \"Online devices\")\n"); | ||
1086 | goto out; | 1086 | goto out; |
1087 | } | ||
1088 | ptr = strstr(ptr, "\n"); | 1087 | ptr = strstr(ptr, "\n"); |
1089 | if (!ptr) { | 1088 | if (!ptr) |
1090 | PRINTK("Unable to parse data (missing newline " | ||
1091 | "after \"Online devices\")\n"); | ||
1092 | goto out; | 1089 | goto out; |
1093 | } | ||
1094 | ptr++; | 1090 | ptr++; |
1095 | 1091 | ||
1096 | if (strstr(ptr, "Waiting work element counts") == NULL) { | 1092 | if (strstr(ptr, "Waiting work element counts") == NULL) |
1097 | PRINTK("Unable to parse data (missing " | ||
1098 | "\"Waiting work element counts\")\n"); | ||
1099 | goto out; | 1093 | goto out; |
1100 | } | ||
1101 | 1094 | ||
1102 | for (j = 0; j < 64 && *ptr; ptr++) { | 1095 | for (j = 0; j < 64 && *ptr; ptr++) { |
1103 | /* | 1096 | /* |
@@ -1197,16 +1190,12 @@ int __init zcrypt_api_init(void) | |||
1197 | 1190 | ||
1198 | /* Register the request sprayer. */ | 1191 | /* Register the request sprayer. */ |
1199 | rc = misc_register(&zcrypt_misc_device); | 1192 | rc = misc_register(&zcrypt_misc_device); |
1200 | if (rc < 0) { | 1193 | if (rc < 0) |
1201 | PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n", | ||
1202 | zcrypt_misc_device.minor, rc); | ||
1203 | goto out; | 1194 | goto out; |
1204 | } | ||
1205 | 1195 | ||
1206 | /* Set up the proc file system */ | 1196 | /* Set up the proc file system */ |
1207 | zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); | 1197 | zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); |
1208 | if (!zcrypt_entry) { | 1198 | if (!zcrypt_entry) { |
1209 | PRINTK("Couldn't create z90crypt proc entry\n"); | ||
1210 | rc = -ENOMEM; | 1199 | rc = -ENOMEM; |
1211 | goto out_misc; | 1200 | goto out_misc; |
1212 | } | 1201 | } |
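The two-line change to zcrypt_open() above is part of the BKL push-down of that period: as the VFS stops taking the big kernel lock around ->open(), drivers that might have relied on it take lock_kernel()/unlock_kernel() themselves, hence the new <linux/smp_lock.h> include. A minimal sketch of the pattern, with a hypothetical counter standing in for the driver's own open-count state:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
#include <asm/atomic.h>

static atomic_t demo_open_count = ATOMIC_INIT(0);

static int demo_open(struct inode *inode, struct file *filp)
{
	lock_kernel();			/* serialize against remaining BKL users */
	atomic_inc(&demo_open_count);
	unlock_kernel();
	return 0;
}

static int demo_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&demo_open_count);
	return 0;
}

static const struct file_operations demo_fops = {
	.owner	 = THIS_MODULE,
	.open	 = demo_open,
	.release = demo_release,
};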
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 5c6e222b2ac4..1d1ec74dadb2 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h | |||
@@ -30,34 +30,6 @@ | |||
30 | #ifndef _ZCRYPT_API_H_ | 30 | #ifndef _ZCRYPT_API_H_ |
31 | #define _ZCRYPT_API_H_ | 31 | #define _ZCRYPT_API_H_ |
32 | 32 | ||
33 | /** | ||
34 | * Macro definitions | ||
35 | * | ||
36 | * PDEBUG debugs in the form "zcrypt: function_name -> message" | ||
37 | * | ||
38 | * PRINTK is like PDEBUG, except that it is always enabled | ||
39 | * PRINTKN is like PRINTK, except that it does not include the function name | ||
40 | * PRINTKW is like PRINTK, except that it uses KERN_WARNING | ||
41 | * PRINTKC is like PRINTK, except that it uses KERN_CRIT | ||
42 | */ | ||
43 | #define DEV_NAME "zcrypt" | ||
44 | |||
45 | #define PRINTK(fmt, args...) \ | ||
46 | printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args) | ||
47 | #define PRINTKN(fmt, args...) \ | ||
48 | printk(KERN_DEBUG DEV_NAME ": " fmt, ## args) | ||
49 | #define PRINTKW(fmt, args...) \ | ||
50 | printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __func__ , ## args) | ||
51 | #define PRINTKC(fmt, args...) \ | ||
52 | printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __func__ , ## args) | ||
53 | |||
54 | #ifdef ZCRYPT_DEBUG | ||
55 | #define PDEBUG(fmt, args...) \ | ||
56 | printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args) | ||
57 | #else | ||
58 | #define PDEBUG(fmt, args...) do {} while (0) | ||
59 | #endif | ||
60 | |||
61 | #include "ap_bus.h" | 33 | #include "ap_bus.h" |
62 | #include <asm/zcrypt.h> | 34 | #include <asm/zcrypt.h> |
63 | 35 | ||
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index 08657f604b8c..54f4cbc3be9e 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c | |||
@@ -49,6 +49,7 @@ | |||
49 | 49 | ||
50 | static struct ap_device_id zcrypt_cex2a_ids[] = { | 50 | static struct ap_device_id zcrypt_cex2a_ids[] = { |
51 | { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) }, | 51 | { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) }, |
52 | { AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) }, | ||
52 | { /* end of list */ }, | 53 | { /* end of list */ }, |
53 | }; | 54 | }; |
54 | 55 | ||
@@ -242,9 +243,6 @@ static int convert_response(struct zcrypt_device *zdev, | |||
242 | return convert_type80(zdev, reply, | 243 | return convert_type80(zdev, reply, |
243 | outputdata, outputdatalength); | 244 | outputdata, outputdatalength); |
244 | default: /* Unknown response type, this should NEVER EVER happen */ | 245 | default: /* Unknown response type, this should NEVER EVER happen */ |
245 | PRINTK("Unrecognized Message Header: %08x%08x\n", | ||
246 | *(unsigned int *) reply->message, | ||
247 | *(unsigned int *) (reply->message+4)); | ||
248 | zdev->online = 0; | 246 | zdev->online = 0; |
249 | return -EAGAIN; /* repeat the request on a different device. */ | 247 | return -EAGAIN; /* repeat the request on a different device. */ |
250 | } | 248 | } |
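Besides the printk removal, the only functional change in zcrypt_cex2a.c is the extra entry in its AP device table, so the driver also binds to the new CEX2A2 type code (8) defined in ap_bus.h above; zcrypt_pcixcc.c further down gets the matching CEX2C2 entry. For reference, the resulting table has roughly this shape, assuming (as the existing driver does) that struct ap_device_id and the AP_DEVICE() helper come from ap_bus.h:

#include "ap_bus.h"

static struct ap_device_id demo_cex2a_ids[] = {
	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A)  },	/* type code 6 */
	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) },	/* type code 8, added by this patch */
	{ /* end of list */ },
};

The AP bus core matches newly detected queues against these type codes, so adding a line here is all that is needed for the driver to claim the new adapter variant.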
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h index 3e27fe77d207..03ba27f05f92 100644 --- a/drivers/s390/crypto/zcrypt_error.h +++ b/drivers/s390/crypto/zcrypt_error.h | |||
@@ -92,10 +92,6 @@ static inline int convert_error(struct zcrypt_device *zdev, | |||
92 | { | 92 | { |
93 | struct error_hdr *ehdr = reply->message; | 93 | struct error_hdr *ehdr = reply->message; |
94 | 94 | ||
95 | PRINTK("Hardware error : Type %02x Message Header: %08x%08x\n", | ||
96 | ehdr->type, *(unsigned int *) reply->message, | ||
97 | *(unsigned int *) (reply->message + 4)); | ||
98 | |||
99 | switch (ehdr->reply_code) { | 95 | switch (ehdr->reply_code) { |
100 | case REP82_ERROR_OPERAND_INVALID: | 96 | case REP82_ERROR_OPERAND_INVALID: |
101 | case REP82_ERROR_OPERAND_SIZE: | 97 | case REP82_ERROR_OPERAND_SIZE: |
@@ -123,8 +119,6 @@ static inline int convert_error(struct zcrypt_device *zdev, | |||
123 | zdev->online = 0; | 119 | zdev->online = 0; |
124 | return -EAGAIN; | 120 | return -EAGAIN; |
125 | default: | 121 | default: |
126 | PRINTKW("unknown type %02x reply code = %d\n", | ||
127 | ehdr->type, ehdr->reply_code); | ||
128 | zdev->online = 0; | 122 | zdev->online = 0; |
129 | return -EAGAIN; /* repeat the request on a different device. */ | 123 | return -EAGAIN; /* repeat the request on a different device. */ |
130 | } | 124 | } |
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c index 6e93b4751782..12da4815ba8e 100644 --- a/drivers/s390/crypto/zcrypt_pcica.c +++ b/drivers/s390/crypto/zcrypt_pcica.c | |||
@@ -226,9 +226,6 @@ static int convert_response(struct zcrypt_device *zdev, | |||
226 | return convert_type84(zdev, reply, | 226 | return convert_type84(zdev, reply, |
227 | outputdata, outputdatalength); | 227 | outputdata, outputdatalength); |
228 | default: /* Unknown response type, this should NEVER EVER happen */ | 228 | default: /* Unknown response type, this should NEVER EVER happen */ |
229 | PRINTK("Unrecognized Message Header: %08x%08x\n", | ||
230 | *(unsigned int *) reply->message, | ||
231 | *(unsigned int *) (reply->message+4)); | ||
232 | zdev->online = 0; | 229 | zdev->online = 0; |
233 | return -EAGAIN; /* repeat the request on a different device. */ | 230 | return -EAGAIN; /* repeat the request on a different device. */ |
234 | } | 231 | } |
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c index 17ea56ce1c11..779952cb19fc 100644 --- a/drivers/s390/crypto/zcrypt_pcicc.c +++ b/drivers/s390/crypto/zcrypt_pcicc.c | |||
@@ -361,26 +361,18 @@ static int convert_type86(struct zcrypt_device *zdev, | |||
361 | service_rc = le16_to_cpu(msg->cprb.ccp_rtcode); | 361 | service_rc = le16_to_cpu(msg->cprb.ccp_rtcode); |
362 | if (unlikely(service_rc != 0)) { | 362 | if (unlikely(service_rc != 0)) { |
363 | service_rs = le16_to_cpu(msg->cprb.ccp_rscode); | 363 | service_rs = le16_to_cpu(msg->cprb.ccp_rscode); |
364 | if (service_rc == 8 && service_rs == 66) { | 364 | if (service_rc == 8 && service_rs == 66) |
365 | PDEBUG("Bad block format on PCICC\n"); | ||
366 | return -EINVAL; | 365 | return -EINVAL; |
367 | } | 366 | if (service_rc == 8 && service_rs == 65) |
368 | if (service_rc == 8 && service_rs == 65) { | ||
369 | PDEBUG("Probably an even modulus on PCICC\n"); | ||
370 | return -EINVAL; | 367 | return -EINVAL; |
371 | } | ||
372 | if (service_rc == 8 && service_rs == 770) { | 368 | if (service_rc == 8 && service_rs == 770) { |
373 | PDEBUG("Invalid key length on PCICC\n"); | ||
374 | zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; | 369 | zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; |
375 | return -EAGAIN; | 370 | return -EAGAIN; |
376 | } | 371 | } |
377 | if (service_rc == 8 && service_rs == 783) { | 372 | if (service_rc == 8 && service_rs == 783) { |
378 | PDEBUG("Extended bitlengths not enabled on PCICC\n"); | ||
379 | zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; | 373 | zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; |
380 | return -EAGAIN; | 374 | return -EAGAIN; |
381 | } | 375 | } |
382 | PRINTK("Unknown service rc/rs (PCICC): %d/%d\n", | ||
383 | service_rc, service_rs); | ||
384 | zdev->online = 0; | 376 | zdev->online = 0; |
385 | return -EAGAIN; /* repeat the request on a different device. */ | 377 | return -EAGAIN; /* repeat the request on a different device. */ |
386 | } | 378 | } |
@@ -434,9 +426,6 @@ static int convert_response(struct zcrypt_device *zdev, | |||
434 | outputdata, outputdatalength); | 426 | outputdata, outputdatalength); |
435 | /* no break, incorrect cprb version is an unknown response */ | 427 | /* no break, incorrect cprb version is an unknown response */ |
436 | default: /* Unknown response type, this should NEVER EVER happen */ | 428 | default: /* Unknown response type, this should NEVER EVER happen */ |
437 | PRINTK("Unrecognized Message Header: %08x%08x\n", | ||
438 | *(unsigned int *) reply->message, | ||
439 | *(unsigned int *) (reply->message+4)); | ||
440 | zdev->online = 0; | 429 | zdev->online = 0; |
441 | return -EAGAIN; /* repeat the request on a different device. */ | 430 | return -EAGAIN; /* repeat the request on a different device. */ |
442 | } | 431 | } |
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index 0bc9b3188e64..d8ad36f81540 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c | |||
@@ -72,6 +72,7 @@ struct response_type { | |||
72 | static struct ap_device_id zcrypt_pcixcc_ids[] = { | 72 | static struct ap_device_id zcrypt_pcixcc_ids[] = { |
73 | { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) }, | 73 | { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) }, |
74 | { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) }, | 74 | { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) }, |
75 | { AP_DEVICE(AP_DEVICE_TYPE_CEX2C2) }, | ||
75 | { /* end of list */ }, | 76 | { /* end of list */ }, |
76 | }; | 77 | }; |
77 | 78 | ||
@@ -289,38 +290,19 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, | |||
289 | ap_msg->length = sizeof(struct type6_hdr) + | 290 | ap_msg->length = sizeof(struct type6_hdr) + |
290 | CEIL4(xcRB->request_control_blk_length) + | 291 | CEIL4(xcRB->request_control_blk_length) + |
291 | xcRB->request_data_length; | 292 | xcRB->request_data_length; |
292 | if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) { | 293 | if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) |
293 | PRINTK("Combined message is too large (%ld/%d/%d).\n", | ||
294 | sizeof(struct type6_hdr), | ||
295 | xcRB->request_control_blk_length, | ||
296 | xcRB->request_data_length); | ||
297 | return -EFAULT; | 294 | return -EFAULT; |
298 | } | 295 | if (CEIL4(xcRB->reply_control_blk_length) > PCIXCC_MAX_XCRB_REPLY_SIZE) |
299 | if (CEIL4(xcRB->reply_control_blk_length) > | ||
300 | PCIXCC_MAX_XCRB_REPLY_SIZE) { | ||
301 | PDEBUG("Reply CPRB length is too large (%d).\n", | ||
302 | xcRB->request_control_blk_length); | ||
303 | return -EFAULT; | 296 | return -EFAULT; |
304 | } | 297 | if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) |
305 | if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) { | ||
306 | PDEBUG("Reply data block length is too large (%d).\n", | ||
307 | xcRB->reply_data_length); | ||
308 | return -EFAULT; | 298 | return -EFAULT; |
309 | } | ||
310 | replylen = CEIL4(xcRB->reply_control_blk_length) + | 299 | replylen = CEIL4(xcRB->reply_control_blk_length) + |
311 | CEIL4(xcRB->reply_data_length) + | 300 | CEIL4(xcRB->reply_data_length) + |
312 | sizeof(struct type86_fmt2_msg); | 301 | sizeof(struct type86_fmt2_msg); |
313 | if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) { | 302 | if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) { |
314 | PDEBUG("Reply CPRB + data block > PCIXCC_MAX_XCRB_RESPONSE_SIZE" | ||
315 | " (%d/%d/%d).\n", | ||
316 | sizeof(struct type86_fmt2_msg), | ||
317 | xcRB->reply_control_blk_length, | ||
318 | xcRB->reply_data_length); | ||
319 | xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE - | 303 | xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE - |
320 | (sizeof(struct type86_fmt2_msg) + | 304 | (sizeof(struct type86_fmt2_msg) + |
321 | CEIL4(xcRB->reply_data_length)); | 305 | CEIL4(xcRB->reply_data_length)); |
322 | PDEBUG("Capping Reply CPRB length at %d\n", | ||
323 | xcRB->reply_control_blk_length); | ||
324 | } | 306 | } |
325 | 307 | ||
326 | /* prepare type6 header */ | 308 | /* prepare type6 header */ |
@@ -339,11 +321,8 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, | |||
339 | xcRB->request_control_blk_length)) | 321 | xcRB->request_control_blk_length)) |
340 | return -EFAULT; | 322 | return -EFAULT; |
341 | if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > | 323 | if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > |
342 | xcRB->request_control_blk_length) { | 324 | xcRB->request_control_blk_length) |
343 | PDEBUG("cprb_len too large (%d/%d)\n", msg->cprbx.cprb_len, | ||
344 | xcRB->request_control_blk_length); | ||
345 | return -EFAULT; | 325 | return -EFAULT; |
346 | } | ||
347 | function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; | 326 | function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; |
348 | memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code)); | 327 | memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code)); |
349 | 328 | ||
@@ -471,29 +450,18 @@ static int convert_type86_ica(struct zcrypt_device *zdev, | |||
471 | service_rc = msg->cprbx.ccp_rtcode; | 450 | service_rc = msg->cprbx.ccp_rtcode; |
472 | if (unlikely(service_rc != 0)) { | 451 | if (unlikely(service_rc != 0)) { |
473 | service_rs = msg->cprbx.ccp_rscode; | 452 | service_rs = msg->cprbx.ccp_rscode; |
474 | if (service_rc == 8 && service_rs == 66) { | 453 | if (service_rc == 8 && service_rs == 66) |
475 | PDEBUG("Bad block format on PCIXCC/CEX2C\n"); | ||
476 | return -EINVAL; | 454 | return -EINVAL; |
477 | } | 455 | if (service_rc == 8 && service_rs == 65) |
478 | if (service_rc == 8 && service_rs == 65) { | ||
479 | PDEBUG("Probably an even modulus on PCIXCC/CEX2C\n"); | ||
480 | return -EINVAL; | 456 | return -EINVAL; |
481 | } | 457 | if (service_rc == 8 && service_rs == 770) |
482 | if (service_rc == 8 && service_rs == 770) { | ||
483 | PDEBUG("Invalid key length on PCIXCC/CEX2C\n"); | ||
484 | return -EINVAL; | 458 | return -EINVAL; |
485 | } | ||
486 | if (service_rc == 8 && service_rs == 783) { | 459 | if (service_rc == 8 && service_rs == 783) { |
487 | PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n"); | ||
488 | zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; | 460 | zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; |
489 | return -EAGAIN; | 461 | return -EAGAIN; |
490 | } | 462 | } |
491 | if (service_rc == 12 && service_rs == 769) { | 463 | if (service_rc == 12 && service_rs == 769) |
492 | PDEBUG("Invalid key on PCIXCC/CEX2C\n"); | ||
493 | return -EINVAL; | 464 | return -EINVAL; |
494 | } | ||
495 | PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n", | ||
496 | service_rc, service_rs); | ||
497 | zdev->online = 0; | 465 | zdev->online = 0; |
498 | return -EAGAIN; /* repeat the request on a different device. */ | 466 | return -EAGAIN; /* repeat the request on a different device. */ |
499 | } | 467 | } |
@@ -569,11 +537,8 @@ static int convert_type86_rng(struct zcrypt_device *zdev, | |||
569 | } __attribute__((packed)) *msg = reply->message; | 537 | } __attribute__((packed)) *msg = reply->message; |
570 | char *data = reply->message; | 538 | char *data = reply->message; |
571 | 539 | ||
572 | if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) { | 540 | if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) |
573 | PDEBUG("RNG response error on PCIXCC/CEX2C rc=%hu/rs=%hu\n", | ||
574 | rc, rs); | ||
575 | return -EINVAL; | 541 | return -EINVAL; |
576 | } | ||
577 | memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2); | 542 | memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2); |
578 | return msg->fmt2.count2; | 543 | return msg->fmt2.count2; |
579 | } | 544 | } |
@@ -598,9 +563,6 @@ static int convert_response_ica(struct zcrypt_device *zdev, | |||
598 | outputdata, outputdatalength); | 563 | outputdata, outputdatalength); |
599 | /* no break, incorrect cprb version is an unknown response */ | 564 | /* no break, incorrect cprb version is an unknown response */ |
600 | default: /* Unknown response type, this should NEVER EVER happen */ | 565 | default: /* Unknown response type, this should NEVER EVER happen */ |
601 | PRINTK("Unrecognized Message Header: %08x%08x\n", | ||
602 | *(unsigned int *) reply->message, | ||
603 | *(unsigned int *) (reply->message+4)); | ||
604 | zdev->online = 0; | 566 | zdev->online = 0; |
605 | return -EAGAIN; /* repeat the request on a different device. */ | 567 | return -EAGAIN; /* repeat the request on a different device. */ |
606 | } | 568 | } |
@@ -627,9 +589,6 @@ static int convert_response_xcrb(struct zcrypt_device *zdev, | |||
627 | return convert_type86_xcrb(zdev, reply, xcRB); | 589 | return convert_type86_xcrb(zdev, reply, xcRB); |
628 | /* no break, incorrect cprb version is an unknown response */ | 590 | /* no break, incorrect cprb version is an unknown response */ |
629 | default: /* Unknown response type, this should NEVER EVER happen */ | 591 | default: /* Unknown response type, this should NEVER EVER happen */ |
630 | PRINTK("Unrecognized Message Header: %08x%08x\n", | ||
631 | *(unsigned int *) reply->message, | ||
632 | *(unsigned int *) (reply->message+4)); | ||
633 | xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ | 592 | xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ |
634 | zdev->online = 0; | 593 | zdev->online = 0; |
635 | return -EAGAIN; /* repeat the request on a different device. */ | 594 | return -EAGAIN; /* repeat the request on a different device. */ |
@@ -653,9 +612,6 @@ static int convert_response_rng(struct zcrypt_device *zdev, | |||
653 | return convert_type86_rng(zdev, reply, data); | 612 | return convert_type86_rng(zdev, reply, data); |
654 | /* no break, incorrect cprb version is an unknown response */ | 613 | /* no break, incorrect cprb version is an unknown response */ |
655 | default: /* Unknown response type, this should NEVER EVER happen */ | 614 | default: /* Unknown response type, this should NEVER EVER happen */ |
656 | PRINTK("Unrecognized Message Header: %08x%08x\n", | ||
657 | *(unsigned int *) reply->message, | ||
658 | *(unsigned int *) (reply->message+4)); | ||
659 | zdev->online = 0; | 615 | zdev->online = 0; |
660 | return -EAGAIN; /* repeat the request on a different device. */ | 616 | return -EAGAIN; /* repeat the request on a different device. */ |
661 | } | 617 | } |
@@ -700,10 +656,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev, | |||
700 | memcpy(msg->message, reply->message, length); | 656 | memcpy(msg->message, reply->message, length); |
701 | break; | 657 | break; |
702 | default: | 658 | default: |
703 | PRINTK("Invalid internal response type: %i\n", | 659 | memcpy(msg->message, &error_reply, sizeof error_reply); |
704 | resp_type->type); | ||
705 | memcpy(msg->message, &error_reply, | ||
706 | sizeof error_reply); | ||
707 | } | 660 | } |
708 | } else | 661 | } else |
709 | memcpy(msg->message, reply->message, sizeof error_reply); | 662 | memcpy(msg->message, reply->message, sizeof error_reply); |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 04a1d7bf678c..c644669a75c2 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -703,7 +703,8 @@ claw_irq_handler(struct ccw_device *cdev, | |||
703 | if (!cdev->dev.driver_data) { | 703 | if (!cdev->dev.driver_data) { |
704 | printk(KERN_WARNING "claw: unsolicited interrupt for device:" | 704 | printk(KERN_WARNING "claw: unsolicited interrupt for device:" |
705 | "%s received c-%02x d-%02x\n", | 705 | "%s received c-%02x d-%02x\n", |
706 | cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat); | 706 | cdev->dev.bus_id, irb->scsw.cmd.cstat, |
707 | irb->scsw.cmd.dstat); | ||
707 | #ifdef FUNCTRACE | 708 | #ifdef FUNCTRACE |
708 | printk(KERN_INFO "claw: %s() " | 709 | printk(KERN_INFO "claw: %s() " |
709 | "exit on line %d\n",__func__,__LINE__); | 710 | "exit on line %d\n",__func__,__LINE__); |
@@ -732,22 +733,23 @@ claw_irq_handler(struct ccw_device *cdev, | |||
732 | #ifdef IOTRACE | 733 | #ifdef IOTRACE |
733 | printk(KERN_INFO "%s: interrupt for device: %04x " | 734 | printk(KERN_INFO "%s: interrupt for device: %04x " |
734 | "received c-%02x d-%02x state-%02x\n", | 735 | "received c-%02x d-%02x state-%02x\n", |
735 | dev->name, p_ch->devno, irb->scsw.cstat, | 736 | dev->name, p_ch->devno, irb->scsw.cmd.cstat, |
736 | irb->scsw.dstat, p_ch->claw_state); | 737 | irb->scsw.cmd.dstat, p_ch->claw_state); |
737 | #endif | 738 | #endif |
738 | 739 | ||
739 | /* Copy interruption response block. */ | 740 | /* Copy interruption response block. */ |
740 | memcpy(p_ch->irb, irb, sizeof(struct irb)); | 741 | memcpy(p_ch->irb, irb, sizeof(struct irb)); |
741 | 742 | ||
742 | /* Check for good subchannel return code, otherwise error message */ | 743 | /* Check for good subchannel return code, otherwise error message */ |
743 | if (irb->scsw.cstat && !(irb->scsw.cstat & SCHN_STAT_PCI)) { | 744 | if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) { |
744 | printk(KERN_INFO "%s: subchannel check for device: %04x -" | 745 | printk(KERN_INFO "%s: subchannel check for device: %04x -" |
745 | " Sch Stat %02x Dev Stat %02x CPA - %04x\n", | 746 | " Sch Stat %02x Dev Stat %02x CPA - %04x\n", |
746 | dev->name, p_ch->devno, | 747 | dev->name, p_ch->devno, |
747 | irb->scsw.cstat, irb->scsw.dstat,irb->scsw.cpa); | 748 | irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, |
749 | irb->scsw.cmd.cpa); | ||
748 | #ifdef IOTRACE | 750 | #ifdef IOTRACE |
749 | dumpit((char *)irb,sizeof(struct irb)); | 751 | dumpit((char *)irb,sizeof(struct irb)); |
750 | dumpit((char *)(unsigned long)irb->scsw.cpa, | 752 | dumpit((char *)(unsigned long)irb->scsw.cmd.cpa, |
751 | sizeof(struct ccw1)); | 753 | sizeof(struct ccw1)); |
752 | #endif | 754 | #endif |
753 | #ifdef FUNCTRACE | 755 | #ifdef FUNCTRACE |
@@ -759,22 +761,24 @@ claw_irq_handler(struct ccw_device *cdev, | |||
759 | } | 761 | } |
760 | 762 | ||
761 | /* Check the reason-code of a unit check */ | 763 | /* Check the reason-code of a unit check */ |
762 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { | 764 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) |
763 | ccw_check_unit_check(p_ch, irb->ecw[0]); | 765 | ccw_check_unit_check(p_ch, irb->ecw[0]); |
764 | } | ||
765 | 766 | ||
766 | /* State machine to bring the connection up, down and to restart */ | 767 | /* State machine to bring the connection up, down and to restart */ |
767 | p_ch->last_dstat = irb->scsw.dstat; | 768 | p_ch->last_dstat = irb->scsw.cmd.dstat; |
768 | 769 | ||
769 | switch (p_ch->claw_state) { | 770 | switch (p_ch->claw_state) { |
770 | case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */ | 771 | case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */ |
771 | #ifdef DEBUGMSG | 772 | #ifdef DEBUGMSG |
772 | printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name); | 773 | printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name); |
773 | #endif | 774 | #endif |
774 | if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || | 775 | if (!((p_ch->irb->scsw.cmd.stctl & |
775 | (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || | 776 | SCSW_STCTL_SEC_STATUS) || |
776 | (p_ch->irb->scsw.stctl == | 777 | (p_ch->irb->scsw.cmd.stctl == |
777 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { | 778 | SCSW_STCTL_STATUS_PEND) || |
779 | (p_ch->irb->scsw.cmd.stctl == | ||
780 | (SCSW_STCTL_ALERT_STATUS | | ||
781 | SCSW_STCTL_STATUS_PEND)))) { | ||
778 | #ifdef FUNCTRACE | 782 | #ifdef FUNCTRACE |
779 | printk(KERN_INFO "%s:%s Exit on line %d\n", | 783 | printk(KERN_INFO "%s:%s Exit on line %d\n", |
780 | dev->name,__func__,__LINE__); | 784 | dev->name,__func__,__LINE__); |
@@ -798,10 +802,13 @@ claw_irq_handler(struct ccw_device *cdev, | |||
798 | printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n", | 802 | printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n", |
799 | dev->name); | 803 | dev->name); |
800 | #endif | 804 | #endif |
801 | if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || | 805 | if (!((p_ch->irb->scsw.cmd.stctl & |
802 | (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || | 806 | SCSW_STCTL_SEC_STATUS) || |
803 | (p_ch->irb->scsw.stctl == | 807 | (p_ch->irb->scsw.cmd.stctl == |
804 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { | 808 | SCSW_STCTL_STATUS_PEND) || |
809 | (p_ch->irb->scsw.cmd.stctl == | ||
810 | (SCSW_STCTL_ALERT_STATUS | | ||
811 | SCSW_STCTL_STATUS_PEND)))) { | ||
805 | #ifdef FUNCTRACE | 812 | #ifdef FUNCTRACE |
806 | printk(KERN_INFO "%s:%s Exit on line %d\n", | 813 | printk(KERN_INFO "%s:%s Exit on line %d\n", |
807 | dev->name,__func__,__LINE__); | 814 | dev->name,__func__,__LINE__); |
@@ -828,8 +835,8 @@ claw_irq_handler(struct ccw_device *cdev, | |||
828 | "interrupt for device:" | 835 | "interrupt for device:" |
829 | "%s received c-%02x d-%02x\n", | 836 | "%s received c-%02x d-%02x\n", |
830 | cdev->dev.bus_id, | 837 | cdev->dev.bus_id, |
831 | irb->scsw.cstat, | 838 | irb->scsw.cmd.cstat, |
832 | irb->scsw.dstat); | 839 | irb->scsw.cmd.dstat); |
833 | return; | 840 | return; |
834 | } | 841 | } |
835 | #ifdef DEBUGMSG | 842 | #ifdef DEBUGMSG |
@@ -844,7 +851,7 @@ claw_irq_handler(struct ccw_device *cdev, | |||
844 | return; | 851 | return; |
845 | case CLAW_START_READ: | 852 | case CLAW_START_READ: |
846 | CLAW_DBF_TEXT(4,trace,"ReadIRQ"); | 853 | CLAW_DBF_TEXT(4,trace,"ReadIRQ"); |
847 | if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { | 854 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
848 | clear_bit(0, (void *)&p_ch->IO_active); | 855 | clear_bit(0, (void *)&p_ch->IO_active); |
849 | if ((p_ch->irb->ecw[0] & 0x41) == 0x41 || | 856 | if ((p_ch->irb->ecw[0] & 0x41) == 0x41 || |
850 | (p_ch->irb->ecw[0] & 0x40) == 0x40 || | 857 | (p_ch->irb->ecw[0] & 0x40) == 0x40 || |
@@ -863,8 +870,8 @@ claw_irq_handler(struct ccw_device *cdev, | |||
863 | CLAW_DBF_TEXT(4,trace,"notrdy"); | 870 | CLAW_DBF_TEXT(4,trace,"notrdy"); |
864 | return; | 871 | return; |
865 | } | 872 | } |
866 | if ((p_ch->irb->scsw.cstat & SCHN_STAT_PCI) && | 873 | if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) && |
867 | (p_ch->irb->scsw.dstat==0)) { | 874 | (p_ch->irb->scsw.cmd.dstat == 0)) { |
868 | if (test_and_set_bit(CLAW_BH_ACTIVE, | 875 | if (test_and_set_bit(CLAW_BH_ACTIVE, |
869 | (void *)&p_ch->flag_a) == 0) { | 876 | (void *)&p_ch->flag_a) == 0) { |
870 | tasklet_schedule(&p_ch->tasklet); | 877 | tasklet_schedule(&p_ch->tasklet); |
@@ -879,10 +886,13 @@ claw_irq_handler(struct ccw_device *cdev, | |||
879 | CLAW_DBF_TEXT(4,trace,"PCI_read"); | 886 | CLAW_DBF_TEXT(4,trace,"PCI_read"); |
880 | return; | 887 | return; |
881 | } | 888 | } |
882 | if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || | 889 | if (!((p_ch->irb->scsw.cmd.stctl & |
883 | (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || | 890 | SCSW_STCTL_SEC_STATUS) || |
884 | (p_ch->irb->scsw.stctl == | 891 | (p_ch->irb->scsw.cmd.stctl == |
885 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { | 892 | SCSW_STCTL_STATUS_PEND) || |
893 | (p_ch->irb->scsw.cmd.stctl == | ||
894 | (SCSW_STCTL_ALERT_STATUS | | ||
895 | SCSW_STCTL_STATUS_PEND)))) { | ||
886 | #ifdef FUNCTRACE | 896 | #ifdef FUNCTRACE |
887 | printk(KERN_INFO "%s:%s Exit on line %d\n", | 897 | printk(KERN_INFO "%s:%s Exit on line %d\n", |
888 | dev->name,__func__,__LINE__); | 898 | dev->name,__func__,__LINE__); |
@@ -911,7 +921,7 @@ claw_irq_handler(struct ccw_device *cdev, | |||
911 | CLAW_DBF_TEXT(4,trace,"RdIRQXit"); | 921 | CLAW_DBF_TEXT(4,trace,"RdIRQXit"); |
912 | return; | 922 | return; |
913 | case CLAW_START_WRITE: | 923 | case CLAW_START_WRITE: |
914 | if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { | 924 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
915 | printk(KERN_INFO "%s: Unit Check Occured in " | 925 | printk(KERN_INFO "%s: Unit Check Occured in " |
916 | "write channel\n",dev->name); | 926 | "write channel\n",dev->name); |
917 | clear_bit(0, (void *)&p_ch->IO_active); | 927 | clear_bit(0, (void *)&p_ch->IO_active); |
@@ -934,16 +944,19 @@ claw_irq_handler(struct ccw_device *cdev, | |||
934 | CLAW_DBF_TEXT(4,trace,"rstrtwrt"); | 944 | CLAW_DBF_TEXT(4,trace,"rstrtwrt"); |
935 | return; | 945 | return; |
936 | } | 946 | } |
937 | if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) { | 947 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { |
938 | clear_bit(0, (void *)&p_ch->IO_active); | 948 | clear_bit(0, (void *)&p_ch->IO_active); |
939 | printk(KERN_INFO "%s: Unit Exception " | 949 | printk(KERN_INFO "%s: Unit Exception " |
940 | "Occured in write channel\n", | 950 | "Occured in write channel\n", |
941 | dev->name); | 951 | dev->name); |
942 | } | 952 | } |
943 | if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || | 953 | if (!((p_ch->irb->scsw.cmd.stctl & |
944 | (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || | 954 | SCSW_STCTL_SEC_STATUS) || |
945 | (p_ch->irb->scsw.stctl == | 955 | (p_ch->irb->scsw.cmd.stctl == |
946 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { | 956 | SCSW_STCTL_STATUS_PEND) || |
957 | (p_ch->irb->scsw.cmd.stctl == | ||
958 | (SCSW_STCTL_ALERT_STATUS | | ||
959 | SCSW_STCTL_STATUS_PEND)))) { | ||
947 | #ifdef FUNCTRACE | 960 | #ifdef FUNCTRACE |
948 | printk(KERN_INFO "%s:%s Exit on line %d\n", | 961 | printk(KERN_INFO "%s:%s Exit on line %d\n", |
949 | dev->name,__func__,__LINE__); | 962 | dev->name,__func__,__LINE__); |
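Most of the network-driver churn in this patch, here in claw.c and in the ctcm, lcs and netiucv hunks that follow, is mechanical: the subchannel status word embedded in struct irb is now a union of command-mode and transport-mode layouts, so every former irb->scsw.<field> access becomes irb->scsw.cmd.<field>. A rough sketch of a status check on the new layout (the helper itself is hypothetical; the field and flag names are the ones used above):

#include <asm/cio.h>	/* struct irb, DEV_STAT_ and SCHN_STAT_ flag bits */

/* Nonzero if the interrupt reports a clean channel end plus device end. */
static int demo_io_done(struct irb *irb)
{
	if (irb->scsw.cmd.cstat != 0)			/* subchannel status */
		return 0;
	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)	/* device status */
		return 0;
	return (irb->scsw.cmd.dstat &
		(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) ==
	       (DEV_STAT_CHN_END | DEV_STAT_DEV_END);
}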
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 2a106f3a076d..7e6bd387f4d8 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c | |||
@@ -257,9 +257,9 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg) | |||
257 | if (duration > ch->prof.tx_time) | 257 | if (duration > ch->prof.tx_time) |
258 | ch->prof.tx_time = duration; | 258 | ch->prof.tx_time = duration; |
259 | 259 | ||
260 | if (ch->irb->scsw.count != 0) | 260 | if (ch->irb->scsw.cmd.count != 0) |
261 | ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", | 261 | ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", |
262 | dev->name, ch->irb->scsw.count); | 262 | dev->name, ch->irb->scsw.cmd.count); |
263 | fsm_deltimer(&ch->timer); | 263 | fsm_deltimer(&ch->timer); |
264 | while ((skb = skb_dequeue(&ch->io_queue))) { | 264 | while ((skb = skb_dequeue(&ch->io_queue))) { |
265 | priv->stats.tx_packets++; | 265 | priv->stats.tx_packets++; |
@@ -353,7 +353,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg) | |||
353 | struct channel *ch = arg; | 353 | struct channel *ch = arg; |
354 | struct net_device *dev = ch->netdev; | 354 | struct net_device *dev = ch->netdev; |
355 | struct ctcm_priv *priv = dev->priv; | 355 | struct ctcm_priv *priv = dev->priv; |
356 | int len = ch->max_bufsize - ch->irb->scsw.count; | 356 | int len = ch->max_bufsize - ch->irb->scsw.cmd.count; |
357 | struct sk_buff *skb = ch->trans_skb; | 357 | struct sk_buff *skb = ch->trans_skb; |
358 | __u16 block_len = *((__u16 *)skb->data); | 358 | __u16 block_len = *((__u16 *)skb->data); |
359 | int check_len; | 359 | int check_len; |
@@ -1234,9 +1234,9 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) | |||
1234 | if (duration > ch->prof.tx_time) | 1234 | if (duration > ch->prof.tx_time) |
1235 | ch->prof.tx_time = duration; | 1235 | ch->prof.tx_time = duration; |
1236 | 1236 | ||
1237 | if (ch->irb->scsw.count != 0) | 1237 | if (ch->irb->scsw.cmd.count != 0) |
1238 | ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", | 1238 | ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", |
1239 | dev->name, ch->irb->scsw.count); | 1239 | dev->name, ch->irb->scsw.cmd.count); |
1240 | fsm_deltimer(&ch->timer); | 1240 | fsm_deltimer(&ch->timer); |
1241 | while ((skb = skb_dequeue(&ch->io_queue))) { | 1241 | while ((skb = skb_dequeue(&ch->io_queue))) { |
1242 | priv->stats.tx_packets++; | 1242 | priv->stats.tx_packets++; |
@@ -1394,7 +1394,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg) | |||
1394 | struct sk_buff *skb = ch->trans_skb; | 1394 | struct sk_buff *skb = ch->trans_skb; |
1395 | struct sk_buff *new_skb; | 1395 | struct sk_buff *new_skb; |
1396 | unsigned long saveflags = 0; /* avoids compiler warning */ | 1396 | unsigned long saveflags = 0; /* avoids compiler warning */ |
1397 | int len = ch->max_bufsize - ch->irb->scsw.count; | 1397 | int len = ch->max_bufsize - ch->irb->scsw.cmd.count; |
1398 | 1398 | ||
1399 | if (do_debug_data) { | 1399 | if (do_debug_data) { |
1400 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n", | 1400 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n", |
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index d52843da4f55..6b13c1c1beb8 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c | |||
@@ -1236,8 +1236,8 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1236 | /* Check for unsolicited interrupts. */ | 1236 | /* Check for unsolicited interrupts. */ |
1237 | if (cgdev == NULL) { | 1237 | if (cgdev == NULL) { |
1238 | ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n", | 1238 | ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n", |
1239 | cdev->dev.bus_id, irb->scsw.cstat, | 1239 | cdev->dev.bus_id, irb->scsw.cmd.cstat, |
1240 | irb->scsw.dstat); | 1240 | irb->scsw.cmd.dstat); |
1241 | return; | 1241 | return; |
1242 | } | 1242 | } |
1243 | 1243 | ||
@@ -1266,40 +1266,40 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1266 | "received c-%02x d-%02x\n", | 1266 | "received c-%02x d-%02x\n", |
1267 | dev->name, | 1267 | dev->name, |
1268 | ch->id, | 1268 | ch->id, |
1269 | irb->scsw.cstat, | 1269 | irb->scsw.cmd.cstat, |
1270 | irb->scsw.dstat); | 1270 | irb->scsw.cmd.dstat); |
1271 | 1271 | ||
1272 | /* Copy interruption response block. */ | 1272 | /* Copy interruption response block. */ |
1273 | memcpy(ch->irb, irb, sizeof(struct irb)); | 1273 | memcpy(ch->irb, irb, sizeof(struct irb)); |
1274 | 1274 | ||
1275 | /* Check for good subchannel return code, otherwise error message */ | 1275 | /* Check for good subchannel return code, otherwise error message */ |
1276 | if (irb->scsw.cstat) { | 1276 | if (irb->scsw.cmd.cstat) { |
1277 | fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); | 1277 | fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); |
1278 | ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n", | 1278 | ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n", |
1279 | dev->name, ch->id, irb->scsw.cstat, | 1279 | dev->name, ch->id, irb->scsw.cmd.cstat, |
1280 | irb->scsw.dstat); | 1280 | irb->scsw.cmd.dstat); |
1281 | return; | 1281 | return; |
1282 | } | 1282 | } |
1283 | 1283 | ||
1284 | /* Check the reason-code of a unit check */ | 1284 | /* Check the reason-code of a unit check */ |
1285 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { | 1285 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
1286 | ccw_unit_check(ch, irb->ecw[0]); | 1286 | ccw_unit_check(ch, irb->ecw[0]); |
1287 | return; | 1287 | return; |
1288 | } | 1288 | } |
1289 | if (irb->scsw.dstat & DEV_STAT_BUSY) { | 1289 | if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) { |
1290 | if (irb->scsw.dstat & DEV_STAT_ATTENTION) | 1290 | if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) |
1291 | fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch); | 1291 | fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch); |
1292 | else | 1292 | else |
1293 | fsm_event(ch->fsm, CTC_EVENT_BUSY, ch); | 1293 | fsm_event(ch->fsm, CTC_EVENT_BUSY, ch); |
1294 | return; | 1294 | return; |
1295 | } | 1295 | } |
1296 | if (irb->scsw.dstat & DEV_STAT_ATTENTION) { | 1296 | if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { |
1297 | fsm_event(ch->fsm, CTC_EVENT_ATTN, ch); | 1297 | fsm_event(ch->fsm, CTC_EVENT_ATTN, ch); |
1298 | return; | 1298 | return; |
1299 | } | 1299 | } |
1300 | if ((irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || | 1300 | if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || |
1301 | (irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || | 1301 | (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || |
1302 | (irb->scsw.stctl == | 1302 | (irb->scsw.cmd.stctl == |
1303 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) | 1303 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) |
1304 | fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch); | 1304 | fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch); |
1305 | else | 1305 | else |
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c index 8e7697305a4c..f4a32375c037 100644 --- a/drivers/s390/net/cu3088.c +++ b/drivers/s390/net/cu3088.c | |||
@@ -36,7 +36,6 @@ const char *cu3088_type[] = { | |||
36 | "CTC/A", | 36 | "CTC/A", |
37 | "ESCON channel", | 37 | "ESCON channel", |
38 | "FICON channel", | 38 | "FICON channel", |
39 | "P390 LCS card", | ||
40 | "OSA LCS card", | 39 | "OSA LCS card", |
41 | "CLAW channel device", | 40 | "CLAW channel device", |
42 | "unknown channel type", | 41 | "unknown channel type", |
@@ -49,7 +48,6 @@ static struct ccw_device_id cu3088_ids[] = { | |||
49 | { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel }, | 48 | { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel }, |
50 | { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon }, | 49 | { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon }, |
51 | { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon }, | 50 | { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon }, |
52 | { CCW_DEVICE(0x3088, 0x01), .driver_info = channel_type_p390 }, | ||
53 | { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 }, | 51 | { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 }, |
54 | { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw }, | 52 | { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw }, |
55 | { /* end of list */ } | 53 | { /* end of list */ } |
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h index 1753661f702a..d8558a7105a5 100644 --- a/drivers/s390/net/cu3088.h +++ b/drivers/s390/net/cu3088.h | |||
@@ -17,9 +17,6 @@ enum channel_types { | |||
17 | /* Device is a FICON channel */ | 17 | /* Device is a FICON channel */ |
18 | channel_type_ficon, | 18 | channel_type_ficon, |
19 | 19 | ||
20 | /* Device is a P390 LCS card */ | ||
21 | channel_type_p390, | ||
22 | |||
23 | /* Device is a OSA2 card */ | 20 | /* Device is a OSA2 card */ |
24 | channel_type_osa2, | 21 | channel_type_osa2, |
25 | 22 | ||
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index dd22f4b37037..6de28385b354 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -1327,8 +1327,8 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb) | |||
1327 | char *sense; | 1327 | char *sense; |
1328 | 1328 | ||
1329 | sense = (char *) irb->ecw; | 1329 | sense = (char *) irb->ecw; |
1330 | cstat = irb->scsw.cstat; | 1330 | cstat = irb->scsw.cmd.cstat; |
1331 | dstat = irb->scsw.dstat; | 1331 | dstat = irb->scsw.cmd.dstat; |
1332 | 1332 | ||
1333 | if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | | 1333 | if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | |
1334 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | | 1334 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | |
@@ -1388,11 +1388,13 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1388 | else | 1388 | else |
1389 | channel = &card->write; | 1389 | channel = &card->write; |
1390 | 1390 | ||
1391 | cstat = irb->scsw.cstat; | 1391 | cstat = irb->scsw.cmd.cstat; |
1392 | dstat = irb->scsw.dstat; | 1392 | dstat = irb->scsw.cmd.dstat; |
1393 | LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id); | 1393 | LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id); |
1394 | LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.cstat, irb->scsw.dstat); | 1394 | LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat, |
1395 | LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.fctl, irb->scsw.actl); | 1395 | irb->scsw.cmd.dstat); |
1396 | LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl, | ||
1397 | irb->scsw.cmd.actl); | ||
1396 | 1398 | ||
1397 | /* Check for channel and device errors presented */ | 1399 | /* Check for channel and device errors presented */ |
1398 | rc = lcs_get_problem(cdev, irb); | 1400 | rc = lcs_get_problem(cdev, irb); |
@@ -1410,11 +1412,11 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1410 | } | 1412 | } |
1411 | /* How far in the ccw chain have we processed? */ | 1413 | /* How far in the ccw chain have we processed? */ |
1412 | if ((channel->state != LCS_CH_STATE_INIT) && | 1414 | if ((channel->state != LCS_CH_STATE_INIT) && |
1413 | (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) { | 1415 | (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC)) { |
1414 | index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) | 1416 | index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa) |
1415 | - channel->ccws; | 1417 | - channel->ccws; |
1416 | if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) || | 1418 | if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) || |
1417 | (irb->scsw.cstat & SCHN_STAT_PCI)) | 1419 | (irb->scsw.cmd.cstat & SCHN_STAT_PCI)) |
1418 | /* Bloody io subsystem tells us lies about cpa... */ | 1420 | /* Bloody io subsystem tells us lies about cpa... */ |
1419 | index = (index - 1) & (LCS_NUM_BUFFS - 1); | 1421 | index = (index - 1) & (LCS_NUM_BUFFS - 1); |
1420 | while (channel->io_idx != index) { | 1422 | while (channel->io_idx != index) { |
@@ -1425,25 +1427,24 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1425 | } | 1427 | } |
1426 | } | 1428 | } |
1427 | 1429 | ||
1428 | if ((irb->scsw.dstat & DEV_STAT_DEV_END) || | 1430 | if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) || |
1429 | (irb->scsw.dstat & DEV_STAT_CHN_END) || | 1431 | (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) || |
1430 | (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) | 1432 | (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) |
1431 | /* Mark channel as stopped. */ | 1433 | /* Mark channel as stopped. */ |
1432 | channel->state = LCS_CH_STATE_STOPPED; | 1434 | channel->state = LCS_CH_STATE_STOPPED; |
1433 | else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED) | 1435 | else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) |
1434 | /* CCW execution stopped on a suspend bit. */ | 1436 | /* CCW execution stopped on a suspend bit. */ |
1435 | channel->state = LCS_CH_STATE_SUSPENDED; | 1437 | channel->state = LCS_CH_STATE_SUSPENDED; |
1436 | if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { | 1438 | if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { |
1437 | if (irb->scsw.cc != 0) { | 1439 | if (irb->scsw.cmd.cc != 0) { |
1438 | ccw_device_halt(channel->ccwdev, (addr_t) channel); | 1440 | ccw_device_halt(channel->ccwdev, (addr_t) channel); |
1439 | return; | 1441 | return; |
1440 | } | 1442 | } |
1441 | /* The channel has been stopped by halt_IO. */ | 1443 | /* The channel has been stopped by halt_IO. */ |
1442 | channel->state = LCS_CH_STATE_HALTED; | 1444 | channel->state = LCS_CH_STATE_HALTED; |
1443 | } | 1445 | } |
1444 | if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { | 1446 | if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) |
1445 | channel->state = LCS_CH_STATE_CLEARED; | 1447 | channel->state = LCS_CH_STATE_CLEARED; |
1446 | } | ||
1447 | /* Do the rest in the tasklet. */ | 1448 | /* Do the rest in the tasklet. */ |
1448 | tasklet_schedule(&channel->irq_tasklet); | 1449 | tasklet_schedule(&channel->irq_tasklet); |
1449 | } | 1450 | } |
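The lcs_irq() hunks above keep the logic as it was while switching to the scsw.cmd view: the handler works out how far the channel program has progressed by converting the SCSW's channel-program address (cpa) back into an index into its own CCW array, stepping back one entry when the program was only suspended or a PCI was signalled. A condensed sketch of that computation; the structure, buffer count and header choices are illustrative assumptions, not the driver's own:

#include <asm/types.h>	/* addr_t */
#include <asm/page.h>	/* __va() */
#include <asm/cio.h>	/* struct irb, struct ccw1, SCSW_ACTL_SUSPENDED, SCHN_STAT_PCI */

#define NUM_DEMO_BUFFS 32	/* power of two, so the mask below works */

struct demo_channel {
	struct ccw1 ccws[NUM_DEMO_BUFFS + 1];	/* the running channel program */
	int io_idx;				/* next buffer to be processed */
};

static int demo_channel_index(struct demo_channel *ch, struct irb *irb)
{
	struct ccw1 *cpa = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa);
	int index = cpa - ch->ccws;		/* position within the CCW array */

	/* on suspend or PCI the reported cpa is one CCW ahead */
	if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
	    (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
		index = (index - 1) & (NUM_DEMO_BUFFS - 1);
	return index;
}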
@@ -1761,7 +1762,7 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) | |||
1761 | netif_carrier_off(card->dev); | 1762 | netif_carrier_off(card->dev); |
1762 | break; | 1763 | break; |
1763 | default: | 1764 | default: |
1764 | PRINT_INFO("UNRECOGNIZED LGW COMMAND\n"); | 1765 | LCS_DBF_TEXT(5, trace, "noLGWcmd"); |
1765 | break; | 1766 | break; |
1766 | } | 1767 | } |
1767 | } else | 1768 | } else |
@@ -2042,13 +2043,12 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev) | |||
2042 | LCS_DBF_TEXT(2, setup, "add_dev"); | 2043 | LCS_DBF_TEXT(2, setup, "add_dev"); |
2043 | card = lcs_alloc_card(); | 2044 | card = lcs_alloc_card(); |
2044 | if (!card) { | 2045 | if (!card) { |
2045 | PRINT_ERR("Allocation of lcs card failed\n"); | 2046 | LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM); |
2046 | put_device(&ccwgdev->dev); | 2047 | put_device(&ccwgdev->dev); |
2047 | return -ENOMEM; | 2048 | return -ENOMEM; |
2048 | } | 2049 | } |
2049 | ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group); | 2050 | ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group); |
2050 | if (ret) { | 2051 | if (ret) { |
2051 | PRINT_ERR("Creating attributes failed"); | ||
2052 | lcs_free_card(card); | 2052 | lcs_free_card(card); |
2053 | put_device(&ccwgdev->dev); | 2053 | put_device(&ccwgdev->dev); |
2054 | return ret; | 2054 | return ret; |
@@ -2140,7 +2140,6 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) | |||
2140 | default: | 2140 | default: |
2141 | LCS_DBF_TEXT(3, setup, "errinit"); | 2141 | LCS_DBF_TEXT(3, setup, "errinit"); |
2142 | PRINT_ERR("LCS: Initialization failed\n"); | 2142 | PRINT_ERR("LCS: Initialization failed\n"); |
2143 | PRINT_ERR("LCS: No device found!\n"); | ||
2144 | goto out; | 2143 | goto out; |
2145 | } | 2144 | } |
2146 | if (!dev) | 2145 | if (!dev) |
@@ -2269,7 +2268,6 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev) | |||
2269 | if (!card) | 2268 | if (!card) |
2270 | return; | 2269 | return; |
2271 | 2270 | ||
2272 | PRINT_INFO("Removing lcs group device ....\n"); | ||
2273 | LCS_DBF_TEXT(3, setup, "remdev"); | 2271 | LCS_DBF_TEXT(3, setup, "remdev"); |
2274 | LCS_DBF_HEX(3, setup, &card, sizeof(void*)); | 2272 | LCS_DBF_HEX(3, setup, &card, sizeof(void*)); |
2275 | if (ccwgdev->state == CCWGROUP_ONLINE) { | 2273 | if (ccwgdev->state == CCWGROUP_ONLINE) { |
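The lcs.c messages dropped or demoted above (and the netiucv ones below) follow the same policy as the zcrypt files earlier in the patch: console printks give way to entries in the driver's existing s390 debug feature buffer (LCS_DBF_TEXT, IUCV_DBF_TEXT). Roughly, such a trace buffer is created and fed as sketched below; the name, sizes and level are illustrative values, not the ones these drivers actually use:

#include <linux/errno.h>
#include <asm/debug.h>

static debug_info_t *demo_dbf;

static int demo_dbf_init(void)
{
	/* 1 page per area, 16 areas, entries up to 64 bytes */
	demo_dbf = debug_register("demo", 1, 16, 64);
	if (!demo_dbf)
		return -ENOMEM;
	debug_register_view(demo_dbf, &debug_hex_ascii_view);
	debug_set_level(demo_dbf, 3);
	return 0;
}

static void demo_dbf_exit(void)
{
	debug_unregister(demo_dbf);
}

/* instead of printk(KERN_DEBUG ...) on a hot path: */
static void demo_trace(const char *txt)
{
	debug_text_event(demo_dbf, 3, txt);
}

The buffers are typically readable under /sys/kernel/debug/s390dbf/, so the trace data stays available for problem determination without flooding the console.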
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index e4ba6a0372ac..9242b5acc66b 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -625,9 +625,6 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, | |||
625 | offset += header->next; | 625 | offset += header->next; |
626 | header->next -= NETIUCV_HDRLEN; | 626 | header->next -= NETIUCV_HDRLEN; |
627 | if (skb_tailroom(pskb) < header->next) { | 627 | if (skb_tailroom(pskb) < header->next) { |
628 | PRINT_WARN("%s: Illegal next field in iucv header: " | ||
629 | "%d > %d\n", | ||
630 | dev->name, header->next, skb_tailroom(pskb)); | ||
631 | IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n", | 628 | IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n", |
632 | header->next, skb_tailroom(pskb)); | 629 | header->next, skb_tailroom(pskb)); |
633 | return; | 630 | return; |
@@ -636,8 +633,6 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, | |||
636 | skb_reset_mac_header(pskb); | 633 | skb_reset_mac_header(pskb); |
637 | skb = dev_alloc_skb(pskb->len); | 634 | skb = dev_alloc_skb(pskb->len); |
638 | if (!skb) { | 635 | if (!skb) { |
639 | PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n", | ||
640 | dev->name); | ||
641 | IUCV_DBF_TEXT(data, 2, | 636 | IUCV_DBF_TEXT(data, 2, |
642 | "Out of memory in netiucv_unpack_skb\n"); | 637 | "Out of memory in netiucv_unpack_skb\n"); |
643 | privptr->stats.rx_dropped++; | 638 | privptr->stats.rx_dropped++; |
@@ -674,7 +669,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg) | |||
674 | 669 | ||
675 | if (!conn->netdev) { | 670 | if (!conn->netdev) { |
676 | iucv_message_reject(conn->path, msg); | 671 | iucv_message_reject(conn->path, msg); |
677 | PRINT_WARN("Received data for unlinked connection\n"); | ||
678 | IUCV_DBF_TEXT(data, 2, | 672 | IUCV_DBF_TEXT(data, 2, |
679 | "Received data for unlinked connection\n"); | 673 | "Received data for unlinked connection\n"); |
680 | return; | 674 | return; |
@@ -682,8 +676,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg) | |||
682 | if (msg->length > conn->max_buffsize) { | 676 | if (msg->length > conn->max_buffsize) { |
683 | iucv_message_reject(conn->path, msg); | 677 | iucv_message_reject(conn->path, msg); |
684 | privptr->stats.rx_dropped++; | 678 | privptr->stats.rx_dropped++; |
685 | PRINT_WARN("msglen %d > max_buffsize %d\n", | ||
686 | msg->length, conn->max_buffsize); | ||
687 | IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", | 679 | IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", |
688 | msg->length, conn->max_buffsize); | 680 | msg->length, conn->max_buffsize); |
689 | return; | 681 | return; |
@@ -695,7 +687,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg) | |||
695 | msg->length, NULL); | 687 | msg->length, NULL); |
696 | if (rc || msg->length < 5) { | 688 | if (rc || msg->length < 5) { |
697 | privptr->stats.rx_errors++; | 689 | privptr->stats.rx_errors++; |
698 | PRINT_WARN("iucv_receive returned %08x\n", rc); | ||
699 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); | 690 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); |
700 | return; | 691 | return; |
701 | } | 692 | } |
@@ -778,7 +769,6 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg) | |||
778 | fsm_newstate(fi, CONN_STATE_IDLE); | 769 | fsm_newstate(fi, CONN_STATE_IDLE); |
779 | if (privptr) | 770 | if (privptr) |
780 | privptr->stats.tx_errors += txpackets; | 771 | privptr->stats.tx_errors += txpackets; |
781 | PRINT_WARN("iucv_send returned %08x\n", rc); | ||
782 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); | 772 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); |
783 | } else { | 773 | } else { |
784 | if (privptr) { | 774 | if (privptr) { |
@@ -806,8 +796,6 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) | |||
806 | path->flags = 0; | 796 | path->flags = 0; |
807 | rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); | 797 | rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); |
808 | if (rc) { | 798 | if (rc) { |
809 | PRINT_WARN("%s: IUCV accept failed with error %d\n", | ||
810 | netdev->name, rc); | ||
811 | IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); | 799 | IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); |
812 | return; | 800 | return; |
813 | } | 801 | } |
@@ -873,7 +861,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) | |||
873 | IUCV_DBF_TEXT(trace, 3, __func__); | 861 | IUCV_DBF_TEXT(trace, 3, __func__); |
874 | 862 | ||
875 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 863 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
876 | PRINT_DEBUG("%s('%s'): connecting ...\n", | 864 | IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n", |
877 | conn->netdev->name, conn->userid); | 865 | conn->netdev->name, conn->userid); |
878 | 866 | ||
879 | /* | 867 | /* |
@@ -968,8 +956,8 @@ static void conn_action_inval(fsm_instance *fi, int event, void *arg) | |||
968 | struct iucv_connection *conn = arg; | 956 | struct iucv_connection *conn = arg; |
969 | struct net_device *netdev = conn->netdev; | 957 | struct net_device *netdev = conn->netdev; |
970 | 958 | ||
971 | PRINT_WARN("%s: Cannot connect without username\n", netdev->name); | 959 | IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n", |
972 | IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n"); | 960 | netdev->name, conn->userid); |
973 | } | 961 | } |
974 | 962 | ||
975 | static const fsm_node conn_fsm[] = { | 963 | static const fsm_node conn_fsm[] = { |
@@ -1077,9 +1065,6 @@ dev_action_connup(fsm_instance *fi, int event, void *arg) | |||
1077 | "connection is up and running\n"); | 1065 | "connection is up and running\n"); |
1078 | break; | 1066 | break; |
1079 | case DEV_STATE_STOPWAIT: | 1067 | case DEV_STATE_STOPWAIT: |
1080 | PRINT_INFO( | ||
1081 | "%s: got connection UP event during shutdown!\n", | ||
1082 | dev->name); | ||
1083 | IUCV_DBF_TEXT(data, 2, | 1068 | IUCV_DBF_TEXT(data, 2, |
1084 | "dev_action_connup: in DEV_STATE_STOPWAIT\n"); | 1069 | "dev_action_connup: in DEV_STATE_STOPWAIT\n"); |
1085 | break; | 1070 | break; |
@@ -1174,8 +1159,6 @@ static int netiucv_transmit_skb(struct iucv_connection *conn, | |||
1174 | nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + | 1159 | nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + |
1175 | NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA); | 1160 | NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA); |
1176 | if (!nskb) { | 1161 | if (!nskb) { |
1177 | PRINT_WARN("%s: Could not allocate tx_skb\n", | ||
1178 | conn->netdev->name); | ||
1179 | IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n"); | 1162 | IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n"); |
1180 | rc = -ENOMEM; | 1163 | rc = -ENOMEM; |
1181 | return rc; | 1164 | return rc; |
@@ -1223,7 +1206,6 @@ static int netiucv_transmit_skb(struct iucv_connection *conn, | |||
1223 | skb_pull(skb, NETIUCV_HDRLEN); | 1206 | skb_pull(skb, NETIUCV_HDRLEN); |
1224 | skb_trim(skb, skb->len - NETIUCV_HDRLEN); | 1207 | skb_trim(skb, skb->len - NETIUCV_HDRLEN); |
1225 | } | 1208 | } |
1226 | PRINT_WARN("iucv_send returned %08x\n", rc); | ||
1227 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); | 1209 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); |
1228 | } else { | 1210 | } else { |
1229 | if (copied) | 1211 | if (copied) |
@@ -1293,14 +1275,11 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) | |||
1293 | * Some sanity checks ... | 1275 | * Some sanity checks ... |
1294 | */ | 1276 | */ |
1295 | if (skb == NULL) { | 1277 | if (skb == NULL) { |
1296 | PRINT_WARN("%s: NULL sk_buff passed\n", dev->name); | ||
1297 | IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n"); | 1278 | IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n"); |
1298 | privptr->stats.tx_dropped++; | 1279 | privptr->stats.tx_dropped++; |
1299 | return 0; | 1280 | return 0; |
1300 | } | 1281 | } |
1301 | if (skb_headroom(skb) < NETIUCV_HDRLEN) { | 1282 | if (skb_headroom(skb) < NETIUCV_HDRLEN) { |
1302 | PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n", | ||
1303 | dev->name, NETIUCV_HDRLEN); | ||
1304 | IUCV_DBF_TEXT(data, 2, | 1283 | IUCV_DBF_TEXT(data, 2, |
1305 | "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n"); | 1284 | "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n"); |
1306 | dev_kfree_skb(skb); | 1285 | dev_kfree_skb(skb); |
@@ -1393,7 +1372,6 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr, | |||
1393 | 1372 | ||
1394 | IUCV_DBF_TEXT(trace, 3, __func__); | 1373 | IUCV_DBF_TEXT(trace, 3, __func__); |
1395 | if (count > 9) { | 1374 | if (count > 9) { |
1396 | PRINT_WARN("netiucv: username too long (%d)!\n", (int) count); | ||
1397 | IUCV_DBF_TEXT_(setup, 2, | 1375 | IUCV_DBF_TEXT_(setup, 2, |
1398 | "%d is length of username\n", (int) count); | 1376 | "%d is length of username\n", (int) count); |
1399 | return -EINVAL; | 1377 | return -EINVAL; |
@@ -1409,7 +1387,6 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr, | |||
1409 | /* trailing lf, grr */ | 1387 | /* trailing lf, grr */ |
1410 | break; | 1388 | break; |
1411 | } | 1389 | } |
1412 | PRINT_WARN("netiucv: Invalid char %c in username!\n", *p); | ||
1413 | IUCV_DBF_TEXT_(setup, 2, | 1390 | IUCV_DBF_TEXT_(setup, 2, |
1414 | "username: invalid character %c\n", *p); | 1391 | "username: invalid character %c\n", *p); |
1415 | return -EINVAL; | 1392 | return -EINVAL; |
@@ -1421,18 +1398,15 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr, | |||
1421 | if (memcmp(username, priv->conn->userid, 9) && | 1398 | if (memcmp(username, priv->conn->userid, 9) && |
1422 | (ndev->flags & (IFF_UP | IFF_RUNNING))) { | 1399 | (ndev->flags & (IFF_UP | IFF_RUNNING))) { |
1423 | /* username changed while the interface is active. */ | 1400 | /* username changed while the interface is active. */ |
1424 | PRINT_WARN("netiucv: device %s active, connected to %s\n", | ||
1425 | dev->bus_id, priv->conn->userid); | ||
1426 | PRINT_WARN("netiucv: user cannot be updated\n"); | ||
1427 | IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); | 1401 | IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); |
1428 | return -EBUSY; | 1402 | return -EPERM; |
1429 | } | 1403 | } |
1430 | read_lock_bh(&iucv_connection_rwlock); | 1404 | read_lock_bh(&iucv_connection_rwlock); |
1431 | list_for_each_entry(cp, &iucv_connection_list, list) { | 1405 | list_for_each_entry(cp, &iucv_connection_list, list) { |
1432 | if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { | 1406 | if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { |
1433 | read_unlock_bh(&iucv_connection_rwlock); | 1407 | read_unlock_bh(&iucv_connection_rwlock); |
1434 | PRINT_WARN("netiucv: Connection to %s already " | 1408 | IUCV_DBF_TEXT_(setup, 2, "user_write: Connection " |
1435 | "exists\n", username); | 1409 | "to %s already exists\n", username); |
1436 | return -EEXIST; | 1410 | return -EEXIST; |
1437 | } | 1411 | } |
1438 | } | 1412 | } |
@@ -1466,13 +1440,10 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, | |||
1466 | bs1 = simple_strtoul(buf, &e, 0); | 1440 | bs1 = simple_strtoul(buf, &e, 0); |
1467 | 1441 | ||
1468 | if (e && (!isspace(*e))) { | 1442 | if (e && (!isspace(*e))) { |
1469 | PRINT_WARN("netiucv: Invalid character in buffer!\n"); | ||
1470 | IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e); | 1443 | IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e); |
1471 | return -EINVAL; | 1444 | return -EINVAL; |
1472 | } | 1445 | } |
1473 | if (bs1 > NETIUCV_BUFSIZE_MAX) { | 1446 | if (bs1 > NETIUCV_BUFSIZE_MAX) { |
1474 | PRINT_WARN("netiucv: Given buffer size %d too large.\n", | ||
1475 | bs1); | ||
1476 | IUCV_DBF_TEXT_(setup, 2, | 1447 | IUCV_DBF_TEXT_(setup, 2, |
1477 | "buffer_write: buffer size %d too large\n", | 1448 | "buffer_write: buffer size %d too large\n", |
1478 | bs1); | 1449 | bs1); |
@@ -1480,16 +1451,12 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, | |||
1480 | } | 1451 | } |
1481 | if ((ndev->flags & IFF_RUNNING) && | 1452 | if ((ndev->flags & IFF_RUNNING) && |
1482 | (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) { | 1453 | (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) { |
1483 | PRINT_WARN("netiucv: Given buffer size %d too small.\n", | ||
1484 | bs1); | ||
1485 | IUCV_DBF_TEXT_(setup, 2, | 1454 | IUCV_DBF_TEXT_(setup, 2, |
1486 | "buffer_write: buffer size %d too small\n", | 1455 | "buffer_write: buffer size %d too small\n", |
1487 | bs1); | 1456 | bs1); |
1488 | return -EINVAL; | 1457 | return -EINVAL; |
1489 | } | 1458 | } |
1490 | if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) { | 1459 | if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) { |
1491 | PRINT_WARN("netiucv: Given buffer size %d too small.\n", | ||
1492 | bs1); | ||
1493 | IUCV_DBF_TEXT_(setup, 2, | 1460 | IUCV_DBF_TEXT_(setup, 2, |
1494 | "buffer_write: buffer size %d too small\n", | 1461 | "buffer_write: buffer size %d too small\n", |
1495 | bs1); | 1462 | bs1); |
@@ -1963,7 +1930,6 @@ static ssize_t conn_write(struct device_driver *drv, | |||
1963 | 1930 | ||
1964 | IUCV_DBF_TEXT(trace, 3, __func__); | 1931 | IUCV_DBF_TEXT(trace, 3, __func__); |
1965 | if (count>9) { | 1932 | if (count>9) { |
1966 | PRINT_WARN("netiucv: username too long (%d)!\n", (int)count); | ||
1967 | IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); | 1933 | IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); |
1968 | return -EINVAL; | 1934 | return -EINVAL; |
1969 | } | 1935 | } |
@@ -1976,7 +1942,6 @@ static ssize_t conn_write(struct device_driver *drv, | |||
1976 | if (*p == '\n') | 1942 | if (*p == '\n') |
1977 | /* trailing lf, grr */ | 1943 | /* trailing lf, grr */ |
1978 | break; | 1944 | break; |
1979 | PRINT_WARN("netiucv: Invalid character in username!\n"); | ||
1980 | IUCV_DBF_TEXT_(setup, 2, | 1945 | IUCV_DBF_TEXT_(setup, 2, |
1981 | "conn_write: invalid character %c\n", *p); | 1946 | "conn_write: invalid character %c\n", *p); |
1982 | return -EINVAL; | 1947 | return -EINVAL; |
@@ -1989,8 +1954,8 @@ static ssize_t conn_write(struct device_driver *drv, | |||
1989 | list_for_each_entry(cp, &iucv_connection_list, list) { | 1954 | list_for_each_entry(cp, &iucv_connection_list, list) { |
1990 | if (!strncmp(username, cp->userid, 9)) { | 1955 | if (!strncmp(username, cp->userid, 9)) { |
1991 | read_unlock_bh(&iucv_connection_rwlock); | 1956 | read_unlock_bh(&iucv_connection_rwlock); |
1992 | PRINT_WARN("netiucv: Connection to %s already " | 1957 | IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection " |
1993 | "exists\n", username); | 1958 | "to %s already exists\n", username); |
1994 | return -EEXIST; | 1959 | return -EEXIST; |
1995 | } | 1960 | } |
1996 | } | 1961 | } |
@@ -1998,9 +1963,6 @@ static ssize_t conn_write(struct device_driver *drv, | |||
1998 | 1963 | ||
1999 | dev = netiucv_init_netdevice(username); | 1964 | dev = netiucv_init_netdevice(username); |
2000 | if (!dev) { | 1965 | if (!dev) { |
2001 | PRINT_WARN("netiucv: Could not allocate network device " | ||
2002 | "structure for user '%s'\n", | ||
2003 | netiucv_printname(username)); | ||
2004 | IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); | 1966 | IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); |
2005 | return -ENODEV; | 1967 | return -ENODEV; |
2006 | } | 1968 | } |
@@ -2020,15 +1982,12 @@ static ssize_t conn_write(struct device_driver *drv, | |||
2020 | if (rc) | 1982 | if (rc) |
2021 | goto out_unreg; | 1983 | goto out_unreg; |
2022 | 1984 | ||
2023 | PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username)); | ||
2024 | 1985 | ||
2025 | return count; | 1986 | return count; |
2026 | 1987 | ||
2027 | out_unreg: | 1988 | out_unreg: |
2028 | netiucv_unregister_device(priv->dev); | 1989 | netiucv_unregister_device(priv->dev); |
2029 | out_free_ndev: | 1990 | out_free_ndev: |
2030 | PRINT_WARN("netiucv: Could not register '%s'\n", dev->name); | ||
2031 | IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n"); | ||
2032 | netiucv_free_netdevice(dev); | 1991 | netiucv_free_netdevice(dev); |
2033 | return rc; | 1992 | return rc; |
2034 | } | 1993 | } |
@@ -2073,14 +2032,13 @@ static ssize_t remove_write (struct device_driver *drv, | |||
2073 | PRINT_WARN("netiucv: %s cannot be removed\n", | 2032 | PRINT_WARN("netiucv: %s cannot be removed\n", |
2074 | ndev->name); | 2033 | ndev->name); |
2075 | IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); | 2034 | IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); |
2076 | return -EBUSY; | 2035 | return -EPERM; |
2077 | } | 2036 | } |
2078 | unregister_netdev(ndev); | 2037 | unregister_netdev(ndev); |
2079 | netiucv_unregister_device(dev); | 2038 | netiucv_unregister_device(dev); |
2080 | return count; | 2039 | return count; |
2081 | } | 2040 | } |
2082 | read_unlock_bh(&iucv_connection_rwlock); | 2041 | read_unlock_bh(&iucv_connection_rwlock); |
2083 | PRINT_WARN("netiucv: net device %s unknown\n", name); | ||
2084 | IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); | 2042 | IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); |
2085 | return -EINVAL; | 2043 | return -EINVAL; |
2086 | } | 2044 | } |
@@ -2148,7 +2106,6 @@ static int __init netiucv_init(void) | |||
2148 | netiucv_driver.groups = netiucv_drv_attr_groups; | 2106 | netiucv_driver.groups = netiucv_drv_attr_groups; |
2149 | rc = driver_register(&netiucv_driver); | 2107 | rc = driver_register(&netiucv_driver); |
2150 | if (rc) { | 2108 | if (rc) { |
2151 | PRINT_ERR("NETIUCV: failed to register driver.\n"); | ||
2152 | IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); | 2109 | IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); |
2153 | goto out_iucv; | 2110 | goto out_iucv; |
2154 | } | 2111 | } |
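
Editor's note on the netiucv hunks above: the pattern throughout is that PRINT_WARN/PRINT_INFO console messages are dropped and only the IUCV_DBF_* trace entries remain (plus two return codes tightened from -EBUSY to -EPERM for sysfs writes against an active device). The IUCV_DBF_* macros are built on the generic s390 debug feature, so the messages still land in a per-driver s390dbf area instead of the console log. A hedged sketch of that underlying facility follows; the my_dbf_* names are invented for illustration and are not part of this patch.

    #include <asm/debug.h>

    static debug_info_t *my_dbf_setup;	/* hypothetical trace area, not from this patch */

    static int my_dbf_init(void)
    {
            /* 2 pages per area, 1 area, 32-byte records: enough for short text events */
            my_dbf_setup = debug_register("mydrv_setup", 2, 1, 32);
            if (!my_dbf_setup)
                    return -ENOMEM;
            debug_register_view(my_dbf_setup, &debug_hex_ascii_view);
            debug_set_level(my_dbf_setup, 3);
            debug_text_event(my_dbf_setup, 2, "dbf initialized");
            return 0;
    }

    static void my_dbf_exit(void)
    {
            debug_unregister(my_dbf_setup);
    }

The records are then read back through the s390dbf debugfs interface rather than cluttering dmesg, which is the point of removing the PRINT_* calls.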
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 9a71dae223e8..0ac54dc638c2 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -420,7 +420,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, | |||
420 | QETH_DBF_TEXT(TRACE, 3, "urla"); | 420 | QETH_DBF_TEXT(TRACE, 3, "urla"); |
421 | break; | 421 | break; |
422 | default: | 422 | default: |
423 | PRINT_WARN("Received data is IPA " | 423 | QETH_DBF_MESSAGE(2, "Received data is IPA " |
424 | "but not a reply!\n"); | 424 | "but not a reply!\n"); |
425 | break; | 425 | break; |
426 | } | 426 | } |
@@ -735,8 +735,8 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) | |||
735 | char *sense; | 735 | char *sense; |
736 | 736 | ||
737 | sense = (char *) irb->ecw; | 737 | sense = (char *) irb->ecw; |
738 | cstat = irb->scsw.cstat; | 738 | cstat = irb->scsw.cmd.cstat; |
739 | dstat = irb->scsw.dstat; | 739 | dstat = irb->scsw.cmd.dstat; |
740 | 740 | ||
741 | if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | | 741 | if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | |
742 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | | 742 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | |
@@ -823,8 +823,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
823 | 823 | ||
824 | if (__qeth_check_irb_error(cdev, intparm, irb)) | 824 | if (__qeth_check_irb_error(cdev, intparm, irb)) |
825 | return; | 825 | return; |
826 | cstat = irb->scsw.cstat; | 826 | cstat = irb->scsw.cmd.cstat; |
827 | dstat = irb->scsw.dstat; | 827 | dstat = irb->scsw.cmd.dstat; |
828 | 828 | ||
829 | card = CARD_FROM_CDEV(cdev); | 829 | card = CARD_FROM_CDEV(cdev); |
830 | if (!card) | 830 | if (!card) |
@@ -842,10 +842,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
842 | } | 842 | } |
843 | atomic_set(&channel->irq_pending, 0); | 843 | atomic_set(&channel->irq_pending, 0); |
844 | 844 | ||
845 | if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC)) | 845 | if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC)) |
846 | channel->state = CH_STATE_STOPPED; | 846 | channel->state = CH_STATE_STOPPED; |
847 | 847 | ||
848 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC)) | 848 | if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC)) |
849 | channel->state = CH_STATE_HALTED; | 849 | channel->state = CH_STATE_HALTED; |
850 | 850 | ||
851 | /*let's wake up immediately on data channel*/ | 851 | /*let's wake up immediately on data channel*/ |
@@ -4092,7 +4092,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) | |||
4092 | 4092 | ||
4093 | rc = qeth_determine_card_type(card); | 4093 | rc = qeth_determine_card_type(card); |
4094 | if (rc) { | 4094 | if (rc) { |
4095 | PRINT_WARN("%s: not a valid card type\n", __func__); | ||
4096 | QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); | 4095 | QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); |
4097 | goto err_card; | 4096 | goto err_card; |
4098 | } | 4097 | } |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 999552c83bbe..06deaee50f6d 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -944,15 +944,8 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, | |||
944 | else | 944 | else |
945 | rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, | 945 | rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, |
946 | addr->del_flags); | 946 | addr->del_flags); |
947 | if (rc) { | 947 | if (rc) |
948 | QETH_DBF_TEXT(TRACE, 2, "failed"); | 948 | QETH_DBF_TEXT(TRACE, 2, "failed"); |
949 | /* TODO: re-activate this warning as soon as we have a | ||
950 | * clean mirco code | ||
951 | qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); | ||
952 | PRINT_WARN("Could not deregister IP address %s (rc=%x)\n", | ||
953 | buf, rc); | ||
954 | */ | ||
955 | } | ||
956 | 949 | ||
957 | return rc; | 950 | return rc; |
958 | } | 951 | } |
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 8735a415a116..164e090c2625 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c | |||
@@ -156,11 +156,8 @@ static int __init smsg_init(void) | |||
156 | if (rc != 0) | 156 | if (rc != 0) |
157 | goto out; | 157 | goto out; |
158 | rc = iucv_register(&smsg_handler, 1); | 158 | rc = iucv_register(&smsg_handler, 1); |
159 | if (rc) { | 159 | if (rc) |
160 | printk(KERN_ERR "SMSGIUCV: failed to register to iucv"); | ||
161 | rc = -EIO; /* better errno ? */ | ||
162 | goto out_driver; | 160 | goto out_driver; |
163 | } | ||
164 | smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL); | 161 | smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL); |
165 | if (!smsg_path) { | 162 | if (!smsg_path) { |
166 | rc = -ENOMEM; | 163 | rc = -ENOMEM; |
@@ -168,11 +165,8 @@ static int __init smsg_init(void) | |||
168 | } | 165 | } |
169 | rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", | 166 | rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", |
170 | NULL, NULL, NULL); | 167 | NULL, NULL, NULL); |
171 | if (rc) { | 168 | if (rc) |
172 | printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG"); | ||
173 | rc = -EIO; /* better errno ? */ | ||
174 | goto out_free; | 169 | goto out_free; |
175 | } | ||
176 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); | 170 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); |
177 | return 0; | 171 | return 0; |
178 | 172 | ||
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index 5bfbe7659830..834e9ee7e934 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c | |||
@@ -2,10 +2,10 @@ | |||
2 | * drivers/s390/s390mach.c | 2 | * drivers/s390/s390mach.c |
3 | * S/390 machine check handler | 3 | * S/390 machine check handler |
4 | * | 4 | * |
5 | * S390 version | 5 | * Copyright IBM Corp. 2000,2008 |
6 | * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
7 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | 6 | * Author(s): Ingo Adlung (adlung@de.ibm.com) |
8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
8 | * Cornelia Huck <cornelia.huck@de.ibm.com> | ||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
@@ -18,10 +18,6 @@ | |||
18 | #include <asm/etr.h> | 18 | #include <asm/etr.h> |
19 | #include <asm/lowcore.h> | 19 | #include <asm/lowcore.h> |
20 | #include <asm/cio.h> | 20 | #include <asm/cio.h> |
21 | #include "cio/cio.h" | ||
22 | #include "cio/chsc.h" | ||
23 | #include "cio/css.h" | ||
24 | #include "cio/chp.h" | ||
25 | #include "s390mach.h" | 21 | #include "s390mach.h" |
26 | 22 | ||
27 | static struct semaphore m_sem; | 23 | static struct semaphore m_sem; |
@@ -36,13 +32,40 @@ s390_handle_damage(char *msg) | |||
36 | for(;;); | 32 | for(;;); |
37 | } | 33 | } |
38 | 34 | ||
35 | static crw_handler_t crw_handlers[NR_RSCS]; | ||
36 | |||
37 | /** | ||
38 | * s390_register_crw_handler() - register a channel report word handler | ||
39 | * @rsc: reporting source code to handle | ||
40 | * @handler: handler to be registered | ||
41 | * | ||
42 | * Returns %0 on success and a negative error value otherwise. | ||
43 | */ | ||
44 | int s390_register_crw_handler(int rsc, crw_handler_t handler) | ||
45 | { | ||
46 | if ((rsc < 0) || (rsc >= NR_RSCS)) | ||
47 | return -EINVAL; | ||
48 | if (!cmpxchg(&crw_handlers[rsc], NULL, handler)) | ||
49 | return 0; | ||
50 | return -EBUSY; | ||
51 | } | ||
52 | |||
53 | /** | ||
54 | * s390_unregister_crw_handler() - unregister a channel report word handler | ||
55 | * @rsc: reporting source code to handle | ||
56 | */ | ||
57 | void s390_unregister_crw_handler(int rsc) | ||
58 | { | ||
59 | if ((rsc < 0) || (rsc >= NR_RSCS)) | ||
60 | return; | ||
61 | xchg(&crw_handlers[rsc], NULL); | ||
62 | synchronize_sched(); | ||
63 | } | ||
64 | |||
39 | /* | 65 | /* |
40 | * Retrieve CRWs and call function to handle event. | 66 | * Retrieve CRWs and call function to handle event. |
41 | * | ||
42 | * Note : we currently process CRWs for io and chsc subchannels only | ||
43 | */ | 67 | */ |
44 | static int | 68 | static int s390_collect_crw_info(void *param) |
45 | s390_collect_crw_info(void *param) | ||
46 | { | 69 | { |
47 | struct crw crw[2]; | 70 | struct crw crw[2]; |
48 | int ccode; | 71 | int ccode; |
@@ -84,57 +107,24 @@ repeat: | |||
84 | crw[chain].rsid); | 107 | crw[chain].rsid); |
85 | /* Check for overflows. */ | 108 | /* Check for overflows. */ |
86 | if (crw[chain].oflw) { | 109 | if (crw[chain].oflw) { |
110 | int i; | ||
111 | |||
87 | pr_debug("%s: crw overflow detected!\n", __func__); | 112 | pr_debug("%s: crw overflow detected!\n", __func__); |
88 | css_schedule_eval_all(); | 113 | for (i = 0; i < NR_RSCS; i++) { |
114 | if (crw_handlers[i]) | ||
115 | crw_handlers[i](NULL, NULL, 1); | ||
116 | } | ||
89 | chain = 0; | 117 | chain = 0; |
90 | continue; | 118 | continue; |
91 | } | 119 | } |
92 | switch (crw[chain].rsc) { | 120 | if (crw[0].chn && !chain) { |
93 | case CRW_RSC_SCH: | 121 | chain++; |
94 | if (crw[0].chn && !chain) | 122 | continue; |
95 | break; | ||
96 | pr_debug("source is subchannel %04X\n", crw[0].rsid); | ||
97 | css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0); | ||
98 | break; | ||
99 | case CRW_RSC_MONITOR: | ||
100 | pr_debug("source is monitoring facility\n"); | ||
101 | break; | ||
102 | case CRW_RSC_CPATH: | ||
103 | pr_debug("source is channel path %02X\n", crw[0].rsid); | ||
104 | /* | ||
105 | * Check for solicited machine checks. These are | ||
106 | * created by reset channel path and need not be | ||
107 | * reported to the common I/O layer. | ||
108 | */ | ||
109 | if (crw[chain].slct) { | ||
110 | pr_debug("solicited machine check for " | ||
111 | "channel path %02X\n", crw[0].rsid); | ||
112 | break; | ||
113 | } | ||
114 | switch (crw[0].erc) { | ||
115 | case CRW_ERC_IPARM: /* Path has come. */ | ||
116 | chp_process_crw(crw[0].rsid, 1); | ||
117 | break; | ||
118 | case CRW_ERC_PERRI: /* Path has gone. */ | ||
119 | case CRW_ERC_PERRN: | ||
120 | chp_process_crw(crw[0].rsid, 0); | ||
121 | break; | ||
122 | default: | ||
123 | pr_debug("Don't know how to handle erc=%x\n", | ||
124 | crw[0].erc); | ||
125 | } | ||
126 | break; | ||
127 | case CRW_RSC_CONFIG: | ||
128 | pr_debug("source is configuration-alert facility\n"); | ||
129 | break; | ||
130 | case CRW_RSC_CSS: | ||
131 | pr_debug("source is channel subsystem\n"); | ||
132 | chsc_process_crw(); | ||
133 | break; | ||
134 | default: | ||
135 | pr_debug("unknown source\n"); | ||
136 | break; | ||
137 | } | 123 | } |
124 | if (crw_handlers[crw[chain].rsc]) | ||
125 | crw_handlers[crw[chain].rsc](&crw[0], | ||
126 | chain ? &crw[1] : NULL, | ||
127 | 0); | ||
138 | /* chain is always 0 or 1 here. */ | 128 | /* chain is always 0 or 1 here. */ |
139 | chain = crw[chain].chn ? chain + 1 : 0; | 129 | chain = crw[chain].chn ? chain + 1 : 0; |
140 | } | 130 | } |
@@ -468,6 +458,10 @@ s390_do_machine_check(struct pt_regs *regs) | |||
468 | etr_sync_check(); | 458 | etr_sync_check(); |
469 | if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) | 459 | if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) |
470 | etr_switch_to_local(); | 460 | etr_switch_to_local(); |
461 | if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC)) | ||
462 | stp_sync_check(); | ||
463 | if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND)) | ||
464 | stp_island_check(); | ||
471 | } | 465 | } |
472 | 466 | ||
473 | if (mci->se) | 467 | if (mci->se) |
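
Editor's note on the s390mach.c hunks above: s390_collect_crw_info() no longer hard-codes knowledge of the cio layer; it dispatches each channel report word to whatever handler is registered for its reporting-source code, and on overflow it notifies every registered handler with NULL CRW pointers and the overflow flag set. (The same file also wires the new STP sync/island external-damage bits into the machine check handler.) A hedged sketch of what a consumer-side handler might look like, with invented names and assuming the declarations from s390mach.h:

    #include <linux/kernel.h>
    #include "s390mach.h"

    static void sample_crw_handler(struct crw *crw0, struct crw *crw1, int overflow)
    {
            if (overflow) {
                    /* CRWs were lost; crw0/crw1 are NULL, re-evaluate everything */
                    return;
            }
            /* crw0 is always valid here; crw1 is only set for chained reports */
            pr_debug("crw: rsc=%x rsid=%x chained=%d\n",
                     crw0->rsc, crw0->rsid, crw1 != NULL);
    }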
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h index ca681f9b67fc..d39f8b697d27 100644 --- a/drivers/s390/s390mach.h +++ b/drivers/s390/s390mach.h | |||
@@ -72,6 +72,13 @@ struct crw { | |||
72 | __u32 rsid : 16; /* reporting-source ID */ | 72 | __u32 rsid : 16; /* reporting-source ID */ |
73 | } __attribute__ ((packed)); | 73 | } __attribute__ ((packed)); |
74 | 74 | ||
75 | typedef void (*crw_handler_t)(struct crw *, struct crw *, int); | ||
76 | |||
77 | extern int s390_register_crw_handler(int rsc, crw_handler_t handler); | ||
78 | extern void s390_unregister_crw_handler(int rsc); | ||
79 | |||
80 | #define NR_RSCS 16 | ||
81 | |||
75 | #define CRW_RSC_MONITOR 0x2 /* monitoring facility */ | 82 | #define CRW_RSC_MONITOR 0x2 /* monitoring facility */ |
76 | #define CRW_RSC_SCH 0x3 /* subchannel */ | 83 | #define CRW_RSC_SCH 0x3 /* subchannel */ |
77 | #define CRW_RSC_CPATH 0x4 /* channel path */ | 84 | #define CRW_RSC_CPATH 0x4 /* channel path */ |
@@ -105,6 +112,9 @@ static inline int stcrw(struct crw *pcrw ) | |||
105 | #define ED_ETR_SYNC 12 /* External damage ETR sync check */ | 112 | #define ED_ETR_SYNC 12 /* External damage ETR sync check */ |
106 | #define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ | 113 | #define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ |
107 | 114 | ||
115 | #define ED_STP_SYNC 7 /* External damage STP sync check */ | ||
116 | #define ED_STP_ISLAND 6 /* External damage STP island check */ | ||
117 | |||
108 | struct pt_regs; | 118 | struct pt_regs; |
109 | 119 | ||
110 | void s390_handle_mcck(void); | 120 | void s390_handle_mcck(void); |
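
Editor's note on the s390mach.h hunk above: with crw_handler_t and the register/unregister prototypes exported, a subsystem claims a reporting-source code once at init time and releases it on exit. A minimal usage sketch pairing with the illustrative handler in the previous note (sample_* names are not from this patch):

    #include <linux/init.h>
    #include "s390mach.h"

    static int __init sample_crw_init(void)
    {
            /* one handler per RSC; -EBUSY means another owner already registered */
            return s390_register_crw_handler(CRW_RSC_CPATH, sample_crw_handler);
    }

    static void __exit sample_crw_exit(void)
    {
            /* per the implementation above, this waits via synchronize_sched()
             * so no handler is still running when we return */
            s390_unregister_crw_handler(CRW_RSC_CPATH);
    }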