author     Linus Torvalds <torvalds@linux-foundation.org>   2010-10-25 11:36:50 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-10-25 11:36:50 -0400
commit     c4a6eb3b7d5b483de331313e7ea38a6891a3447a (patch)
tree       de415b67626308b1fa414e47f17959939c017c92 /drivers
parent     33081adf8b89d5a716d7e1c60171768d39795b39 (diff)
parent     96f4a70d8eb4d746b19d5b5510407c8ff0d00340 (diff)
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (48 commits)
  [S390] topology: export cpu topology via proc/sysinfo
  [S390] topology: move topology sysinfo code
  [S390] topology: clean up facility detection
  [S390] cleanup facility list handling
  [S390] enable ARCH_DMA_ADDR_T_64BIT with 64BIT
  [S390] dasd: ignore unsolicited interrupts for DIAG
  [S390] kvm: Enable z196 instruction facilities
  [S390] dasd: fix unsolicited interrupt recognition
  [S390] dasd: fix use after free in dbf
  [S390] kvm: Fix badness at include/asm/mmu_context.h:83
  [S390] cio: fix I/O cancel function
  [S390] topology: change default
  [S390] smp: use correct cpu address in print_cpu_info()
  [S390] remove ieee_instruction_pointer from thread_struct
  [S390] cleanup system call parameter setup
  [S390] correct alignment of cpuid structure
  [S390] cleanup lowcore access from external interrupts
  [S390] cleanup lowcore access from program checks
  [S390] pgtable: move pte_mkhuge() from hugetlb.h to pgtable.h
  [S390] fix SIGBUS handling
  ...
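Several of the hunks below (dasd_diag.c and sclp.c) convert s390 external interrupt handlers from the old single-argument form, which read the interruption code and parameters out of lowcore, to a three-argument form that receives them directly ("cleanup lowcore access from external interrupts" above). The following minimal sketch is not taken from the patch itself; it only mirrors the handler shape visible in the dasd_diag hunk, and all example_* names are illustrative:

/* Illustrative sketch of the new-style handler signature; not from the patch. */
#include <linux/types.h>

#define EXAMPLE_CODE_31BIT 0x03	/* corresponds to DASD_DIAG_CODE_31BIT below */
#define EXAMPLE_CODE_64BIT 0x07	/* corresponds to DASD_DIAG_CODE_64BIT below */

static void example_ext_handler(unsigned int ext_int_code,
				unsigned int param32, unsigned long param64)
{
	unsigned long ip;

	switch (ext_int_code >> 24) {	/* sub-code lives in the top byte */
	case EXAMPLE_CODE_31BIT:
		ip = param32;		/* 31-bit interruption parameter */
		break;
	case EXAMPLE_CODE_64BIT:
		ip = param64;		/* 64-bit interruption parameter */
		break;
	default:
		return;
	}
	/* ... look up and complete the request identified by ip ... */
	(void)ip;
}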
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hvc_iucv.c             |    4
-rw-r--r--  drivers/s390/block/dasd.c           |   24
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c  |    3
-rw-r--r--  drivers/s390/block/dasd_diag.c      |   19
-rw-r--r--  drivers/s390/block/dasd_diag.h      |    4
-rw-r--r--  drivers/s390/block/dasd_eckd.c      |   68
-rw-r--r--  drivers/s390/block/dasd_proc.c      |    1
-rw-r--r--  drivers/s390/char/sclp.c            |   14
-rw-r--r--  drivers/s390/char/vmlogrdr.c        |    4
-rw-r--r--  drivers/s390/cio/blacklist.c        |   10
-rw-r--r--  drivers/s390/cio/chp.c              |   41
-rw-r--r--  drivers/s390/cio/chp.h              |   12
-rw-r--r--  drivers/s390/cio/chsc.c             |  291
-rw-r--r--  drivers/s390/cio/chsc.h             |   28
-rw-r--r--  drivers/s390/cio/chsc_sch.c         |   12
-rw-r--r--  drivers/s390/cio/css.c              |   50
-rw-r--r--  drivers/s390/cio/device.c           |   18
-rw-r--r--  drivers/s390/cio/device_fsm.c       |   41
-rw-r--r--  drivers/s390/cio/device_pgid.c      |   23
-rw-r--r--  drivers/s390/cio/io_sch.h           |    7
-rw-r--r--  drivers/s390/crypto/ap_bus.c        |    9
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c       |    9
22 files changed, 378 insertions, 314 deletions
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 7b01bc609de3..c3425bb3a1f6 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -1303,13 +1303,11 @@ static int __init hvc_iucv_init(void)
 	if (rc) {
 		pr_err("Registering IUCV handlers failed with error code=%d\n",
 			rc);
-		goto out_error_iucv;
+		goto out_error_hvc;
 	}
 
 	return 0;
 
-out_error_iucv:
-	iucv_unregister(&hvc_iucv_handler, 0);
 out_error_hvc:
 	for (i = 0; i < hvc_iucv_devices; i++)
 		if (hvc_iucv_table[i])
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index aa95f1001761..fb613d70c2cb 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1099,16 +1099,30 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	cqr = (struct dasd_ccw_req *) intparm;
 	if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
 		     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
-		     (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
+		     ((scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND) ||
+		      (scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND |
+						  SCSW_STCTL_ALERT_STATUS))))) {
 		if (cqr && cqr->status == DASD_CQR_IN_IO)
 			cqr->status = DASD_CQR_QUEUED;
+		if (cqr)
+			memcpy(&cqr->irb, irb, sizeof(*irb));
 		device = dasd_device_from_cdev_locked(cdev);
-		if (!IS_ERR(device)) {
-			dasd_device_clear_timer(device);
-			device->discipline->handle_unsolicited_interrupt(device,
-									 irb);
+		if (IS_ERR(device))
+			return;
+		/* ignore unsolicited interrupts for DIAG discipline */
+		if (device->discipline == dasd_diag_discipline_pointer) {
 			dasd_put_device(device);
+			return;
 		}
+		device->discipline->dump_sense_dbf(device, irb,
+						   "unsolicited");
+		if ((device->features & DASD_FEATURE_ERPLOG))
+			device->discipline->dump_sense(device, cqr,
+						       irb);
+		dasd_device_clear_timer(device);
+		device->discipline->handle_unsolicited_interrupt(device,
+								 irb);
+		dasd_put_device(device);
 		return;
 	}
 
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index e82d427ff5eb..968c76cf7127 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -221,6 +221,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
 	ccw->cmd_code = CCW_CMD_DCTL;
 	ccw->count = 4;
 	ccw->cda = (__u32)(addr_t) DCTL_data;
+	dctl_cqr->flags = erp->flags;
 	dctl_cqr->function = dasd_3990_erp_DCTL;
 	dctl_cqr->refers = erp;
 	dctl_cqr->startdev = device;
@@ -1710,6 +1711,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
 	ccw->cda = cpa;
 
 	/* fill erp related fields */
+	erp->flags = default_erp->flags;
 	erp->function = dasd_3990_erp_action_1B_32;
 	erp->refers = default_erp->refers;
 	erp->startdev = device;
@@ -2354,6 +2356,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
 		ccw->cda = (long)(cqr->cpaddr);
 	}
 
+	erp->flags = cqr->flags;
 	erp->function = dasd_3990_erp_add_erp;
 	erp->refers = cqr;
 	erp->startdev = device;
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 2b3bc3ec0541..266b34b55403 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -228,25 +228,22 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr)
 }
 
 /* Handle external interruption. */
-static void
-dasd_ext_handler(__u16 code)
+static void dasd_ext_handler(unsigned int ext_int_code,
+			     unsigned int param32, unsigned long param64)
 {
 	struct dasd_ccw_req *cqr, *next;
 	struct dasd_device *device;
 	unsigned long long expires;
 	unsigned long flags;
-	u8 int_code, status;
 	addr_t ip;
 	int rc;
 
-	int_code = *((u8 *) DASD_DIAG_LC_INT_CODE);
-	status = *((u8 *) DASD_DIAG_LC_INT_STATUS);
-	switch (int_code) {
+	switch (ext_int_code >> 24) {
 	case DASD_DIAG_CODE_31BIT:
-		ip = (addr_t) *((u32 *) DASD_DIAG_LC_INT_PARM_31BIT);
+		ip = (addr_t) param32;
 		break;
 	case DASD_DIAG_CODE_64BIT:
-		ip = (addr_t) *((u64 *) DASD_DIAG_LC_INT_PARM_64BIT);
+		ip = (addr_t) param64;
 		break;
 	default:
 		return;
@@ -281,7 +278,7 @@ dasd_ext_handler(__u16 code)
 	cqr->stopclk = get_clock();
 
 	expires = 0;
-	if (status == 0) {
+	if ((ext_int_code & 0xff0000) == 0) {
 		cqr->status = DASD_CQR_SUCCESS;
 		/* Start first request on queue if possible -> fast_io. */
 		if (!list_empty(&device->ccw_queue)) {
@@ -296,8 +293,8 @@ dasd_ext_handler(__u16 code)
 	} else {
 		cqr->status = DASD_CQR_QUEUED;
 		DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
-			      "request %p was %d (%d retries left)", cqr, status,
-			      cqr->retries);
+			      "request %p was %d (%d retries left)", cqr,
+			      (ext_int_code >> 16) & 0xff, cqr->retries);
 		dasd_diag_erp(device);
 	}
 
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
index b8c78267ff3e..4f71fbe60c82 100644
--- a/drivers/s390/block/dasd_diag.h
+++ b/drivers/s390/block/dasd_diag.h
@@ -18,10 +18,6 @@
 #define DEV_CLASS_FBA 0x01
 #define DEV_CLASS_ECKD 0x04
 
-#define DASD_DIAG_LC_INT_CODE 132
-#define DASD_DIAG_LC_INT_STATUS 133
-#define DASD_DIAG_LC_INT_PARM_31BIT 128
-#define DASD_DIAG_LC_INT_PARM_64BIT 4536
 #define DASD_DIAG_CODE_31BIT 0x03
 #define DASD_DIAG_CODE_64BIT 0x07
 
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 59b4ecfb967b..50cf96389d2c 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1776,13 +1776,13 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
 	}
 
 	/* summary unit check */
-	if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
-	    (irb->ecw[7] == 0x0D)) {
+	sense = dasd_get_sense(irb);
+	if (sense && (sense[7] == 0x0D) &&
+	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
 		dasd_alias_handle_summary_unit_check(device, irb);
 		return;
 	}
 
-	sense = dasd_get_sense(irb);
 	/* service information message SIM */
 	if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
 	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
@@ -1791,26 +1791,15 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
 		return;
 	}
 
-	if ((scsw_cc(&irb->scsw) == 1) &&
-	    (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
-	    (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) &&
-	    (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) {
+	if ((scsw_cc(&irb->scsw) == 1) && !sense &&
+	    (scsw_fctl(&irb->scsw) == SCSW_FCTL_START_FUNC) &&
+	    (scsw_actl(&irb->scsw) == SCSW_ACTL_START_PEND) &&
+	    (scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND)) {
 		/* fake irb do nothing, they are handled elsewhere */
 		dasd_schedule_device_bh(device);
 		return;
 	}
 
-	if (!sense) {
-		/* just report other unsolicited interrupts */
-		DBF_DEV_EVENT(DBF_ERR, device, "%s",
-			      "unsolicited interrupt received");
-	} else {
-		DBF_DEV_EVENT(DBF_ERR, device, "%s",
-			      "unsolicited interrupt received "
-			      "(sense available)");
-		device->discipline->dump_sense_dbf(device, irb, "unsolicited");
-	}
-
 	dasd_schedule_device_bh(device);
 	return;
 };
@@ -3093,19 +3082,19 @@ dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
 			 char *reason)
 {
 	u64 *sense;
+	u64 *stat;
 
 	sense = (u64 *) dasd_get_sense(irb);
+	stat = (u64 *) &irb->scsw;
 	if (sense) {
-		DBF_DEV_EVENT(DBF_EMERG, device,
-			      "%s: %s %02x%02x%02x %016llx %016llx %016llx "
-			      "%016llx", reason,
-			      scsw_is_tm(&irb->scsw) ? "t" : "c",
-			      scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
-			      scsw_dstat(&irb->scsw), sense[0], sense[1],
-			      sense[2], sense[3]);
+		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
+			      "%016llx %016llx %016llx %016llx",
+			      reason, *stat, *((u32 *) (stat + 1)),
+			      sense[0], sense[1], sense[2], sense[3]);
 	} else {
-		DBF_DEV_EVENT(DBF_EMERG, device, "%s",
-			      "SORRY - NO VALID SENSE AVAILABLE\n");
+		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
+			      reason, *stat, *((u32 *) (stat + 1)),
+			      "NO VALID SENSE");
 	}
 }
 
@@ -3131,9 +3120,12 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
 		       " I/O status report for device %s:\n",
 		       dev_name(&device->cdev->dev));
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
-		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
-		       req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
-		       scsw_cc(&irb->scsw), req ? req->intrc : 0);
+		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
+		       "CS:%02X RC:%d\n",
+		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
+		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
+		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
+		       req ? req->intrc : 0);
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 		       " device %s: Failing CCW: %p\n",
 		       dev_name(&device->cdev->dev),
@@ -3234,11 +3226,13 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
 		       " I/O status report for device %s:\n",
 		       dev_name(&device->cdev->dev));
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
-		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d "
-		       "fcxs: 0x%02X schxs: 0x%02X\n", req,
-		       scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
-		       scsw_cc(&irb->scsw), req->intrc,
-		       irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
+		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
+		       "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
+		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
+		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
+		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
+		       irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
+		       req ? req->intrc : 0);
 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 		       " device %s: Failing TCW: %p\n",
 		       dev_name(&device->cdev->dev),
@@ -3246,7 +3240,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
 
 	tsb = NULL;
 	sense = NULL;
-	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs == 0x01))
+	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
 		tsb = tcw_get_tsb(
 			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);
 
@@ -3344,7 +3338,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
 static void dasd_eckd_dump_sense(struct dasd_device *device,
 				 struct dasd_ccw_req *req, struct irb *irb)
 {
-	if (req && scsw_is_tm(&req->irb.scsw))
+	if (scsw_is_tm(&irb->scsw))
 		dasd_eckd_dump_sense_tcw(device, req, irb);
 	else
 		dasd_eckd_dump_sense_ccw(device, req, irb);
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 2eb025592809..c4a6a31bd9cd 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -251,7 +251,6 @@ static ssize_t dasd_stats_proc_write(struct file *file,
 	buffer = dasd_get_user_string(user_buf, user_len);
 	if (IS_ERR(buffer))
 		return PTR_ERR(buffer);
-	DBF_EVENT(DBF_DEBUG, "/proc/dasd/statictics: '%s'\n", buffer);
 
 	/* check for valid verbs */
 	str = skip_spaces(buffer);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 5707a80b96b6..35cc4686b99b 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -395,16 +395,16 @@ __sclp_find_req(u32 sccb)
 /* Handler for external interruption. Perform request post-processing.
  * Prepare read event data request if necessary. Start processing of next
  * request on queue. */
-static void
-sclp_interrupt_handler(__u16 code)
+static void sclp_interrupt_handler(unsigned int ext_int_code,
+				   unsigned int param32, unsigned long param64)
 {
 	struct sclp_req *req;
 	u32 finished_sccb;
 	u32 evbuf_pending;
 
 	spin_lock(&sclp_lock);
-	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
-	evbuf_pending = S390_lowcore.ext_params & 0x3;
+	finished_sccb = param32 & 0xfffffff8;
+	evbuf_pending = param32 & 0x3;
 	if (finished_sccb) {
 		del_timer(&sclp_request_timer);
 		sclp_running_state = sclp_running_state_reset_pending;
@@ -819,12 +819,12 @@ EXPORT_SYMBOL(sclp_reactivate);
 
 /* Handler for external interruption used during initialization. Modify
  * request state to done. */
-static void
-sclp_check_handler(__u16 code)
+static void sclp_check_handler(unsigned int ext_int_code,
+			       unsigned int param32, unsigned long param64)
 {
 	u32 finished_sccb;
 
-	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
+	finished_sccb = param32 & 0xfffffff8;
 	/* Is this the interrupt we are waiting for? */
 	if (finished_sccb == 0)
 		return;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 0d6dc4b92cc2..9f661426e4a1 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -215,7 +215,7 @@ static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
 
 static int vmlogrdr_get_recording_class_AB(void)
 {
-	char cp_command[]="QUERY COMMAND RECORDING ";
+	static const char cp_command[] = "QUERY COMMAND RECORDING ";
 	char cp_response[80];
 	char *tail;
 	int len,i;
@@ -638,7 +638,7 @@ static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
 					      char *buf)
 {
 
-	char cp_command[] = "QUERY RECORDING ";
+	static const char cp_command[] = "QUERY RECORDING ";
 	int len;
 
 	cpcmd(cp_command, buf, 4096, NULL);
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 13cb60162e42..76058a5166ed 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -79,17 +79,15 @@ static int pure_hex(char **cp, unsigned int *val, int min_digit,
 		    int max_digit, int max_val)
 {
 	int diff;
-	unsigned int value;
 
 	diff = 0;
 	*val = 0;
 
-	while (isxdigit(**cp) && (diff <= max_digit)) {
+	while (diff <= max_digit) {
+		int value = hex_to_bin(**cp);
 
-		if (isdigit(**cp))
-			value = **cp - '0';
-		else
-			value = tolower(**cp) - 'a' + 10;
+		if (value < 0)
+			break;
 		*val = *val * 16 + value;
 		(*cp)++;
 		diff++;
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 6c9fa15aac7b..2d32233943a9 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -1,7 +1,7 @@
 /*
  * drivers/s390/cio/chp.c
  *
- * Copyright IBM Corp. 1999,2007
+ * Copyright IBM Corp. 1999,2010
  * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
  *	      Arnd Bergmann (arndb@de.ibm.com)
  *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -54,12 +54,6 @@ static struct work_struct cfg_work;
 /* Wait queue for configure completion events. */
 static wait_queue_head_t cfg_wait_queue;
 
-/* Return channel_path struct for given chpid. */
-static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
-{
-	return channel_subsystems[chpid.cssid]->chps[chpid.id];
-}
-
 /* Set vary state for given chpid. */
 static void set_chp_logically_online(struct chp_id chpid, int onoff)
 {
@@ -241,11 +235,13 @@ static ssize_t chp_status_show(struct device *dev,
 			       struct device_attribute *attr, char *buf)
 {
 	struct channel_path *chp = to_channelpath(dev);
+	int status;
 
-	if (!chp)
-		return 0;
-	return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") :
-		sprintf(buf, "offline\n"));
+	mutex_lock(&chp->lock);
+	status = chp->state;
+	mutex_unlock(&chp->lock);
+
+	return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
 }
 
 static ssize_t chp_status_write(struct device *dev,
@@ -261,15 +257,18 @@ static ssize_t chp_status_write(struct device *dev,
 	if (!num_args)
 		return count;
 
-	if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1"))
+	if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
+		mutex_lock(&cp->lock);
 		error = s390_vary_chpid(cp->chpid, 1);
-	else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0"))
+		mutex_unlock(&cp->lock);
+	} else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
+		mutex_lock(&cp->lock);
 		error = s390_vary_chpid(cp->chpid, 0);
-	else
+		mutex_unlock(&cp->lock);
+	} else
 		error = -EINVAL;
 
 	return error < 0 ? error : count;
-
 }
 
 static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
@@ -315,10 +314,12 @@ static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
 			     char *buf)
 {
 	struct channel_path *chp = to_channelpath(dev);
+	u8 type;
 
-	if (!chp)
-		return 0;
-	return sprintf(buf, "%x\n", chp->desc.desc);
+	mutex_lock(&chp->lock);
+	type = chp->desc.desc;
+	mutex_unlock(&chp->lock);
+	return sprintf(buf, "%x\n", type);
 }
 
 static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
@@ -395,6 +396,7 @@ int chp_new(struct chp_id chpid)
 	chp->state = 1;
 	chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
 	chp->dev.release = chp_release;
+	mutex_init(&chp->lock);
 
 	/* Obtain channel path description and fill it in. */
 	ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
@@ -464,7 +466,10 @@ void *chp_get_chp_desc(struct chp_id chpid)
 	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
 	if (!desc)
 		return NULL;
+
+	mutex_lock(&chp->lock);
 	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
+	mutex_unlock(&chp->lock);
 	return desc;
 }
 
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 26c3d2246176..12b4903d6fe3 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -1,7 +1,7 @@
 /*
  * drivers/s390/cio/chp.h
  *
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007,2010
  * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
  */
 
@@ -10,6 +10,7 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/mutex.h>
 #include <asm/chpid.h>
 #include "chsc.h"
 #include "css.h"
@@ -40,16 +41,23 @@ static inline int chp_test_bit(u8 *bitmap, int num)
 
 
 struct channel_path {
+	struct device dev;
 	struct chp_id chpid;
+	struct mutex lock; /* Serialize access to below members. */
 	int state;
 	struct channel_path_desc desc;
 	/* Channel-measurement related stuff: */
 	int cmg;
 	int shared;
 	void *cmg_chars;
-	struct device dev;
 };
 
+/* Return channel_path struct for given chpid. */
+static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
+{
+	return channel_subsystems[chpid.cssid]->chps[chpid.id];
+}
+
 int chp_get_status(struct chp_id chpid);
 u8 chp_get_sch_opm(struct subchannel *sch);
 int chp_is_registered(struct chp_id chpid);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 4cbb1a6ca33c..1aaddea673e0 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -2,7 +2,7 @@
  * drivers/s390/cio/chsc.c
  *   S/390 common I/O routines -- channel subsystem call
  *
- *    Copyright IBM Corp. 1999,2008
+ *    Copyright IBM Corp. 1999,2010
  *    Author(s): Ingo Adlung (adlung@de.ibm.com)
  *		 Cornelia Huck (cornelia.huck@de.ibm.com)
  *		 Arnd Bergmann (arndb@de.ibm.com)
@@ -29,8 +29,8 @@
 #include "chsc.h"
 
 static void *sei_page;
-static DEFINE_SPINLOCK(siosl_lock);
-static DEFINE_SPINLOCK(sda_lock);
+static void *chsc_page;
+static DEFINE_SPINLOCK(chsc_page_lock);
 
 /**
  * chsc_error_from_response() - convert a chsc response to an error
@@ -85,17 +85,15 @@ struct chsc_ssd_area {
 
 int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
 {
-	unsigned long page;
 	struct chsc_ssd_area *ssd_area;
 	int ccode;
 	int ret;
 	int i;
 	int mask;
 
-	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!page)
-		return -ENOMEM;
-	ssd_area = (struct chsc_ssd_area *) page;
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	ssd_area = chsc_page;
 	ssd_area->request.length = 0x0010;
 	ssd_area->request.code = 0x0004;
 	ssd_area->ssid = schid.ssid;
@@ -106,25 +104,25 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
 	/* Check response. */
 	if (ccode > 0) {
 		ret = (ccode == 3) ? -ENODEV : -EBUSY;
-		goto out_free;
+		goto out;
 	}
 	ret = chsc_error_from_response(ssd_area->response.code);
 	if (ret != 0) {
 		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
 			      schid.ssid, schid.sch_no,
 			      ssd_area->response.code);
-		goto out_free;
+		goto out;
 	}
 	if (!ssd_area->sch_valid) {
 		ret = -ENODEV;
-		goto out_free;
+		goto out;
 	}
 	/* Copy data */
 	ret = 0;
 	memset(ssd, 0, sizeof(struct chsc_ssd_info));
 	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
 	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
-		goto out_free;
+		goto out;
 	ssd->path_mask = ssd_area->path_mask;
 	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
 	for (i = 0; i < 8; i++) {
@@ -136,8 +134,8 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
 		if (ssd_area->fla_valid_mask & mask)
 			ssd->fla[i] = ssd_area->fla[i];
 	}
-out_free:
-	free_page(page);
+out:
+	spin_unlock_irq(&chsc_page_lock);
 	return ret;
 }
 
@@ -497,6 +495,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
  */
 int chsc_chp_vary(struct chp_id chpid, int on)
 {
+	struct channel_path *chp = chpid_to_chp(chpid);
 	struct chp_link link;
 
 	memset(&link, 0, sizeof(struct chp_link));
@@ -506,11 +505,12 @@ int chsc_chp_vary(struct chp_id chpid, int on)
 	/*
 	 * Redo PathVerification on the devices the chpid connects to
 	 */
-
-	if (on)
+	if (on) {
+		/* Try to update the channel path descritor. */
+		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
 		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
 					   __s390_vary_chpid_on, &link);
-	else
+	} else
 		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
 					   NULL, &link);
 
@@ -552,7 +552,7 @@ cleanup:
 	return ret;
 }
 
-int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
+int __chsc_do_secm(struct channel_subsystem *css, int enable)
 {
 	struct {
 		struct chsc_header request;
@@ -573,7 +573,9 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
 	} __attribute__ ((packed)) *secm_area;
 	int ret, ccode;
 
-	secm_area = page;
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	secm_area = chsc_page;
 	secm_area->request.length = 0x0050;
 	secm_area->request.code = 0x0016;
 
@@ -584,8 +586,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
 	secm_area->operation_code = enable ? 0 : 1;
 
 	ccode = chsc(secm_area);
-	if (ccode > 0)
-		return (ccode == 3) ? -ENODEV : -EBUSY;
+	if (ccode > 0) {
+		ret = (ccode == 3) ? -ENODEV : -EBUSY;
+		goto out;
+	}
 
 	switch (secm_area->response.code) {
 	case 0x0102:
@@ -598,37 +602,32 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
 	if (ret != 0)
 		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
 			      secm_area->response.code);
+out:
+	spin_unlock_irq(&chsc_page_lock);
 	return ret;
 }
 
 int
 chsc_secm(struct channel_subsystem *css, int enable)
 {
-	void *secm_area;
 	int ret;
 
-	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!secm_area)
-		return -ENOMEM;
-
 	if (enable && !css->cm_enabled) {
 		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 		if (!css->cub_addr1 || !css->cub_addr2) {
 			free_page((unsigned long)css->cub_addr1);
 			free_page((unsigned long)css->cub_addr2);
-			free_page((unsigned long)secm_area);
 			return -ENOMEM;
 		}
 	}
-	ret = __chsc_do_secm(css, enable, secm_area);
+	ret = __chsc_do_secm(css, enable);
 	if (!ret) {
 		css->cm_enabled = enable;
 		if (css->cm_enabled) {
 			ret = chsc_add_cmg_attr(css);
 			if (ret) {
-				memset(secm_area, 0, PAGE_SIZE);
-				__chsc_do_secm(css, 0, secm_area);
+				__chsc_do_secm(css, 0);
 				css->cm_enabled = 0;
 			}
 		} else
@@ -638,44 +637,24 @@ chsc_secm(struct channel_subsystem *css, int enable)
 		free_page((unsigned long)css->cub_addr1);
 		free_page((unsigned long)css->cub_addr2);
 	}
-	free_page((unsigned long)secm_area);
 	return ret;
 }
 
 int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
-				     int c, int m,
-				     struct chsc_response_struct *resp)
+				     int c, int m, void *page)
 {
+	struct chsc_scpd *scpd_area;
 	int ccode, ret;
 
-	struct {
-		struct chsc_header request;
-		u32 : 2;
-		u32 m : 1;
-		u32 c : 1;
-		u32 fmt : 4;
-		u32 cssid : 8;
-		u32 : 4;
-		u32 rfmt : 4;
-		u32 first_chpid : 8;
-		u32 : 24;
-		u32 last_chpid : 8;
-		u32 zeroes1;
-		struct chsc_header response;
-		u8 data[PAGE_SIZE - 20];
-	} __attribute__ ((packed)) *scpd_area;
-
 	if ((rfmt == 1) && !css_general_characteristics.fcs)
 		return -EINVAL;
 	if ((rfmt == 2) && !css_general_characteristics.cib)
 		return -EINVAL;
-	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!scpd_area)
-		return -ENOMEM;
 
+	memset(page, 0, PAGE_SIZE);
+	scpd_area = page;
 	scpd_area->request.length = 0x0010;
 	scpd_area->request.code = 0x0002;
-
 	scpd_area->cssid = chpid.cssid;
 	scpd_area->first_chpid = chpid.id;
 	scpd_area->last_chpid = chpid.id;
@@ -685,20 +664,13 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
 	scpd_area->rfmt = rfmt;
 
 	ccode = chsc(scpd_area);
-	if (ccode > 0) {
-		ret = (ccode == 3) ? -ENODEV : -EBUSY;
-		goto out;
-	}
+	if (ccode > 0)
+		return (ccode == 3) ? -ENODEV : -EBUSY;
 
 	ret = chsc_error_from_response(scpd_area->response.code);
-	if (ret == 0)
-		/* Success. */
-		memcpy(resp, &scpd_area->response, scpd_area->response.length);
-	else
+	if (ret)
 		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
 			      scpd_area->response.code);
-out:
-	free_page((unsigned long)scpd_area);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
@@ -707,17 +679,19 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid,
 					  struct channel_path_desc *desc)
 {
 	struct chsc_response_struct *chsc_resp;
+	struct chsc_scpd *scpd_area;
+	unsigned long flags;
 	int ret;
 
-	chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
-	if (!chsc_resp)
-		return -ENOMEM;
-	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	scpd_area = chsc_page;
+	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
 	if (ret)
-		goto out_free;
+		goto out;
+	chsc_resp = (void *)&scpd_area->response;
 	memcpy(desc, &chsc_resp->data, sizeof(*desc));
-out_free:
-	kfree(chsc_resp);
+out:
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
 	return ret;
 }
 
@@ -725,33 +699,22 @@ static void
 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
 			  struct cmg_chars *chars)
 {
-	switch (chp->cmg) {
-	case 2:
-	case 3:
-		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
-					 GFP_KERNEL);
-		if (chp->cmg_chars) {
-			int i, mask;
-			struct cmg_chars *cmg_chars;
-
-			cmg_chars = chp->cmg_chars;
-			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
-				mask = 0x80 >> (i + 3);
-				if (cmcv & mask)
-					cmg_chars->values[i] = chars->values[i];
-				else
-					cmg_chars->values[i] = 0;
-			}
-		}
-		break;
-	default:
-		/* No cmg-dependent data. */
-		break;
+	struct cmg_chars *cmg_chars;
+	int i, mask;
+
+	cmg_chars = chp->cmg_chars;
+	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
+		mask = 0x80 >> (i + 3);
+		if (cmcv & mask)
+			cmg_chars->values[i] = chars->values[i];
+		else
+			cmg_chars->values[i] = 0;
 	}
 }
 
 int chsc_get_channel_measurement_chars(struct channel_path *chp)
 {
+	struct cmg_chars *cmg_chars;
 	int ccode, ret;
 
 	struct {
@@ -775,13 +738,16 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
 		u32 data[NR_MEASUREMENT_CHARS];
 	} __attribute__ ((packed)) *scmc_area;
 
-	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!scmc_area)
+	chp->cmg_chars = NULL;
+	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
+	if (!cmg_chars)
 		return -ENOMEM;
 
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	scmc_area = chsc_page;
 	scmc_area->request.length = 0x0010;
 	scmc_area->request.code = 0x0022;
-
 	scmc_area->first_chpid = chp->chpid.id;
 	scmc_area->last_chpid = chp->chpid.id;
 
@@ -792,53 +758,65 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
 	}
 
 	ret = chsc_error_from_response(scmc_area->response.code);
-	if (ret == 0) {
-		/* Success. */
-		if (!scmc_area->not_valid) {
-			chp->cmg = scmc_area->cmg;
-			chp->shared = scmc_area->shared;
-			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
-						  (struct cmg_chars *)
-						  &scmc_area->data);
-		} else {
-			chp->cmg = -1;
-			chp->shared = -1;
-		}
-	} else {
+	if (ret) {
 		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
 			      scmc_area->response.code);
+		goto out;
+	}
+	if (scmc_area->not_valid) {
+		chp->cmg = -1;
+		chp->shared = -1;
+		goto out;
 	}
+	chp->cmg = scmc_area->cmg;
+	chp->shared = scmc_area->shared;
+	if (chp->cmg != 2 && chp->cmg != 3) {
+		/* No cmg-dependent data. */
+		goto out;
+	}
+	chp->cmg_chars = cmg_chars;
+	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
+				  (struct cmg_chars *) &scmc_area->data);
 out:
-	free_page((unsigned long)scmc_area);
+	spin_unlock_irq(&chsc_page_lock);
+	if (!chp->cmg_chars)
+		kfree(cmg_chars);
+
 	return ret;
 }
 
-int __init chsc_alloc_sei_area(void)
+int __init chsc_init(void)
 {
 	int ret;
 
 	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!sei_page) {
-		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
-			      "chsc machine checks!\n");
-		return -ENOMEM;
+	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sei_page || !chsc_page) {
+		ret = -ENOMEM;
+		goto out_err;
 	}
 	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
 	if (ret)
-		kfree(sei_page);
+		goto out_err;
+	return ret;
+out_err:
+	free_page((unsigned long)chsc_page);
+	free_page((unsigned long)sei_page);
 	return ret;
 }
 
-void __init chsc_free_sei_area(void)
+void __init chsc_init_cleanup(void)
 {
 	crw_unregister_handler(CRW_RSC_CSS);
-	kfree(sei_page);
+	free_page((unsigned long)chsc_page);
+	free_page((unsigned long)sei_page);
 }
 
 int chsc_enable_facility(int operation_code)
 {
+	unsigned long flags;
 	int ret;
-	static struct {
+	struct {
 		struct chsc_header request;
 		u8 reserved1:4;
 		u8 format:4;
@@ -851,32 +829,33 @@ int chsc_enable_facility(int operation_code)
 		u32 reserved5:4;
 		u32 format2:4;
 		u32 reserved6:24;
-	} __attribute__ ((packed, aligned(4096))) sda_area;
+	} __attribute__ ((packed)) *sda_area;
 
-	spin_lock(&sda_lock);
-	memset(&sda_area, 0, sizeof(sda_area));
-	sda_area.request.length = 0x0400;
-	sda_area.request.code = 0x0031;
-	sda_area.operation_code = operation_code;
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	sda_area = chsc_page;
+	sda_area->request.length = 0x0400;
+	sda_area->request.code = 0x0031;
+	sda_area->operation_code = operation_code;
 
-	ret = chsc(&sda_area);
+	ret = chsc(sda_area);
 	if (ret > 0) {
 		ret = (ret == 3) ? -ENODEV : -EBUSY;
 		goto out;
 	}
 
-	switch (sda_area.response.code) {
+	switch (sda_area->response.code) {
 	case 0x0101:
 		ret = -EOPNOTSUPP;
 		break;
 	default:
-		ret = chsc_error_from_response(sda_area.response.code);
+		ret = chsc_error_from_response(sda_area->response.code);
 	}
 	if (ret != 0)
 		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
-			      operation_code, sda_area.response.code);
- out:
-	spin_unlock(&sda_lock);
+			      operation_code, sda_area->response.code);
+out:
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
 	return ret;
 }
 
@@ -895,13 +874,12 @@ chsc_determine_css_characteristics(void)
 		struct chsc_header response;
 		u32 reserved4;
 		u32 general_char[510];
-		u32 chsc_char[518];
+		u32 chsc_char[508];
 	} __attribute__ ((packed)) *scsc_area;
 
-	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!scsc_area)
-		return -ENOMEM;
-
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	scsc_area = chsc_page;
 	scsc_area->request.length = 0x0010;
 	scsc_area->request.code = 0x0010;
 
@@ -921,7 +899,7 @@ chsc_determine_css_characteristics(void)
 		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
 			      scsc_area->response.code);
 exit:
-	free_page ((unsigned long) scsc_area);
+	spin_unlock_irq(&chsc_page_lock);
 	return result;
 }
 
@@ -976,29 +954,29 @@ int chsc_sstpi(void *page, void *result, size_t size)
 	return (rr->response.code == 0x0001) ? 0 : -EIO;
 }
 
-static struct {
-	struct chsc_header request;
-	u32 word1;
-	struct subchannel_id sid;
-	u32 word3;
-	struct chsc_header response;
-	u32 word[11];
-} __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE)));
-
 int chsc_siosl(struct subchannel_id schid)
 {
+	struct {
+		struct chsc_header request;
+		u32 word1;
+		struct subchannel_id sid;
+		u32 word3;
+		struct chsc_header response;
+		u32 word[11];
+	} __attribute__ ((packed)) *siosl_area;
 	unsigned long flags;
 	int ccode;
 	int rc;
 
-	spin_lock_irqsave(&siosl_lock, flags);
-	memset(&siosl_area, 0, sizeof(siosl_area));
-	siosl_area.request.length = 0x0010;
-	siosl_area.request.code = 0x0046;
-	siosl_area.word1 = 0x80000000;
-	siosl_area.sid = schid;
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	siosl_area = chsc_page;
+	siosl_area->request.length = 0x0010;
+	siosl_area->request.code = 0x0046;
+	siosl_area->word1 = 0x80000000;
+	siosl_area->sid = schid;
 
-	ccode = chsc(&siosl_area);
+	ccode = chsc(siosl_area);
 	if (ccode > 0) {
 		if (ccode == 3)
 			rc = -ENODEV;
@@ -1008,17 +986,16 @@ int chsc_siosl(struct subchannel_id schid)
 			       schid.ssid, schid.sch_no, ccode);
 		goto out;
 	}
-	rc = chsc_error_from_response(siosl_area.response.code);
+	rc = chsc_error_from_response(siosl_area->response.code);
 	if (rc)
 		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
 			      schid.ssid, schid.sch_no,
-			      siosl_area.response.code);
+			      siosl_area->response.code);
 	else
 		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
 			      schid.ssid, schid.sch_no);
 out:
-	spin_unlock_irqrestore(&siosl_lock, flags);
-
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(chsc_siosl);
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 5453013f094b..6693f5e3176f 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -57,21 +57,39 @@ struct chsc_ssd_info {
 	struct chp_id chpid[8];
 	u16 fla[8];
 };
+
+struct chsc_scpd {
+	struct chsc_header request;
+	u32:2;
+	u32 m:1;
+	u32 c:1;
+	u32 fmt:4;
+	u32 cssid:8;
+	u32:4;
+	u32 rfmt:4;
+	u32 first_chpid:8;
+	u32:24;
+	u32 last_chpid:8;
+	u32 zeroes1;
+	struct chsc_header response;
+	u8 data[PAGE_SIZE - 20];
+} __attribute__ ((packed));
+
+
 extern int chsc_get_ssd_info(struct subchannel_id schid,
 			     struct chsc_ssd_info *ssd);
 extern int chsc_determine_css_characteristics(void);
-extern int chsc_alloc_sei_area(void);
-extern void chsc_free_sei_area(void);
+extern int chsc_init(void);
+extern void chsc_init_cleanup(void);
 
 extern int chsc_enable_facility(int);
 struct channel_subsystem;
 extern int chsc_secm(struct channel_subsystem *, int);
-int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page);
+int __chsc_do_secm(struct channel_subsystem *css, int enable);
 
 int chsc_chp_vary(struct chp_id chpid, int on);
 int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
-				     int c, int m,
-				     struct chsc_response_struct *resp);
+				     int c, int m, void *page);
 int chsc_determine_base_channel_path_desc(struct chp_id chpid,
 					  struct channel_path_desc *desc);
 void chsc_chp_online(struct chp_id chpid);
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index f2b77e7bfc6f..3c3f3ffe2179 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -688,25 +688,31 @@ out_free:
 
 static int chsc_ioctl_chpd(void __user *user_chpd)
 {
+	struct chsc_scpd *scpd_area;
 	struct chsc_cpd_info *chpd;
 	int ret;
 
 	chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
-	if (!chpd)
-		return -ENOMEM;
+	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scpd_area || !chpd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
 	if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
 		ret = -EFAULT;
 		goto out_free;
 	}
 	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
 					       chpd->rfmt, chpd->c, chpd->m,
-					       &chpd->chpdb);
+					       scpd_area);
 	if (ret)
 		goto out_free;
+	memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
 	if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
 		ret = -EFAULT;
 out_free:
 	kfree(chpd);
+	free_page((unsigned long)scpd_area);
 	return ret;
 }
 
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index ca8e1c240c3c..a5050e217150 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * driver for channel subsystem 2 * driver for channel subsystem
3 * 3 *
4 * Copyright IBM Corp. 2002, 2009 4 * Copyright IBM Corp. 2002, 2010
5 * 5 *
6 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
7 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
@@ -577,7 +577,7 @@ static int __unset_registered(struct device *dev, void *data)
577 return 0; 577 return 0;
578} 578}
579 579
580void css_schedule_eval_all_unreg(void) 580static void css_schedule_eval_all_unreg(void)
581{ 581{
582 unsigned long flags; 582 unsigned long flags;
583 struct idset *unreg_set; 583 struct idset *unreg_set;
@@ -790,7 +790,6 @@ static struct notifier_block css_reboot_notifier = {
790static int css_power_event(struct notifier_block *this, unsigned long event, 790static int css_power_event(struct notifier_block *this, unsigned long event,
791 void *ptr) 791 void *ptr)
792{ 792{
793 void *secm_area;
794 int ret, i; 793 int ret, i;
795 794
796 switch (event) { 795 switch (event) {
@@ -806,15 +805,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
806 mutex_unlock(&css->mutex); 805 mutex_unlock(&css->mutex);
807 continue; 806 continue;
808 } 807 }
809 secm_area = (void *)get_zeroed_page(GFP_KERNEL | 808 if (__chsc_do_secm(css, 0))
810 GFP_DMA);
811 if (secm_area) {
812 if (__chsc_do_secm(css, 0, secm_area))
813 ret = NOTIFY_BAD;
814 free_page((unsigned long)secm_area);
815 } else
816 ret = NOTIFY_BAD; 809 ret = NOTIFY_BAD;
817
818 mutex_unlock(&css->mutex); 810 mutex_unlock(&css->mutex);
819 } 811 }
820 break; 812 break;
@@ -830,15 +822,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
830 mutex_unlock(&css->mutex); 822 mutex_unlock(&css->mutex);
831 continue; 823 continue;
832 } 824 }
833 secm_area = (void *)get_zeroed_page(GFP_KERNEL | 825 if (__chsc_do_secm(css, 1))
834 GFP_DMA);
835 if (secm_area) {
836 if (__chsc_do_secm(css, 1, secm_area))
837 ret = NOTIFY_BAD;
838 free_page((unsigned long)secm_area);
839 } else
840 ret = NOTIFY_BAD; 826 ret = NOTIFY_BAD;
841
842 mutex_unlock(&css->mutex); 827 mutex_unlock(&css->mutex);
843 } 828 }
844 /* search for subchannels, which appeared during hibernation */ 829 /* search for subchannels, which appeared during hibernation */
@@ -863,14 +848,11 @@ static int __init css_bus_init(void)
863{ 848{
864 int ret, i; 849 int ret, i;
865 850
866 ret = chsc_determine_css_characteristics(); 851 ret = chsc_init();
867 if (ret == -ENOMEM)
868 goto out;
869
870 ret = chsc_alloc_sei_area();
871 if (ret) 852 if (ret)
872 goto out; 853 return ret;
873 854
855 chsc_determine_css_characteristics();
874 /* Try to enable MSS. */ 856 /* Try to enable MSS. */
875 ret = chsc_enable_facility(CHSC_SDA_OC_MSS); 857 ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
876 if (ret) 858 if (ret)
@@ -956,9 +938,9 @@ out_unregister:
956 } 938 }
957 bus_unregister(&css_bus_type); 939 bus_unregister(&css_bus_type);
958out: 940out:
959 crw_unregister_handler(CRW_RSC_CSS); 941 crw_unregister_handler(CRW_RSC_SCH);
960 chsc_free_sei_area();
961 idset_free(slow_subchannel_set); 942 idset_free(slow_subchannel_set);
943 chsc_init_cleanup();
962 pr_alert("The CSS device driver initialization failed with " 944 pr_alert("The CSS device driver initialization failed with "
963 "errno=%d\n", ret); 945 "errno=%d\n", ret);
964 return ret; 946 return ret;
@@ -978,9 +960,9 @@ static void __init css_bus_cleanup(void)
978 device_unregister(&css->device); 960 device_unregister(&css->device);
979 } 961 }
980 bus_unregister(&css_bus_type); 962 bus_unregister(&css_bus_type);
981 crw_unregister_handler(CRW_RSC_CSS); 963 crw_unregister_handler(CRW_RSC_SCH);
982 chsc_free_sei_area();
983 idset_free(slow_subchannel_set); 964 idset_free(slow_subchannel_set);
965 chsc_init_cleanup();
984 isc_unregister(IO_SCH_ISC); 966 isc_unregister(IO_SCH_ISC);
985} 967}
986 968
@@ -1048,7 +1030,16 @@ subsys_initcall_sync(channel_subsystem_init_sync);
1048 1030
1049void channel_subsystem_reinit(void) 1031void channel_subsystem_reinit(void)
1050{ 1032{
1033 struct channel_path *chp;
1034 struct chp_id chpid;
1035
1051 chsc_enable_facility(CHSC_SDA_OC_MSS); 1036 chsc_enable_facility(CHSC_SDA_OC_MSS);
1037 chp_id_for_each(&chpid) {
1038 chp = chpid_to_chp(chpid);
1039 if (!chp)
1040 continue;
1041 chsc_determine_base_channel_path_desc(chpid, &chp->desc);
1042 }
1052} 1043}
1053 1044
1054#ifdef CONFIG_PROC_FS 1045#ifdef CONFIG_PROC_FS
@@ -1200,6 +1191,7 @@ static int css_pm_restore(struct device *dev)
1200 struct subchannel *sch = to_subchannel(dev); 1191 struct subchannel *sch = to_subchannel(dev);
1201 struct css_driver *drv; 1192 struct css_driver *drv;
1202 1193
1194 css_update_ssd_info(sch);
1203 if (!sch->dev.driver) 1195 if (!sch->dev.driver)
1204 return 0; 1196 return 0;
1205 drv = to_cssdriver(sch->dev.driver); 1197 drv = to_cssdriver(sch->dev.driver);
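channel_subsystem_reinit() now walks the whole channel-path ID space after resume and refreshes the cached descriptor of every path that is actually known, and css_pm_restore() likewise refreshes the subchannel's SSD info. A compact sketch of the iterate-and-refresh idiom (the table size and types below are assumptions for illustration):

#include <string.h>

#define MAX_CHPID 256

struct chp_desc_sketch {
	unsigned char data[32];		/* placeholder descriptor contents */
};

struct chp_sketch {
	int known;			/* is a channel path registered here? */
	struct chp_desc_sketch desc;
};

static struct chp_sketch chp_table[MAX_CHPID];

/* Hypothetical stand-in for re-reading one descriptor from firmware. */
static void refresh_desc(int chpid, struct chp_desc_sketch *desc)
{
	memset(desc, 0, sizeof(*desc));
	desc->data[0] = (unsigned char)chpid;
}

static void reinit_channel_paths(void)
{
	int chpid;

	for (chpid = 0; chpid < MAX_CHPID; chpid++) {
		if (!chp_table[chpid].known)
			continue;	/* nothing registered for this ID */
		refresh_desc(chpid, &chp_table[chpid].desc);
	}
}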
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 51bd3687d163..2ff8a22d4257 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1147,6 +1147,7 @@ err:
1147static int io_subchannel_chp_event(struct subchannel *sch, 1147static int io_subchannel_chp_event(struct subchannel *sch,
1148 struct chp_link *link, int event) 1148 struct chp_link *link, int event)
1149{ 1149{
1150 struct ccw_device *cdev = sch_get_cdev(sch);
1150 int mask; 1151 int mask;
1151 1152
1152 mask = chp_ssd_get_mask(&sch->ssd_info, link); 1153 mask = chp_ssd_get_mask(&sch->ssd_info, link);
@@ -1156,22 +1157,30 @@ static int io_subchannel_chp_event(struct subchannel *sch,
1156 case CHP_VARY_OFF: 1157 case CHP_VARY_OFF:
1157 sch->opm &= ~mask; 1158 sch->opm &= ~mask;
1158 sch->lpm &= ~mask; 1159 sch->lpm &= ~mask;
1160 if (cdev)
1161 cdev->private->path_gone_mask |= mask;
1159 io_subchannel_terminate_path(sch, mask); 1162 io_subchannel_terminate_path(sch, mask);
1160 break; 1163 break;
1161 case CHP_VARY_ON: 1164 case CHP_VARY_ON:
1162 sch->opm |= mask; 1165 sch->opm |= mask;
1163 sch->lpm |= mask; 1166 sch->lpm |= mask;
1167 if (cdev)
1168 cdev->private->path_new_mask |= mask;
1164 io_subchannel_verify(sch); 1169 io_subchannel_verify(sch);
1165 break; 1170 break;
1166 case CHP_OFFLINE: 1171 case CHP_OFFLINE:
1167 if (cio_update_schib(sch)) 1172 if (cio_update_schib(sch))
1168 return -ENODEV; 1173 return -ENODEV;
1174 if (cdev)
1175 cdev->private->path_gone_mask |= mask;
1169 io_subchannel_terminate_path(sch, mask); 1176 io_subchannel_terminate_path(sch, mask);
1170 break; 1177 break;
1171 case CHP_ONLINE: 1178 case CHP_ONLINE:
1172 if (cio_update_schib(sch)) 1179 if (cio_update_schib(sch))
1173 return -ENODEV; 1180 return -ENODEV;
1174 sch->lpm |= mask & sch->opm; 1181 sch->lpm |= mask & sch->opm;
1182 if (cdev)
1183 cdev->private->path_new_mask |= mask;
1175 io_subchannel_verify(sch); 1184 io_subchannel_verify(sch);
1176 break; 1185 break;
1177 } 1186 }
@@ -1196,6 +1205,7 @@ static void io_subchannel_quiesce(struct subchannel *sch)
1196 cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO)); 1205 cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
1197 while (ret == -EBUSY) { 1206 while (ret == -EBUSY) {
1198 cdev->private->state = DEV_STATE_QUIESCE; 1207 cdev->private->state = DEV_STATE_QUIESCE;
1208 cdev->private->iretry = 255;
1199 ret = ccw_device_cancel_halt_clear(cdev); 1209 ret = ccw_device_cancel_halt_clear(cdev);
1200 if (ret == -EBUSY) { 1210 if (ret == -EBUSY) {
1201 ccw_device_set_timeout(cdev, HZ/10); 1211 ccw_device_set_timeout(cdev, HZ/10);
@@ -1468,9 +1478,13 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
1468 goto out; 1478 goto out;
1469 break; 1479 break;
1470 case IO_SCH_UNREG_ATTACH: 1480 case IO_SCH_UNREG_ATTACH:
1481 if (cdev->private->flags.resuming) {
1482 /* Device will be handled later. */
1483 rc = 0;
1484 goto out;
1485 }
1471 /* Unregister ccw device. */ 1486 /* Unregister ccw device. */
1472 if (!cdev->private->flags.resuming) 1487 ccw_device_unregister(cdev);
1473 ccw_device_unregister(cdev);
1474 break; 1488 break;
1475 default: 1489 default:
1476 break; 1490 break;
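The io_subchannel_chp_event() changes accumulate the affected path mask into path_gone_mask or path_new_mask so that a later verification pass can tell the driver which paths changed, rather than reporting each event in isolation. A small standalone sketch of that accumulate-then-report idea (field and event names are illustrative):

#include <stdint.h>
#include <stdio.h>

enum { EV_PATH_OFF, EV_PATH_ON };

struct dev_sketch {
	uint8_t path_gone_mask;		/* paths that became unavailable */
	uint8_t path_new_mask;		/* paths that became available */
};

/* Remember the affected paths until the device is verified. */
static void note_path_event(struct dev_sketch *dev, int event, uint8_t mask)
{
	if (event == EV_PATH_OFF)
		dev->path_gone_mask |= mask;
	else
		dev->path_new_mask |= mask;
}

/* Hand the accumulated state to the driver in one go, then clear it. */
static void report_and_reset(struct dev_sketch *dev)
{
	printf("gone=%02x new=%02x\n",
	       dev->path_gone_mask, dev->path_new_mask);
	dev->path_gone_mask = 0;
	dev->path_new_mask = 0;
}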
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index c9b852647f01..a845695ac314 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -174,7 +174,10 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
174 ret = cio_clear (sch); 174 ret = cio_clear (sch);
175 return (ret == 0) ? -EBUSY : ret; 175 return (ret == 0) ? -EBUSY : ret;
176 } 176 }
177 panic("Can't stop i/o on subchannel.\n"); 177 /* Function was unsuccessful */
178 CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
179 cdev->private->dev_id.ssid, cdev->private->dev_id.devno);
180 return -EIO;
178} 181}
179 182
180void ccw_device_update_sense_data(struct ccw_device *cdev) 183void ccw_device_update_sense_data(struct ccw_device *cdev)
@@ -349,9 +352,13 @@ out:
349 352
350static void ccw_device_oper_notify(struct ccw_device *cdev) 353static void ccw_device_oper_notify(struct ccw_device *cdev)
351{ 354{
355 struct subchannel *sch = to_subchannel(cdev->dev.parent);
356
352 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) { 357 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
353 /* Reenable channel measurements, if needed. */ 358 /* Reenable channel measurements, if needed. */
354 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); 359 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
360 /* Save indication for new paths. */
361 cdev->private->path_new_mask = sch->vpm;
355 return; 362 return;
356 } 363 }
357 /* Driver doesn't want device back. */ 364 /* Driver doesn't want device back. */
@@ -462,6 +469,32 @@ static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
462 } 469 }
463} 470}
464 471
472static void ccw_device_report_path_events(struct ccw_device *cdev)
473{
474 struct subchannel *sch = to_subchannel(cdev->dev.parent);
475 int path_event[8];
476 int chp, mask;
477
478 for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
479 path_event[chp] = PE_NONE;
480 if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
481 path_event[chp] |= PE_PATH_GONE;
482 if (mask & cdev->private->path_new_mask & sch->vpm)
483 path_event[chp] |= PE_PATH_AVAILABLE;
484 if (mask & cdev->private->pgid_reset_mask & sch->vpm)
485 path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
486 }
487 if (cdev->online && cdev->drv->path_event)
488 cdev->drv->path_event(cdev, path_event);
489}
490
491static void ccw_device_reset_path_events(struct ccw_device *cdev)
492{
493 cdev->private->path_gone_mask = 0;
494 cdev->private->path_new_mask = 0;
495 cdev->private->pgid_reset_mask = 0;
496}
497
465void 498void
466ccw_device_verify_done(struct ccw_device *cdev, int err) 499ccw_device_verify_done(struct ccw_device *cdev, int err)
467{ 500{
@@ -498,6 +531,7 @@ callback:
498 &cdev->private->irb); 531 &cdev->private->irb);
499 memset(&cdev->private->irb, 0, sizeof(struct irb)); 532 memset(&cdev->private->irb, 0, sizeof(struct irb));
500 } 533 }
534 ccw_device_report_path_events(cdev);
501 break; 535 break;
502 case -ETIME: 536 case -ETIME:
503 case -EUSERS: 537 case -EUSERS:
@@ -516,6 +550,7 @@ callback:
516 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 550 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
517 break; 551 break;
518 } 552 }
553 ccw_device_reset_path_events(cdev);
519} 554}
520 555
521/* 556/*
@@ -734,13 +769,14 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
734 int ret; 769 int ret;
735 770
736 ccw_device_set_timeout(cdev, 0); 771 ccw_device_set_timeout(cdev, 0);
772 cdev->private->iretry = 255;
737 ret = ccw_device_cancel_halt_clear(cdev); 773 ret = ccw_device_cancel_halt_clear(cdev);
738 if (ret == -EBUSY) { 774 if (ret == -EBUSY) {
739 ccw_device_set_timeout(cdev, 3*HZ); 775 ccw_device_set_timeout(cdev, 3*HZ);
740 cdev->private->state = DEV_STATE_TIMEOUT_KILL; 776 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
741 return; 777 return;
742 } 778 }
743 if (ret == -ENODEV) 779 if (ret)
744 dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 780 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
745 else if (cdev->handler) 781 else if (cdev->handler)
746 cdev->handler(cdev, cdev->private->intparm, 782 cdev->handler(cdev, cdev->private->intparm,
@@ -837,6 +873,7 @@ void ccw_device_kill_io(struct ccw_device *cdev)
837{ 873{
838 int ret; 874 int ret;
839 875
876 cdev->private->iretry = 255;
840 ret = ccw_device_cancel_halt_clear(cdev); 877 ret = ccw_device_cancel_halt_clear(cdev);
841 if (ret == -EBUSY) { 878 if (ret == -EBUSY) {
842 ccw_device_set_timeout(cdev, 3*HZ); 879 ccw_device_set_timeout(cdev, 3*HZ);
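ccw_device_report_path_events() above expands the accumulated 8-bit masks into an eight-entry per-path event array, walking the mask from the most significant bit down, one entry per channel path. A standalone sketch of that conversion (the event constants are placeholders, not the kernel's PE_* values):

#include <stdint.h>

enum {
	PE_NONE_SKETCH      = 0,
	PE_GONE_SKETCH      = 1,
	PE_AVAILABLE_SKETCH = 2,
};

/* Expand msb-first path masks into one event word per channel path. */
static void masks_to_events(uint8_t gone, uint8_t avail, int event[8])
{
	uint8_t mask = 0x80;
	int chp;

	for (chp = 0; chp < 8; chp++, mask >>= 1) {
		event[chp] = PE_NONE_SKETCH;
		if (gone & mask)
			event[chp] |= PE_GONE_SKETCH;
		if (avail & mask)
			event[chp] |= PE_AVAILABLE_SKETCH;
	}
}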
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 82a5ad0d63f6..07a4fd29f096 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -213,6 +213,17 @@ static void spid_start(struct ccw_device *cdev)
213 spid_do(cdev); 213 spid_do(cdev);
214} 214}
215 215
216static int pgid_is_reset(struct pgid *p)
217{
218 char *c;
219
220 for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
221 if (*c != 0)
222 return 0;
223 }
224 return 1;
225}
226
216static int pgid_cmp(struct pgid *p1, struct pgid *p2) 227static int pgid_cmp(struct pgid *p1, struct pgid *p2)
217{ 228{
218 return memcmp((char *) p1 + 1, (char *) p2 + 1, 229 return memcmp((char *) p1 + 1, (char *) p2 + 1,
@@ -223,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
223 * Determine pathgroup state from PGID data. 234 * Determine pathgroup state from PGID data.
224 */ 235 */
225static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, 236static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
226 int *mismatch, int *reserved, int *reset) 237 int *mismatch, int *reserved, u8 *reset)
227{ 238{
228 struct pgid *pgid = &cdev->private->pgid[0]; 239 struct pgid *pgid = &cdev->private->pgid[0];
229 struct pgid *first = NULL; 240 struct pgid *first = NULL;
@@ -238,9 +249,8 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
238 continue; 249 continue;
239 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE) 250 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
240 *reserved = 1; 251 *reserved = 1;
241 if (pgid->inf.ps.state1 == SNID_STATE1_RESET) { 252 if (pgid_is_reset(pgid)) {
242 /* A PGID was reset. */ 253 *reset |= lpm;
243 *reset = 1;
244 continue; 254 continue;
245 } 255 }
246 if (!first) { 256 if (!first) {
@@ -307,7 +317,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
307 struct pgid *pgid; 317 struct pgid *pgid;
308 int mismatch = 0; 318 int mismatch = 0;
309 int reserved = 0; 319 int reserved = 0;
310 int reset = 0; 320 u8 reset = 0;
311 u8 donepm; 321 u8 donepm;
312 322
313 if (rc) 323 if (rc)
@@ -321,11 +331,12 @@ static void snid_done(struct ccw_device *cdev, int rc)
321 donepm = pgid_to_donepm(cdev); 331 donepm = pgid_to_donepm(cdev);
322 sch->vpm = donepm & sch->opm; 332 sch->vpm = donepm & sch->opm;
323 cdev->private->pgid_todo_mask &= ~donepm; 333 cdev->private->pgid_todo_mask &= ~donepm;
334 cdev->private->pgid_reset_mask |= reset;
324 pgid_fill(cdev, pgid); 335 pgid_fill(cdev, pgid);
325 } 336 }
326out: 337out:
327 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " 338 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
328 "todo=%02x mism=%d rsvd=%d reset=%d\n", id->ssid, 339 "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid,
329 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm, 340 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
330 cdev->private->pgid_todo_mask, mismatch, reserved, reset); 341 cdev->private->pgid_todo_mask, mismatch, reserved, reset);
331 switch (rc) { 342 switch (rc) {
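pgid_is_reset() above treats a PGID as reset when every byte after the first is zero, and snid_done() now records which paths reported a reset PGID in a u8 mask instead of a single flag. A self-contained sketch of both steps (the PGID size below is a placeholder, not taken from the kernel headers):

#include <stddef.h>

#define PGID_SIZE 11	/* placeholder size for illustration only */

/* Reset means: only the leading byte may be non-zero. */
static int pgid_is_reset_sketch(const unsigned char *pgid)
{
	size_t i;

	for (i = 1; i < PGID_SIZE; i++) {
		if (pgid[i] != 0)
			return 0;
	}
	return 1;
}

/* Collect the per-path reset indication into a bit mask, one bit per
 * channel path, instead of a single yes/no flag. */
static unsigned char collect_reset_mask(const unsigned char pgids[8][PGID_SIZE])
{
	unsigned char reset = 0, lpm = 0x80;
	int chp;

	for (chp = 0; chp < 8; chp++, lpm >>= 1) {
		if (pgid_is_reset_sketch(pgids[chp]))
			reset |= lpm;
	}
	return reset;
}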
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 469ef93f2302..d024d2c21897 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -151,8 +151,11 @@ struct ccw_device_private {
151 struct subchannel_id schid; /* subchannel number */ 151 struct subchannel_id schid; /* subchannel number */
152 struct ccw_request req; /* internal I/O request */ 152 struct ccw_request req; /* internal I/O request */
153 int iretry; 153 int iretry;
154 u8 pgid_valid_mask; /* mask of valid PGIDs */ 154 u8 pgid_valid_mask; /* mask of valid PGIDs */
155 u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */ 155 u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
156 u8 pgid_reset_mask; /* mask of PGIDs which were reset */
 157 u8 path_new_mask_placeholder;
 157 u8 path_gone_mask; /* mask of paths that became unavailable */
 158 u8 path_new_mask; /* mask of paths that became available */
156 struct { 159 struct {
157 unsigned int fast:1; /* post with "channel end" */ 160 unsigned int fast:1; /* post with "channel end" */
158 unsigned int repall:1; /* report every interrupt status */ 161 unsigned int repall:1; /* report every interrupt status */
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 91c6028d7b74..8fd8c62455e9 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -154,14 +154,7 @@ static inline int ap_instructions_available(void)
154 */ 154 */
155static int ap_interrupts_available(void) 155static int ap_interrupts_available(void)
156{ 156{
157 unsigned long long facility_bits[2]; 157 return test_facility(1) && test_facility(2);
158
159 if (stfle(facility_bits, 2) <= 1)
160 return 0;
161 if (!(facility_bits[0] & (1ULL << 61)) ||
162 !(facility_bits[1] & (1ULL << 62)))
163 return 0;
164 return 1;
165} 158}
166 159
167/** 160/**
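The ap_bus.c hunk replaces the open-coded stfle bit tests with test_facility(). In the facility list returned by STFLE, facility N is counted from the most significant bit of doubleword N/64; a minimal sketch of that numbering over a caller-supplied list (this only illustrates the mapping and is not the kernel helper):

#include <stdint.h>

/* Facility N lives in 64-bit word N/64 of the list, counted from the
 * most significant bit of that word. */
static int facility_set(const uint64_t *fac_list, unsigned int nr)
{
	return (fac_list[nr / 64] >> (63 - (nr % 64))) & 1;
}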
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 5a46b8c5d68a..375aeeaf9ea5 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -372,21 +372,22 @@ static void hotplug_devices(struct work_struct *dummy)
372/* 372/*
373 * we emulate the request_irq behaviour on top of s390 extints 373 * we emulate the request_irq behaviour on top of s390 extints
374 */ 374 */
375static void kvm_extint_handler(u16 code) 375static void kvm_extint_handler(unsigned int ext_int_code,
376 unsigned int param32, unsigned long param64)
376{ 377{
377 struct virtqueue *vq; 378 struct virtqueue *vq;
378 u16 subcode; 379 u16 subcode;
379 u32 param; 380 u32 param;
380 381
381 subcode = S390_lowcore.cpu_addr; 382 subcode = ext_int_code >> 16;
382 if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) 383 if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
383 return; 384 return;
384 385
385 /* The LSB might be overloaded, we have to mask it */ 386 /* The LSB might be overloaded, we have to mask it */
386 vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL); 387 vq = (struct virtqueue *)(param64 & ~1UL);
387 388
388 /* We use ext_params to decide what this interrupt means */ 389 /* We use ext_params to decide what this interrupt means */
389 param = S390_lowcore.ext_params & VIRTIO_PARAM_MASK; 390 param = param32 & VIRTIO_PARAM_MASK;
390 391
391 switch (param) { 392 switch (param) {
392 case VIRTIO_PARAM_CONFIG_CHANGED: 393 case VIRTIO_PARAM_CONFIG_CHANGED: