Diffstat (limited to 'drivers')
-rw-r--r--  drivers/s390/block/dasd.c        |  48
-rw-r--r--  drivers/s390/block/dasd_devmap.c |  13
-rw-r--r--  drivers/s390/block/dasd_genhd.c  |   1
-rw-r--r--  drivers/s390/block/dasd_int.h    |   1
-rw-r--r--  drivers/s390/block/dasd_proc.c   | 109
-rw-r--r--  drivers/s390/char/zcore.c        | 163
-rw-r--r--  drivers/s390/cio/ccwreq.c        |   2
-rw-r--r--  drivers/s390/cio/chsc.c          |   2
-rw-r--r--  drivers/s390/cio/chsc_sch.c      |   4
-rw-r--r--  drivers/s390/cio/cio.c           |  14
-rw-r--r--  drivers/s390/cio/crw.c           |  29
-rw-r--r--  drivers/s390/cio/css.c           |  79
-rw-r--r--  drivers/s390/cio/css.h           |   5
-rw-r--r--  drivers/s390/cio/device.c        | 160
-rw-r--r--  drivers/s390/cio/device.h        |   3
-rw-r--r--  drivers/s390/cio/device_fsm.c    |  43
-rw-r--r--  drivers/s390/cio/qdio.h          |  92
-rw-r--r--  drivers/s390/cio/qdio_debug.c    |  23
-rw-r--r--  drivers/s390/cio/qdio_main.c     |  28
-rw-r--r--  drivers/s390/cio/qdio_setup.c    |  20
-rw-r--r--  drivers/s390/cio/qdio_thinint.c  |   4
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 158
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c    |   4
23 files changed, 562 insertions(+), 443 deletions(-)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 5905936c7c60..9ab1ae40565f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/buffer_head.h> | 20 | #include <linux/buffer_head.h> |
| 21 | #include <linux/hdreg.h> | 21 | #include <linux/hdreg.h> |
| 22 | #include <linux/async.h> | 22 | #include <linux/async.h> |
| 23 | #include <linux/mutex.h> | ||
| 23 | 24 | ||
| 24 | #include <asm/ccwdev.h> | 25 | #include <asm/ccwdev.h> |
| 25 | #include <asm/ebcdic.h> | 26 | #include <asm/ebcdic.h> |
| @@ -112,6 +113,7 @@ struct dasd_device *dasd_alloc_device(void) | |||
| 112 | INIT_WORK(&device->restore_device, do_restore_device); | 113 | INIT_WORK(&device->restore_device, do_restore_device); |
| 113 | device->state = DASD_STATE_NEW; | 114 | device->state = DASD_STATE_NEW; |
| 114 | device->target = DASD_STATE_NEW; | 115 | device->target = DASD_STATE_NEW; |
| 116 | mutex_init(&device->state_mutex); | ||
| 115 | 117 | ||
| 116 | return device; | 118 | return device; |
| 117 | } | 119 | } |
| @@ -321,8 +323,8 @@ static int dasd_state_ready_to_basic(struct dasd_device *device) | |||
| 321 | device->state = DASD_STATE_READY; | 323 | device->state = DASD_STATE_READY; |
| 322 | return rc; | 324 | return rc; |
| 323 | } | 325 | } |
| 324 | dasd_destroy_partitions(block); | ||
| 325 | dasd_flush_request_queue(block); | 326 | dasd_flush_request_queue(block); |
| 327 | dasd_destroy_partitions(block); | ||
| 326 | block->blocks = 0; | 328 | block->blocks = 0; |
| 327 | block->bp_block = 0; | 329 | block->bp_block = 0; |
| 328 | block->s2b_shift = 0; | 330 | block->s2b_shift = 0; |
| @@ -484,10 +486,8 @@ static void dasd_change_state(struct dasd_device *device) | |||
| 484 | if (rc) | 486 | if (rc) |
| 485 | device->target = device->state; | 487 | device->target = device->state; |
| 486 | 488 | ||
| 487 | if (device->state == device->target) { | 489 | if (device->state == device->target) |
| 488 | wake_up(&dasd_init_waitq); | 490 | wake_up(&dasd_init_waitq); |
| 489 | dasd_put_device(device); | ||
| 490 | } | ||
| 491 | 491 | ||
| 492 | /* let user-space know that the device status changed */ | 492 | /* let user-space know that the device status changed */ |
| 493 | kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); | 493 | kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); |
| @@ -502,7 +502,9 @@ static void dasd_change_state(struct dasd_device *device) | |||
| 502 | static void do_kick_device(struct work_struct *work) | 502 | static void do_kick_device(struct work_struct *work) |
| 503 | { | 503 | { |
| 504 | struct dasd_device *device = container_of(work, struct dasd_device, kick_work); | 504 | struct dasd_device *device = container_of(work, struct dasd_device, kick_work); |
| 505 | mutex_lock(&device->state_mutex); | ||
| 505 | dasd_change_state(device); | 506 | dasd_change_state(device); |
| 507 | mutex_unlock(&device->state_mutex); | ||
| 506 | dasd_schedule_device_bh(device); | 508 | dasd_schedule_device_bh(device); |
| 507 | dasd_put_device(device); | 509 | dasd_put_device(device); |
| 508 | } | 510 | } |
| @@ -539,18 +541,19 @@ void dasd_restore_device(struct dasd_device *device) | |||
| 539 | void dasd_set_target_state(struct dasd_device *device, int target) | 541 | void dasd_set_target_state(struct dasd_device *device, int target) |
| 540 | { | 542 | { |
| 541 | dasd_get_device(device); | 543 | dasd_get_device(device); |
| 544 | mutex_lock(&device->state_mutex); | ||
| 542 | /* If we are in probeonly mode stop at DASD_STATE_READY. */ | 545 | /* If we are in probeonly mode stop at DASD_STATE_READY. */ |
| 543 | if (dasd_probeonly && target > DASD_STATE_READY) | 546 | if (dasd_probeonly && target > DASD_STATE_READY) |
| 544 | target = DASD_STATE_READY; | 547 | target = DASD_STATE_READY; |
| 545 | if (device->target != target) { | 548 | if (device->target != target) { |
| 546 | if (device->state == target) { | 549 | if (device->state == target) |
| 547 | wake_up(&dasd_init_waitq); | 550 | wake_up(&dasd_init_waitq); |
| 548 | dasd_put_device(device); | ||
| 549 | } | ||
| 550 | device->target = target; | 551 | device->target = target; |
| 551 | } | 552 | } |
| 552 | if (device->state != device->target) | 553 | if (device->state != device->target) |
| 553 | dasd_change_state(device); | 554 | dasd_change_state(device); |
| 555 | mutex_unlock(&device->state_mutex); | ||
| 556 | dasd_put_device(device); | ||
| 554 | } | 557 | } |
| 555 | 558 | ||
| 556 | /* | 559 | /* |
| @@ -1000,12 +1003,20 @@ static void dasd_handle_killed_request(struct ccw_device *cdev, | |||
| 1000 | return; | 1003 | return; |
| 1001 | } | 1004 | } |
| 1002 | 1005 | ||
| 1003 | device = (struct dasd_device *) cqr->startdev; | 1006 | device = dasd_device_from_cdev_locked(cdev); |
| 1004 | if (device == NULL || | 1007 | if (IS_ERR(device)) { |
| 1005 | device != dasd_device_from_cdev_locked(cdev) || | 1008 | DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", |
| 1006 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { | 1009 | "unable to get device from cdev"); |
| 1010 | return; | ||
| 1011 | } | ||
| 1012 | |||
| 1013 | if (!cqr->startdev || | ||
| 1014 | device != cqr->startdev || | ||
| 1015 | strncmp(cqr->startdev->discipline->ebcname, | ||
| 1016 | (char *) &cqr->magic, 4)) { | ||
| 1007 | DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", | 1017 | DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", |
| 1008 | "invalid device in request"); | 1018 | "invalid device in request"); |
| 1019 | dasd_put_device(device); | ||
| 1009 | return; | 1020 | return; |
| 1010 | } | 1021 | } |
| 1011 | 1022 | ||
| @@ -1692,7 +1703,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr) | |||
| 1692 | cqr, rc); | 1703 | cqr, rc); |
| 1693 | } else { | 1704 | } else { |
| 1694 | cqr->stopclk = get_clock(); | 1705 | cqr->stopclk = get_clock(); |
| 1695 | rc = 1; | ||
| 1696 | } | 1706 | } |
| 1697 | break; | 1707 | break; |
| 1698 | default: /* already finished or clear pending - do nothing */ | 1708 | default: /* already finished or clear pending - do nothing */ |
| @@ -2170,9 +2180,13 @@ static void dasd_flush_request_queue(struct dasd_block *block) | |||
| 2170 | static int dasd_open(struct block_device *bdev, fmode_t mode) | 2180 | static int dasd_open(struct block_device *bdev, fmode_t mode) |
| 2171 | { | 2181 | { |
| 2172 | struct dasd_block *block = bdev->bd_disk->private_data; | 2182 | struct dasd_block *block = bdev->bd_disk->private_data; |
| 2173 | struct dasd_device *base = block->base; | 2183 | struct dasd_device *base; |
| 2174 | int rc; | 2184 | int rc; |
| 2175 | 2185 | ||
| 2186 | if (!block) | ||
| 2187 | return -ENODEV; | ||
| 2188 | |||
| 2189 | base = block->base; | ||
| 2176 | atomic_inc(&block->open_count); | 2190 | atomic_inc(&block->open_count); |
| 2177 | if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { | 2191 | if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { |
| 2178 | rc = -ENODEV; | 2192 | rc = -ENODEV; |
| @@ -2285,11 +2299,6 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie) | |||
| 2285 | if (ret) | 2299 | if (ret) |
| 2286 | pr_warning("%s: Setting the DASD online failed with rc=%d\n", | 2300 | pr_warning("%s: Setting the DASD online failed with rc=%d\n", |
| 2287 | dev_name(&cdev->dev), ret); | 2301 | dev_name(&cdev->dev), ret); |
| 2288 | else { | ||
| 2289 | struct dasd_device *device = dasd_device_from_cdev(cdev); | ||
| 2290 | wait_event(dasd_init_waitq, _wait_for_device(device)); | ||
| 2291 | dasd_put_device(device); | ||
| 2292 | } | ||
| 2293 | } | 2302 | } |
| 2294 | 2303 | ||
| 2295 | /* | 2304 | /* |
| @@ -2424,6 +2433,9 @@ int dasd_generic_set_online(struct ccw_device *cdev, | |||
| 2424 | } else | 2433 | } else |
| 2425 | pr_debug("dasd_generic device %s found\n", | 2434 | pr_debug("dasd_generic device %s found\n", |
| 2426 | dev_name(&cdev->dev)); | 2435 | dev_name(&cdev->dev)); |
| 2436 | |||
| 2437 | wait_event(dasd_init_waitq, _wait_for_device(device)); | ||
| 2438 | |||
| 2427 | dasd_put_device(device); | 2439 | dasd_put_device(device); |
| 2428 | return rc; | 2440 | return rc; |
| 2429 | } | 2441 | } |
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 4cac5b54f26a..d49766f3b940 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
| @@ -874,12 +874,19 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr, | |||
| 874 | ssize_t len; | 874 | ssize_t len; |
| 875 | 875 | ||
| 876 | device = dasd_device_from_cdev(to_ccwdev(dev)); | 876 | device = dasd_device_from_cdev(to_ccwdev(dev)); |
| 877 | if (!IS_ERR(device) && device->discipline) { | 877 | if (IS_ERR(device)) |
| 878 | goto out; | ||
| 879 | else if (!device->discipline) { | ||
| 880 | dasd_put_device(device); | ||
| 881 | goto out; | ||
| 882 | } else { | ||
| 878 | len = snprintf(buf, PAGE_SIZE, "%s\n", | 883 | len = snprintf(buf, PAGE_SIZE, "%s\n", |
| 879 | device->discipline->name); | 884 | device->discipline->name); |
| 880 | dasd_put_device(device); | 885 | dasd_put_device(device); |
| 881 | } else | 886 | return len; |
| 882 | len = snprintf(buf, PAGE_SIZE, "none\n"); | 887 | } |
| 888 | out: | ||
| 889 | len = snprintf(buf, PAGE_SIZE, "none\n"); | ||
| 883 | return len; | 890 | return len; |
| 884 | } | 891 | } |
| 885 | 892 | ||
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index d3198303b93c..94f92a1247f2 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
| @@ -88,6 +88,7 @@ void dasd_gendisk_free(struct dasd_block *block) | |||
| 88 | if (block->gdp) { | 88 | if (block->gdp) { |
| 89 | del_gendisk(block->gdp); | 89 | del_gendisk(block->gdp); |
| 90 | block->gdp->queue = NULL; | 90 | block->gdp->queue = NULL; |
| 91 | block->gdp->private_data = NULL; | ||
| 91 | put_disk(block->gdp); | 92 | put_disk(block->gdp); |
| 92 | block->gdp = NULL; | 93 | block->gdp = NULL; |
| 93 | } | 94 | } |
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index e4c2143dabf6..ed73ce550822 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
| @@ -368,6 +368,7 @@ struct dasd_device { | |||
| 368 | 368 | ||
| 369 | /* Device state and target state. */ | 369 | /* Device state and target state. */ |
| 370 | int state, target; | 370 | int state, target; |
| 371 | struct mutex state_mutex; | ||
| 371 | int stopped; /* device (ccw_device_start) was stopped */ | 372 | int stopped; /* device (ccw_device_start) was stopped */ |
| 372 | 373 | ||
| 373 | /* reference count. */ | 374 | /* reference count. */ |
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 71f95f54866f..f13a0bdd148c 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
| @@ -165,51 +165,32 @@ static const struct file_operations dasd_devices_file_ops = { | |||
| 165 | .release = seq_release, | 165 | .release = seq_release, |
| 166 | }; | 166 | }; |
| 167 | 167 | ||
| 168 | static int | ||
| 169 | dasd_calc_metrics(char *page, char **start, off_t off, | ||
| 170 | int count, int *eof, int len) | ||
| 171 | { | ||
| 172 | len = (len > off) ? len - off : 0; | ||
| 173 | if (len > count) | ||
| 174 | len = count; | ||
| 175 | if (len < count) | ||
| 176 | *eof = 1; | ||
| 177 | *start = page + off; | ||
| 178 | return len; | ||
| 179 | } | ||
| 180 | |||
| 181 | #ifdef CONFIG_DASD_PROFILE | 168 | #ifdef CONFIG_DASD_PROFILE |
| 182 | static char * | 169 | static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int factor) |
| 183 | dasd_statistics_array(char *str, unsigned int *array, int factor) | ||
| 184 | { | 170 | { |
| 185 | int i; | 171 | int i; |
| 186 | 172 | ||
| 187 | for (i = 0; i < 32; i++) { | 173 | for (i = 0; i < 32; i++) { |
| 188 | str += sprintf(str, "%7d ", array[i] / factor); | 174 | seq_printf(m, "%7d ", array[i] / factor); |
| 189 | if (i == 15) | 175 | if (i == 15) |
| 190 | str += sprintf(str, "\n"); | 176 | seq_putc(m, '\n'); |
| 191 | } | 177 | } |
| 192 | str += sprintf(str,"\n"); | 178 | seq_putc(m, '\n'); |
| 193 | return str; | ||
| 194 | } | 179 | } |
| 195 | #endif /* CONFIG_DASD_PROFILE */ | 180 | #endif /* CONFIG_DASD_PROFILE */ |
| 196 | 181 | ||
| 197 | static int | 182 | static int dasd_stats_proc_show(struct seq_file *m, void *v) |
| 198 | dasd_statistics_read(char *page, char **start, off_t off, | ||
| 199 | int count, int *eof, void *data) | ||
| 200 | { | 183 | { |
| 201 | unsigned long len; | ||
| 202 | #ifdef CONFIG_DASD_PROFILE | 184 | #ifdef CONFIG_DASD_PROFILE |
| 203 | struct dasd_profile_info_t *prof; | 185 | struct dasd_profile_info_t *prof; |
| 204 | char *str; | ||
| 205 | int factor; | 186 | int factor; |
| 206 | 187 | ||
| 207 | /* check for active profiling */ | 188 | /* check for active profiling */ |
| 208 | if (dasd_profile_level == DASD_PROFILE_OFF) { | 189 | if (dasd_profile_level == DASD_PROFILE_OFF) { |
| 209 | len = sprintf(page, "Statistics are off - they might be " | 190 | seq_printf(m, "Statistics are off - they might be " |
| 210 | "switched on using 'echo set on > " | 191 | "switched on using 'echo set on > " |
| 211 | "/proc/dasd/statistics'\n"); | 192 | "/proc/dasd/statistics'\n"); |
| 212 | return dasd_calc_metrics(page, start, off, count, eof, len); | 193 | return 0; |
| 213 | } | 194 | } |
| 214 | 195 | ||
| 215 | prof = &dasd_global_profile; | 196 | prof = &dasd_global_profile; |
| @@ -217,47 +198,49 @@ dasd_statistics_read(char *page, char **start, off_t off, | |||
| 217 | for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999; | 198 | for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999; |
| 218 | factor *= 10); | 199 | factor *= 10); |
| 219 | 200 | ||
| 220 | str = page; | 201 | seq_printf(m, "%d dasd I/O requests\n", prof->dasd_io_reqs); |
| 221 | str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs); | 202 | seq_printf(m, "with %u sectors(512B each)\n", |
| 222 | str += sprintf(str, "with %u sectors(512B each)\n", | ||
| 223 | prof->dasd_io_sects); | 203 | prof->dasd_io_sects); |
| 224 | str += sprintf(str, "Scale Factor is %d\n", factor); | 204 | seq_printf(m, "Scale Factor is %d\n", factor); |
| 225 | str += sprintf(str, | 205 | seq_printf(m, |
| 226 | " __<4 ___8 __16 __32 __64 _128 " | 206 | " __<4 ___8 __16 __32 __64 _128 " |
| 227 | " _256 _512 __1k __2k __4k __8k " | 207 | " _256 _512 __1k __2k __4k __8k " |
| 228 | " _16k _32k _64k 128k\n"); | 208 | " _16k _32k _64k 128k\n"); |
| 229 | str += sprintf(str, | 209 | seq_printf(m, |
| 230 | " _256 _512 __1M __2M __4M __8M " | 210 | " _256 _512 __1M __2M __4M __8M " |
| 231 | " _16M _32M _64M 128M 256M 512M " | 211 | " _16M _32M _64M 128M 256M 512M " |
| 232 | " __1G __2G __4G " " _>4G\n"); | 212 | " __1G __2G __4G " " _>4G\n"); |
| 233 | 213 | ||
| 234 | str += sprintf(str, "Histogram of sizes (512B secs)\n"); | 214 | seq_printf(m, "Histogram of sizes (512B secs)\n"); |
| 235 | str = dasd_statistics_array(str, prof->dasd_io_secs, factor); | 215 | dasd_statistics_array(m, prof->dasd_io_secs, factor); |
| 236 | str += sprintf(str, "Histogram of I/O times (microseconds)\n"); | 216 | seq_printf(m, "Histogram of I/O times (microseconds)\n"); |
| 237 | str = dasd_statistics_array(str, prof->dasd_io_times, factor); | 217 | dasd_statistics_array(m, prof->dasd_io_times, factor); |
| 238 | str += sprintf(str, "Histogram of I/O times per sector\n"); | 218 | seq_printf(m, "Histogram of I/O times per sector\n"); |
| 239 | str = dasd_statistics_array(str, prof->dasd_io_timps, factor); | 219 | dasd_statistics_array(m, prof->dasd_io_timps, factor); |
| 240 | str += sprintf(str, "Histogram of I/O time till ssch\n"); | 220 | seq_printf(m, "Histogram of I/O time till ssch\n"); |
| 241 | str = dasd_statistics_array(str, prof->dasd_io_time1, factor); | 221 | dasd_statistics_array(m, prof->dasd_io_time1, factor); |
| 242 | str += sprintf(str, "Histogram of I/O time between ssch and irq\n"); | 222 | seq_printf(m, "Histogram of I/O time between ssch and irq\n"); |
| 243 | str = dasd_statistics_array(str, prof->dasd_io_time2, factor); | 223 | dasd_statistics_array(m, prof->dasd_io_time2, factor); |
| 244 | str += sprintf(str, "Histogram of I/O time between ssch " | 224 | seq_printf(m, "Histogram of I/O time between ssch " |
| 245 | "and irq per sector\n"); | 225 | "and irq per sector\n"); |
| 246 | str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor); | 226 | dasd_statistics_array(m, prof->dasd_io_time2ps, factor); |
| 247 | str += sprintf(str, "Histogram of I/O time between irq and end\n"); | 227 | seq_printf(m, "Histogram of I/O time between irq and end\n"); |
| 248 | str = dasd_statistics_array(str, prof->dasd_io_time3, factor); | 228 | dasd_statistics_array(m, prof->dasd_io_time3, factor); |
| 249 | str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n"); | 229 | seq_printf(m, "# of req in chanq at enqueuing (1..32) \n"); |
| 250 | str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor); | 230 | dasd_statistics_array(m, prof->dasd_io_nr_req, factor); |
| 251 | len = str - page; | ||
| 252 | #else | 231 | #else |
| 253 | len = sprintf(page, "Statistics are not activated in this kernel\n"); | 232 | seq_printf(m, "Statistics are not activated in this kernel\n"); |
| 254 | #endif | 233 | #endif |
| 255 | return dasd_calc_metrics(page, start, off, count, eof, len); | 234 | return 0; |
| 256 | } | 235 | } |
| 257 | 236 | ||
| 258 | static int | 237 | static int dasd_stats_proc_open(struct inode *inode, struct file *file) |
| 259 | dasd_statistics_write(struct file *file, const char __user *user_buf, | 238 | { |
| 260 | unsigned long user_len, void *data) | 239 | return single_open(file, dasd_stats_proc_show, NULL); |
| 240 | } | ||
| 241 | |||
| 242 | static ssize_t dasd_stats_proc_write(struct file *file, | ||
| 243 | const char __user *user_buf, size_t user_len, loff_t *pos) | ||
| 261 | { | 244 | { |
| 262 | #ifdef CONFIG_DASD_PROFILE | 245 | #ifdef CONFIG_DASD_PROFILE |
| 263 | char *buffer, *str; | 246 | char *buffer, *str; |
| @@ -308,6 +291,15 @@ out_error: | |||
| 308 | #endif /* CONFIG_DASD_PROFILE */ | 291 | #endif /* CONFIG_DASD_PROFILE */ |
| 309 | } | 292 | } |
| 310 | 293 | ||
| 294 | static const struct file_operations dasd_stats_proc_fops = { | ||
| 295 | .owner = THIS_MODULE, | ||
| 296 | .open = dasd_stats_proc_open, | ||
| 297 | .read = seq_read, | ||
| 298 | .llseek = seq_lseek, | ||
| 299 | .release = single_release, | ||
| 300 | .write = dasd_stats_proc_write, | ||
| 301 | }; | ||
| 302 | |||
| 311 | /* | 303 | /* |
| 312 | * Create dasd proc-fs entries. | 304 | * Create dasd proc-fs entries. |
| 313 | * In case creation failed, cleanup and return -ENOENT. | 305 | * In case creation failed, cleanup and return -ENOENT. |
| @@ -324,13 +316,12 @@ dasd_proc_init(void) | |||
| 324 | &dasd_devices_file_ops); | 316 | &dasd_devices_file_ops); |
| 325 | if (!dasd_devices_entry) | 317 | if (!dasd_devices_entry) |
| 326 | goto out_nodevices; | 318 | goto out_nodevices; |
| 327 | dasd_statistics_entry = create_proc_entry("statistics", | 319 | dasd_statistics_entry = proc_create("statistics", |
| 328 | S_IFREG | S_IRUGO | S_IWUSR, | 320 | S_IFREG | S_IRUGO | S_IWUSR, |
| 329 | dasd_proc_root_entry); | 321 | dasd_proc_root_entry, |
| 322 | &dasd_stats_proc_fops); | ||
| 330 | if (!dasd_statistics_entry) | 323 | if (!dasd_statistics_entry) |
| 331 | goto out_nostatistics; | 324 | goto out_nostatistics; |
| 332 | dasd_statistics_entry->read_proc = dasd_statistics_read; | ||
| 333 | dasd_statistics_entry->write_proc = dasd_statistics_write; | ||
| 334 | return 0; | 325 | return 0; |
| 335 | 326 | ||
| 336 | out_nostatistics: | 327 | out_nostatistics: |
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 82daa3c1dc9c..3438658b66b7 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
| 16 | #include <linux/miscdevice.h> | 16 | #include <linux/miscdevice.h> |
| 17 | #include <linux/debugfs.h> | 17 | #include <linux/debugfs.h> |
| 18 | #include <asm/asm-offsets.h> | ||
| 18 | #include <asm/ipl.h> | 19 | #include <asm/ipl.h> |
| 19 | #include <asm/sclp.h> | 20 | #include <asm/sclp.h> |
| 20 | #include <asm/setup.h> | 21 | #include <asm/setup.h> |
| @@ -40,12 +41,12 @@ enum arch_id { | |||
| 40 | /* dump system info */ | 41 | /* dump system info */ |
| 41 | 42 | ||
| 42 | struct sys_info { | 43 | struct sys_info { |
| 43 | enum arch_id arch; | 44 | enum arch_id arch; |
| 44 | unsigned long sa_base; | 45 | unsigned long sa_base; |
| 45 | u32 sa_size; | 46 | u32 sa_size; |
| 46 | int cpu_map[NR_CPUS]; | 47 | int cpu_map[NR_CPUS]; |
| 47 | unsigned long mem_size; | 48 | unsigned long mem_size; |
| 48 | union save_area lc_mask; | 49 | struct save_area lc_mask; |
| 49 | }; | 50 | }; |
| 50 | 51 | ||
| 51 | struct ipib_info { | 52 | struct ipib_info { |
| @@ -183,52 +184,9 @@ static int memcpy_real_user(void __user *dest, unsigned long src, size_t count) | |||
| 183 | return 0; | 184 | return 0; |
| 184 | } | 185 | } |
| 185 | 186 | ||
| 186 | #ifdef __s390x__ | ||
| 187 | /* | ||
| 188 | * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info | ||
| 189 | */ | ||
| 190 | static void __init s390x_to_s390_regs(union save_area *out, union save_area *in, | ||
| 191 | int cpu) | ||
| 192 | { | ||
| 193 | int i; | ||
| 194 | |||
| 195 | for (i = 0; i < 16; i++) { | ||
| 196 | out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff; | ||
| 197 | out->s390.acc_regs[i] = in->s390x.acc_regs[i]; | ||
| 198 | out->s390.ctrl_regs[i] = | ||
| 199 | in->s390x.ctrl_regs[i] & 0x00000000ffffffff; | ||
| 200 | } | ||
| 201 | /* locore for 31 bit has only space for fpregs 0,2,4,6 */ | ||
| 202 | out->s390.fp_regs[0] = in->s390x.fp_regs[0]; | ||
| 203 | out->s390.fp_regs[1] = in->s390x.fp_regs[2]; | ||
| 204 | out->s390.fp_regs[2] = in->s390x.fp_regs[4]; | ||
| 205 | out->s390.fp_regs[3] = in->s390x.fp_regs[6]; | ||
| 206 | memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4); | ||
| 207 | out->s390.psw[1] |= 0x8; /* set bit 12 */ | ||
| 208 | memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4); | ||
| 209 | out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */ | ||
| 210 | out->s390.pref_reg = in->s390x.pref_reg; | ||
| 211 | out->s390.timer = in->s390x.timer; | ||
| 212 | out->s390.clk_cmp = in->s390x.clk_cmp; | ||
| 213 | } | ||
| 214 | |||
| 215 | static void __init s390x_to_s390_save_areas(void) | ||
| 216 | { | ||
| 217 | int i = 1; | ||
| 218 | static union save_area tmp; | ||
| 219 | |||
| 220 | while (zfcpdump_save_areas[i]) { | ||
| 221 | s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i); | ||
| 222 | memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp)); | ||
| 223 | i++; | ||
| 224 | } | ||
| 225 | } | ||
| 226 | |||
| 227 | #endif /* __s390x__ */ | ||
| 228 | |||
| 229 | static int __init init_cpu_info(enum arch_id arch) | 187 | static int __init init_cpu_info(enum arch_id arch) |
| 230 | { | 188 | { |
| 231 | union save_area *sa; | 189 | struct save_area *sa; |
| 232 | 190 | ||
| 233 | /* get info for boot cpu from lowcore, stored in the HSA */ | 191 | /* get info for boot cpu from lowcore, stored in the HSA */ |
| 234 | 192 | ||
| @@ -241,20 +199,12 @@ static int __init init_cpu_info(enum arch_id arch) | |||
| 241 | return -EIO; | 199 | return -EIO; |
| 242 | } | 200 | } |
| 243 | zfcpdump_save_areas[0] = sa; | 201 | zfcpdump_save_areas[0] = sa; |
| 244 | |||
| 245 | #ifdef __s390x__ | ||
| 246 | /* convert s390x regs to s390, if we are dumping an s390 Linux */ | ||
| 247 | |||
| 248 | if (arch == ARCH_S390) | ||
| 249 | s390x_to_s390_save_areas(); | ||
| 250 | #endif | ||
| 251 | |||
| 252 | return 0; | 202 | return 0; |
| 253 | } | 203 | } |
| 254 | 204 | ||
| 255 | static DEFINE_MUTEX(zcore_mutex); | 205 | static DEFINE_MUTEX(zcore_mutex); |
| 256 | 206 | ||
| 257 | #define DUMP_VERSION 0x3 | 207 | #define DUMP_VERSION 0x5 |
| 258 | #define DUMP_MAGIC 0xa8190173618f23fdULL | 208 | #define DUMP_MAGIC 0xa8190173618f23fdULL |
| 259 | #define DUMP_ARCH_S390X 2 | 209 | #define DUMP_ARCH_S390X 2 |
| 260 | #define DUMP_ARCH_S390 1 | 210 | #define DUMP_ARCH_S390 1 |
| @@ -279,7 +229,14 @@ struct zcore_header { | |||
| 279 | u32 volnr; | 229 | u32 volnr; |
| 280 | u32 build_arch; | 230 | u32 build_arch; |
| 281 | u64 rmem_size; | 231 | u64 rmem_size; |
| 282 | char pad2[4016]; | 232 | u8 mvdump; |
| 233 | u16 cpu_cnt; | ||
| 234 | u16 real_cpu_cnt; | ||
| 235 | u8 end_pad1[0x200-0x061]; | ||
| 236 | u64 mvdump_sign; | ||
| 237 | u64 mvdump_zipl_time; | ||
| 238 | u8 end_pad2[0x800-0x210]; | ||
| 239 | u32 lc_vec[512]; | ||
| 283 | } __attribute__((packed,__aligned__(16))); | 240 | } __attribute__((packed,__aligned__(16))); |
| 284 | 241 | ||
| 285 | static struct zcore_header zcore_header = { | 242 | static struct zcore_header zcore_header = { |
| @@ -289,7 +246,7 @@ static struct zcore_header zcore_header = { | |||
| 289 | .dump_level = 0, | 246 | .dump_level = 0, |
| 290 | .page_size = PAGE_SIZE, | 247 | .page_size = PAGE_SIZE, |
| 291 | .mem_start = 0, | 248 | .mem_start = 0, |
| 292 | #ifdef __s390x__ | 249 | #ifdef CONFIG_64BIT |
| 293 | .build_arch = DUMP_ARCH_S390X, | 250 | .build_arch = DUMP_ARCH_S390X, |
| 294 | #else | 251 | #else |
| 295 | .build_arch = DUMP_ARCH_S390, | 252 | .build_arch = DUMP_ARCH_S390, |
| @@ -340,11 +297,7 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count) | |||
| 340 | unsigned long prefix; | 297 | unsigned long prefix; |
| 341 | unsigned long sa_off, len, buf_off; | 298 | unsigned long sa_off, len, buf_off; |
| 342 | 299 | ||
| 343 | if (sys_info.arch == ARCH_S390) | 300 | prefix = zfcpdump_save_areas[i]->pref_reg; |
| 344 | prefix = zfcpdump_save_areas[i]->s390.pref_reg; | ||
| 345 | else | ||
| 346 | prefix = zfcpdump_save_areas[i]->s390x.pref_reg; | ||
| 347 | |||
| 348 | sa_start = prefix + sys_info.sa_base; | 301 | sa_start = prefix + sys_info.sa_base; |
| 349 | sa_end = prefix + sys_info.sa_base + sys_info.sa_size; | 302 | sa_end = prefix + sys_info.sa_base + sys_info.sa_size; |
| 350 | 303 | ||
| @@ -561,34 +514,39 @@ static const struct file_operations zcore_reipl_fops = { | |||
| 561 | .release = zcore_reipl_release, | 514 | .release = zcore_reipl_release, |
| 562 | }; | 515 | }; |
| 563 | 516 | ||
| 517 | #ifdef CONFIG_32BIT | ||
| 564 | 518 | ||
| 565 | static void __init set_s390_lc_mask(union save_area *map) | 519 | static void __init set_lc_mask(struct save_area *map) |
| 566 | { | 520 | { |
| 567 | memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save)); | 521 | memset(&map->ext_save, 0xff, sizeof(map->ext_save)); |
| 568 | memset(&map->s390.timer, 0xff, sizeof(map->s390.timer)); | 522 | memset(&map->timer, 0xff, sizeof(map->timer)); |
| 569 | memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp)); | 523 | memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp)); |
| 570 | memset(&map->s390.psw, 0xff, sizeof(map->s390.psw)); | 524 | memset(&map->psw, 0xff, sizeof(map->psw)); |
| 571 | memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg)); | 525 | memset(&map->pref_reg, 0xff, sizeof(map->pref_reg)); |
| 572 | memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs)); | 526 | memset(&map->acc_regs, 0xff, sizeof(map->acc_regs)); |
| 573 | memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs)); | 527 | memset(&map->fp_regs, 0xff, sizeof(map->fp_regs)); |
| 574 | memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs)); | 528 | memset(&map->gp_regs, 0xff, sizeof(map->gp_regs)); |
| 575 | memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs)); | 529 | memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs)); |
| 576 | } | 530 | } |
| 577 | 531 | ||
| 578 | static void __init set_s390x_lc_mask(union save_area *map) | 532 | #else /* CONFIG_32BIT */ |
| 533 | |||
| 534 | static void __init set_lc_mask(struct save_area *map) | ||
| 579 | { | 535 | { |
| 580 | memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs)); | 536 | memset(&map->fp_regs, 0xff, sizeof(map->fp_regs)); |
| 581 | memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs)); | 537 | memset(&map->gp_regs, 0xff, sizeof(map->gp_regs)); |
| 582 | memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw)); | 538 | memset(&map->psw, 0xff, sizeof(map->psw)); |
| 583 | memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg)); | 539 | memset(&map->pref_reg, 0xff, sizeof(map->pref_reg)); |
| 584 | memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg)); | 540 | memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg)); |
| 585 | memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg)); | 541 | memset(&map->tod_reg, 0xff, sizeof(map->tod_reg)); |
| 586 | memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer)); | 542 | memset(&map->timer, 0xff, sizeof(map->timer)); |
| 587 | memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp)); | 543 | memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp)); |
| 588 | memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs)); | 544 | memset(&map->acc_regs, 0xff, sizeof(map->acc_regs)); |
| 589 | memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs)); | 545 | memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs)); |
| 590 | } | 546 | } |
| 591 | 547 | ||
| 548 | #endif /* CONFIG_32BIT */ | ||
| 549 | |||
| 592 | /* | 550 | /* |
| 593 | * Initialize dump globals for a given architecture | 551 | * Initialize dump globals for a given architecture |
| 594 | */ | 552 | */ |
| @@ -599,21 +557,18 @@ static int __init sys_info_init(enum arch_id arch) | |||
| 599 | switch (arch) { | 557 | switch (arch) { |
| 600 | case ARCH_S390X: | 558 | case ARCH_S390X: |
| 601 | pr_alert("DETECTED 'S390X (64 bit) OS'\n"); | 559 | pr_alert("DETECTED 'S390X (64 bit) OS'\n"); |
| 602 | sys_info.sa_base = SAVE_AREA_BASE_S390X; | ||
| 603 | sys_info.sa_size = sizeof(struct save_area_s390x); | ||
| 604 | set_s390x_lc_mask(&sys_info.lc_mask); | ||
| 605 | break; | 560 | break; |
| 606 | case ARCH_S390: | 561 | case ARCH_S390: |
| 607 | pr_alert("DETECTED 'S390 (32 bit) OS'\n"); | 562 | pr_alert("DETECTED 'S390 (32 bit) OS'\n"); |
| 608 | sys_info.sa_base = SAVE_AREA_BASE_S390; | ||
| 609 | sys_info.sa_size = sizeof(struct save_area_s390); | ||
| 610 | set_s390_lc_mask(&sys_info.lc_mask); | ||
| 611 | break; | 563 | break; |
| 612 | default: | 564 | default: |
| 613 | pr_alert("0x%x is an unknown architecture.\n",arch); | 565 | pr_alert("0x%x is an unknown architecture.\n",arch); |
| 614 | return -EINVAL; | 566 | return -EINVAL; |
| 615 | } | 567 | } |
| 568 | sys_info.sa_base = SAVE_AREA_BASE; | ||
| 569 | sys_info.sa_size = sizeof(struct save_area); | ||
| 616 | sys_info.arch = arch; | 570 | sys_info.arch = arch; |
| 571 | set_lc_mask(&sys_info.lc_mask); | ||
| 617 | rc = init_cpu_info(arch); | 572 | rc = init_cpu_info(arch); |
| 618 | if (rc) | 573 | if (rc) |
| 619 | return rc; | 574 | return rc; |
| @@ -660,8 +615,9 @@ static int __init get_mem_size(unsigned long *mem) | |||
| 660 | 615 | ||
| 661 | static int __init zcore_header_init(int arch, struct zcore_header *hdr) | 616 | static int __init zcore_header_init(int arch, struct zcore_header *hdr) |
| 662 | { | 617 | { |
| 663 | int rc; | 618 | int rc, i; |
| 664 | unsigned long memory = 0; | 619 | unsigned long memory = 0; |
| 620 | u32 prefix; | ||
| 665 | 621 | ||
| 666 | if (arch == ARCH_S390X) | 622 | if (arch == ARCH_S390X) |
| 667 | hdr->arch_id = DUMP_ARCH_S390X; | 623 | hdr->arch_id = DUMP_ARCH_S390X; |
| @@ -676,6 +632,14 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr) | |||
| 676 | hdr->num_pages = memory / PAGE_SIZE; | 632 | hdr->num_pages = memory / PAGE_SIZE; |
| 677 | hdr->tod = get_clock(); | 633 | hdr->tod = get_clock(); |
| 678 | get_cpu_id(&hdr->cpu_id); | 634 | get_cpu_id(&hdr->cpu_id); |
| 635 | for (i = 0; zfcpdump_save_areas[i]; i++) { | ||
| 636 | prefix = zfcpdump_save_areas[i]->pref_reg; | ||
| 637 | hdr->real_cpu_cnt++; | ||
| 638 | if (!prefix) | ||
| 639 | continue; | ||
| 640 | hdr->lc_vec[hdr->cpu_cnt] = prefix; | ||
| 641 | hdr->cpu_cnt++; | ||
| 642 | } | ||
| 679 | return 0; | 643 | return 0; |
| 680 | } | 644 | } |
| 681 | 645 | ||
| @@ -741,14 +705,21 @@ static int __init zcore_init(void) | |||
| 741 | if (rc) | 705 | if (rc) |
| 742 | goto fail; | 706 | goto fail; |
| 743 | 707 | ||
| 744 | #ifndef __s390x__ | 708 | #ifdef CONFIG_64BIT |
| 709 | if (arch == ARCH_S390) { | ||
| 710 | pr_alert("The 64-bit dump tool cannot be used for a " | ||
| 711 | "32-bit system\n"); | ||
| 712 | rc = -EINVAL; | ||
| 713 | goto fail; | ||
| 714 | } | ||
| 715 | #else /* CONFIG_64BIT */ | ||
| 745 | if (arch == ARCH_S390X) { | 716 | if (arch == ARCH_S390X) { |
| 746 | pr_alert("The 32-bit dump tool cannot be used for a " | 717 | pr_alert("The 32-bit dump tool cannot be used for a " |
| 747 | "64-bit system\n"); | 718 | "64-bit system\n"); |
| 748 | rc = -EINVAL; | 719 | rc = -EINVAL; |
| 749 | goto fail; | 720 | goto fail; |
| 750 | } | 721 | } |
| 751 | #endif | 722 | #endif /* CONFIG_64BIT */ |
| 752 | 723 | ||
| 753 | rc = sys_info_init(arch); | 724 | rc = sys_info_init(arch); |
| 754 | if (rc) | 725 | if (rc) |
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 7a28a3029a3f..37df42af05ec 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
| @@ -224,8 +224,8 @@ static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status) | |||
| 224 | */ | 224 | */ |
| 225 | void ccw_request_handler(struct ccw_device *cdev) | 225 | void ccw_request_handler(struct ccw_device *cdev) |
| 226 | { | 226 | { |
| 227 | struct irb *irb = (struct irb *)&S390_lowcore.irb; | ||
| 227 | struct ccw_request *req = &cdev->private->req; | 228 | struct ccw_request *req = &cdev->private->req; |
| 228 | struct irb *irb = (struct irb *) __LC_IRB; | ||
| 229 | enum io_status status; | 229 | enum io_status status; |
| 230 | int rc = -EOPNOTSUPP; | 230 | int rc = -EOPNOTSUPP; |
| 231 | 231 | ||
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1ecd3e567648..4038f5b4f144 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
| @@ -574,7 +574,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | |||
| 574 | secm_area->request.length = 0x0050; | 574 | secm_area->request.length = 0x0050; |
| 575 | secm_area->request.code = 0x0016; | 575 | secm_area->request.code = 0x0016; |
| 576 | 576 | ||
| 577 | secm_area->key = PAGE_DEFAULT_KEY; | 577 | secm_area->key = PAGE_DEFAULT_KEY >> 4; |
| 578 | secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1; | 578 | secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1; |
| 579 | secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2; | 579 | secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2; |
| 580 | 580 | ||
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index c84ac9443079..852612f5dba0 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
| @@ -51,7 +51,7 @@ static void chsc_subchannel_irq(struct subchannel *sch) | |||
| 51 | { | 51 | { |
| 52 | struct chsc_private *private = sch->private; | 52 | struct chsc_private *private = sch->private; |
| 53 | struct chsc_request *request = private->request; | 53 | struct chsc_request *request = private->request; |
| 54 | struct irb *irb = (struct irb *)__LC_IRB; | 54 | struct irb *irb = (struct irb *)&S390_lowcore.irb; |
| 55 | 55 | ||
| 56 | CHSC_LOG(4, "irb"); | 56 | CHSC_LOG(4, "irb"); |
| 57 | CHSC_LOG_HEX(4, irb, sizeof(*irb)); | 57 | CHSC_LOG_HEX(4, irb, sizeof(*irb)); |
| @@ -237,7 +237,7 @@ static int chsc_async(struct chsc_async_area *chsc_area, | |||
| 237 | int ret = -ENODEV; | 237 | int ret = -ENODEV; |
| 238 | char dbf[10]; | 238 | char dbf[10]; |
| 239 | 239 | ||
| 240 | chsc_area->header.key = PAGE_DEFAULT_KEY; | 240 | chsc_area->header.key = PAGE_DEFAULT_KEY >> 4; |
| 241 | while ((sch = chsc_get_next_subchannel(sch))) { | 241 | while ((sch = chsc_get_next_subchannel(sch))) { |
| 242 | spin_lock(sch->lock); | 242 | spin_lock(sch->lock); |
| 243 | private = sch->private; | 243 | private = sch->private; |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 126f240715a4..f736cdcf08ad 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
| @@ -625,8 +625,8 @@ void __irq_entry do_IRQ(struct pt_regs *regs) | |||
| 625 | /* | 625 | /* |
| 626 | * Get interrupt information from lowcore | 626 | * Get interrupt information from lowcore |
| 627 | */ | 627 | */ |
| 628 | tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; | 628 | tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; |
| 629 | irb = (struct irb *) __LC_IRB; | 629 | irb = (struct irb *)&S390_lowcore.irb; |
| 630 | do { | 630 | do { |
| 631 | kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; | 631 | kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; |
| 632 | /* | 632 | /* |
| @@ -661,7 +661,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs) | |||
| 661 | * We don't do this for VM because a tpi drops the cpu | 661 | * We don't do this for VM because a tpi drops the cpu |
| 662 | * out of the sie which costs more cycles than it saves. | 662 | * out of the sie which costs more cycles than it saves. |
| 663 | */ | 663 | */ |
| 664 | } while (!MACHINE_IS_VM && tpi (NULL) != 0); | 664 | } while (MACHINE_IS_LPAR && tpi(NULL) != 0); |
| 665 | irq_exit(); | 665 | irq_exit(); |
| 666 | set_irq_regs(old_regs); | 666 | set_irq_regs(old_regs); |
| 667 | } | 667 | } |
| @@ -682,10 +682,10 @@ static int cio_tpi(void) | |||
| 682 | struct irb *irb; | 682 | struct irb *irb; |
| 683 | int irq_context; | 683 | int irq_context; |
| 684 | 684 | ||
| 685 | tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; | 685 | tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; |
| 686 | if (tpi(NULL) != 1) | 686 | if (tpi(NULL) != 1) |
| 687 | return 0; | 687 | return 0; |
| 688 | irb = (struct irb *) __LC_IRB; | 688 | irb = (struct irb *)&S390_lowcore.irb; |
| 689 | /* Store interrupt response block to lowcore. */ | 689 | /* Store interrupt response block to lowcore. */ |
| 690 | if (tsch(tpi_info->schid, irb) != 0) | 690 | if (tsch(tpi_info->schid, irb) != 0) |
| 691 | /* Not status pending or not operational. */ | 691 | /* Not status pending or not operational. */ |
| @@ -885,7 +885,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid) | |||
| 885 | struct tpi_info ti; | 885 | struct tpi_info ti; |
| 886 | 886 | ||
| 887 | if (tpi(&ti)) { | 887 | if (tpi(&ti)) { |
| 888 | tsch(ti.schid, (struct irb *)__LC_IRB); | 888 | tsch(ti.schid, (struct irb *)&S390_lowcore.irb); |
| 889 | if (schid_equal(&ti.schid, &schid)) | 889 | if (schid_equal(&ti.schid, &schid)) |
| 890 | return 0; | 890 | return 0; |
| 891 | } | 891 | } |
| @@ -1083,7 +1083,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) | |||
| 1083 | struct subchannel_id schid; | 1083 | struct subchannel_id schid; |
| 1084 | struct schib schib; | 1084 | struct schib schib; |
| 1085 | 1085 | ||
| 1086 | schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; | 1086 | schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; |
| 1087 | if (!schid.one) | 1087 | if (!schid.one) |
| 1088 | return -ENODEV; | 1088 | return -ENODEV; |
| 1089 | if (stsch(schid, &schib)) | 1089 | if (stsch(schid, &schib)) |
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
index d157665d0e76..425f741a280c 100644
--- a/drivers/s390/cio/crw.c
+++ b/drivers/s390/cio/crw.c
| @@ -8,15 +8,16 @@ | |||
| 8 | * Heiko Carstens <heiko.carstens@de.ibm.com>, | 8 | * Heiko Carstens <heiko.carstens@de.ibm.com>, |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/semaphore.h> | ||
| 12 | #include <linux/mutex.h> | 11 | #include <linux/mutex.h> |
| 13 | #include <linux/kthread.h> | 12 | #include <linux/kthread.h> |
| 14 | #include <linux/init.h> | 13 | #include <linux/init.h> |
| 14 | #include <linux/wait.h> | ||
| 15 | #include <asm/crw.h> | 15 | #include <asm/crw.h> |
| 16 | 16 | ||
| 17 | static struct semaphore crw_semaphore; | ||
| 18 | static DEFINE_MUTEX(crw_handler_mutex); | 17 | static DEFINE_MUTEX(crw_handler_mutex); |
| 19 | static crw_handler_t crw_handlers[NR_RSCS]; | 18 | static crw_handler_t crw_handlers[NR_RSCS]; |
| 19 | static atomic_t crw_nr_req = ATOMIC_INIT(0); | ||
| 20 | static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q); | ||
| 20 | 21 | ||
| 21 | /** | 22 | /** |
| 22 | * crw_register_handler() - register a channel report word handler | 23 | * crw_register_handler() - register a channel report word handler |
| @@ -59,12 +60,14 @@ void crw_unregister_handler(int rsc) | |||
| 59 | static int crw_collect_info(void *unused) | 60 | static int crw_collect_info(void *unused) |
| 60 | { | 61 | { |
| 61 | struct crw crw[2]; | 62 | struct crw crw[2]; |
| 62 | int ccode; | 63 | int ccode, signal; |
| 63 | unsigned int chain; | 64 | unsigned int chain; |
| 64 | int ignore; | ||
| 65 | 65 | ||
| 66 | repeat: | 66 | repeat: |
| 67 | ignore = down_interruptible(&crw_semaphore); | 67 | signal = wait_event_interruptible(crw_handler_wait_q, |
| 68 | atomic_read(&crw_nr_req) > 0); | ||
| 69 | if (unlikely(signal)) | ||
| 70 | atomic_inc(&crw_nr_req); | ||
| 68 | chain = 0; | 71 | chain = 0; |
| 69 | while (1) { | 72 | while (1) { |
| 70 | crw_handler_t handler; | 73 | crw_handler_t handler; |
| @@ -122,25 +125,23 @@ repeat: | |||
| 122 | /* chain is always 0 or 1 here. */ | 125 | /* chain is always 0 or 1 here. */ |
| 123 | chain = crw[chain].chn ? chain + 1 : 0; | 126 | chain = crw[chain].chn ? chain + 1 : 0; |
| 124 | } | 127 | } |
| 128 | if (atomic_dec_and_test(&crw_nr_req)) | ||
| 129 | wake_up(&crw_handler_wait_q); | ||
| 125 | goto repeat; | 130 | goto repeat; |
| 126 | return 0; | 131 | return 0; |
| 127 | } | 132 | } |
| 128 | 133 | ||
| 129 | void crw_handle_channel_report(void) | 134 | void crw_handle_channel_report(void) |
| 130 | { | 135 | { |
| 131 | up(&crw_semaphore); | 136 | atomic_inc(&crw_nr_req); |
| 137 | wake_up(&crw_handler_wait_q); | ||
| 132 | } | 138 | } |
| 133 | 139 | ||
| 134 | /* | 140 | void crw_wait_for_channel_report(void) |
| 135 | * Separate initcall needed for semaphore initialization since | ||
| 136 | * crw_handle_channel_report might be called before crw_machine_check_init. | ||
| 137 | */ | ||
| 138 | static int __init crw_init_semaphore(void) | ||
| 139 | { | 141 | { |
| 140 | init_MUTEX_LOCKED(&crw_semaphore); | 142 | crw_handle_channel_report(); |
| 141 | return 0; | 143 | wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0); |
| 142 | } | 144 | } |
| 143 | pure_initcall(crw_init_semaphore); | ||
| 144 | 145 | ||
| 145 | /* | 146 | /* |
| 146 | * Machine checks for the channel subsystem must be enabled | 147 | * Machine checks for the channel subsystem must be enabled |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 7679aee6fa14..2769da54f2b9 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/list.h> | 18 | #include <linux/list.h> |
| 19 | #include <linux/reboot.h> | 19 | #include <linux/reboot.h> |
| 20 | #include <linux/suspend.h> | 20 | #include <linux/suspend.h> |
| 21 | #include <linux/proc_fs.h> | ||
| 21 | #include <asm/isc.h> | 22 | #include <asm/isc.h> |
| 22 | #include <asm/crw.h> | 23 | #include <asm/crw.h> |
| 23 | 24 | ||
| @@ -232,7 +233,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) | |||
| 232 | if (!get_device(&sch->dev)) | 233 | if (!get_device(&sch->dev)) |
| 233 | return; | 234 | return; |
| 234 | sch->todo = todo; | 235 | sch->todo = todo; |
| 235 | if (!queue_work(slow_path_wq, &sch->todo_work)) { | 236 | if (!queue_work(cio_work_q, &sch->todo_work)) { |
| 236 | /* Already queued, release workqueue ref. */ | 237 | /* Already queued, release workqueue ref. */ |
| 237 | put_device(&sch->dev); | 238 | put_device(&sch->dev); |
| 238 | } | 239 | } |
| @@ -543,7 +544,7 @@ static void css_slow_path_func(struct work_struct *unused) | |||
| 543 | } | 544 | } |
| 544 | 545 | ||
| 545 | static DECLARE_WORK(slow_path_work, css_slow_path_func); | 546 | static DECLARE_WORK(slow_path_work, css_slow_path_func); |
| 546 | struct workqueue_struct *slow_path_wq; | 547 | struct workqueue_struct *cio_work_q; |
| 547 | 548 | ||
| 548 | void css_schedule_eval(struct subchannel_id schid) | 549 | void css_schedule_eval(struct subchannel_id schid) |
| 549 | { | 550 | { |
| @@ -552,7 +553,7 @@ void css_schedule_eval(struct subchannel_id schid) | |||
| 552 | spin_lock_irqsave(&slow_subchannel_lock, flags); | 553 | spin_lock_irqsave(&slow_subchannel_lock, flags); |
| 553 | idset_sch_add(slow_subchannel_set, schid); | 554 | idset_sch_add(slow_subchannel_set, schid); |
| 554 | atomic_set(&css_eval_scheduled, 1); | 555 | atomic_set(&css_eval_scheduled, 1); |
| 555 | queue_work(slow_path_wq, &slow_path_work); | 556 | queue_work(cio_work_q, &slow_path_work); |
| 556 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | 557 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); |
| 557 | } | 558 | } |
| 558 | 559 | ||
| @@ -563,7 +564,7 @@ void css_schedule_eval_all(void) | |||
| 563 | spin_lock_irqsave(&slow_subchannel_lock, flags); | 564 | spin_lock_irqsave(&slow_subchannel_lock, flags); |
| 564 | idset_fill(slow_subchannel_set); | 565 | idset_fill(slow_subchannel_set); |
| 565 | atomic_set(&css_eval_scheduled, 1); | 566 | atomic_set(&css_eval_scheduled, 1); |
| 566 | queue_work(slow_path_wq, &slow_path_work); | 567 | queue_work(cio_work_q, &slow_path_work); |
| 567 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | 568 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); |
| 568 | } | 569 | } |
| 569 | 570 | ||
| @@ -594,14 +595,14 @@ void css_schedule_eval_all_unreg(void) | |||
| 594 | spin_lock_irqsave(&slow_subchannel_lock, flags); | 595 | spin_lock_irqsave(&slow_subchannel_lock, flags); |
| 595 | idset_add_set(slow_subchannel_set, unreg_set); | 596 | idset_add_set(slow_subchannel_set, unreg_set); |
| 596 | atomic_set(&css_eval_scheduled, 1); | 597 | atomic_set(&css_eval_scheduled, 1); |
| 597 | queue_work(slow_path_wq, &slow_path_work); | 598 | queue_work(cio_work_q, &slow_path_work); |
| 598 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | 599 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); |
| 599 | idset_free(unreg_set); | 600 | idset_free(unreg_set); |
| 600 | } | 601 | } |
| 601 | 602 | ||
| 602 | void css_wait_for_slow_path(void) | 603 | void css_wait_for_slow_path(void) |
| 603 | { | 604 | { |
| 604 | flush_workqueue(slow_path_wq); | 605 | flush_workqueue(cio_work_q); |
| 605 | } | 606 | } |
| 606 | 607 | ||
| 607 | /* Schedule reprobing of all unregistered subchannels. */ | 608 | /* Schedule reprobing of all unregistered subchannels. */ |
| @@ -992,12 +993,21 @@ static int __init channel_subsystem_init(void) | |||
| 992 | ret = css_bus_init(); | 993 | ret = css_bus_init(); |
| 993 | if (ret) | 994 | if (ret) |
| 994 | return ret; | 995 | return ret; |
| 995 | 996 | cio_work_q = create_singlethread_workqueue("cio"); | |
| 997 | if (!cio_work_q) { | ||
| 998 | ret = -ENOMEM; | ||
| 999 | goto out_bus; | ||
| 1000 | } | ||
| 996 | ret = io_subchannel_init(); | 1001 | ret = io_subchannel_init(); |
| 997 | if (ret) | 1002 | if (ret) |
| 998 | css_bus_cleanup(); | 1003 | goto out_wq; |
| 999 | 1004 | ||
| 1000 | return ret; | 1005 | return ret; |
| 1006 | out_wq: | ||
| 1007 | destroy_workqueue(cio_work_q); | ||
| 1008 | out_bus: | ||
| 1009 | css_bus_cleanup(); | ||
| 1010 | return ret; | ||
| 1001 | } | 1011 | } |
| 1002 | subsys_initcall(channel_subsystem_init); | 1012 | subsys_initcall(channel_subsystem_init); |
| 1003 | 1013 | ||
| @@ -1006,10 +1016,25 @@ static int css_settle(struct device_driver *drv, void *unused) | |||
| 1006 | struct css_driver *cssdrv = to_cssdriver(drv); | 1016 | struct css_driver *cssdrv = to_cssdriver(drv); |
| 1007 | 1017 | ||
| 1008 | if (cssdrv->settle) | 1018 | if (cssdrv->settle) |
| 1009 | cssdrv->settle(); | 1019 | return cssdrv->settle(); |
| 1010 | return 0; | 1020 | return 0; |
| 1011 | } | 1021 | } |
| 1012 | 1022 | ||
| 1023 | int css_complete_work(void) | ||
| 1024 | { | ||
| 1025 | int ret; | ||
| 1026 | |||
| 1027 | /* Wait for the evaluation of subchannels to finish. */ | ||
| 1028 | ret = wait_event_interruptible(css_eval_wq, | ||
| 1029 | atomic_read(&css_eval_scheduled) == 0); | ||
| 1030 | if (ret) | ||
| 1031 | return -EINTR; | ||
| 1032 | flush_workqueue(cio_work_q); | ||
| 1033 | /* Wait for the subchannel type specific initialization to finish */ | ||
| 1034 | return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle); | ||
| 1035 | } | ||
| 1036 | |||
| 1037 | |||
| 1013 | /* | 1038 | /* |
| 1014 | * Wait for the initialization of devices to finish, to make sure we are | 1039 | * Wait for the initialization of devices to finish, to make sure we are |
| 1015 | * done with our setup if the search for the root device starts. | 1040 | * done with our setup if the search for the root device starts. |
| @@ -1018,13 +1043,41 @@ static int __init channel_subsystem_init_sync(void) | |||
| 1018 | { | 1043 | { |
| 1019 | /* Start initial subchannel evaluation. */ | 1044 | /* Start initial subchannel evaluation. */ |
| 1020 | css_schedule_eval_all(); | 1045 | css_schedule_eval_all(); |
| 1021 | /* Wait for the evaluation of subchannels to finish. */ | 1046 | css_complete_work(); |
| 1022 | wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0); | 1047 | return 0; |
| 1023 | /* Wait for the subchannel type specific initialization to finish */ | ||
| 1024 | return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle); | ||
| 1025 | } | 1048 | } |
| 1026 | subsys_initcall_sync(channel_subsystem_init_sync); | 1049 | subsys_initcall_sync(channel_subsystem_init_sync); |
| 1027 | 1050 | ||
| 1051 | #ifdef CONFIG_PROC_FS | ||
| 1052 | static ssize_t cio_settle_write(struct file *file, const char __user *buf, | ||
| 1053 | size_t count, loff_t *ppos) | ||
| 1054 | { | ||
| 1055 | int ret; | ||
| 1056 | |||
| 1057 | /* Handle pending CRW's. */ | ||
| 1058 | crw_wait_for_channel_report(); | ||
| 1059 | ret = css_complete_work(); | ||
| 1060 | |||
| 1061 | return ret ? ret : count; | ||
| 1062 | } | ||
| 1063 | |||
| 1064 | static const struct file_operations cio_settle_proc_fops = { | ||
| 1065 | .write = cio_settle_write, | ||
| 1066 | }; | ||
| 1067 | |||
| 1068 | static int __init cio_settle_init(void) | ||
| 1069 | { | ||
| 1070 | struct proc_dir_entry *entry; | ||
| 1071 | |||
| 1072 | entry = proc_create("cio_settle", S_IWUSR, NULL, | ||
| 1073 | &cio_settle_proc_fops); | ||
| 1074 | if (!entry) | ||
| 1075 | return -ENOMEM; | ||
| 1076 | return 0; | ||
| 1077 | } | ||
| 1078 | device_initcall(cio_settle_init); | ||
| 1079 | #endif /*CONFIG_PROC_FS*/ | ||
| 1080 | |||
| 1028 | int sch_is_pseudo_sch(struct subchannel *sch) | 1081 | int sch_is_pseudo_sch(struct subchannel *sch) |
| 1029 | { | 1082 | { |
| 1030 | return sch == to_css(sch->dev.parent)->pseudo_subchannel; | 1083 | return sch == to_css(sch->dev.parent)->pseudo_subchannel; |
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index fe84b92cde60..7e37886de231 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
| @@ -95,7 +95,7 @@ struct css_driver { | |||
| 95 | int (*freeze)(struct subchannel *); | 95 | int (*freeze)(struct subchannel *); |
| 96 | int (*thaw) (struct subchannel *); | 96 | int (*thaw) (struct subchannel *); |
| 97 | int (*restore)(struct subchannel *); | 97 | int (*restore)(struct subchannel *); |
| 98 | void (*settle)(void); | 98 | int (*settle)(void); |
| 99 | const char *name; | 99 | const char *name; |
| 100 | }; | 100 | }; |
| 101 | 101 | ||
| @@ -146,12 +146,13 @@ extern struct channel_subsystem *channel_subsystems[]; | |||
| 146 | /* Helper functions to build lists for the slow path. */ | 146 | /* Helper functions to build lists for the slow path. */ |
| 147 | void css_schedule_eval(struct subchannel_id schid); | 147 | void css_schedule_eval(struct subchannel_id schid); |
| 148 | void css_schedule_eval_all(void); | 148 | void css_schedule_eval_all(void); |
| 149 | int css_complete_work(void); | ||
| 149 | 150 | ||
| 150 | int sch_is_pseudo_sch(struct subchannel *); | 151 | int sch_is_pseudo_sch(struct subchannel *); |
| 151 | struct schib; | 152 | struct schib; |
| 152 | int css_sch_is_valid(struct schib *); | 153 | int css_sch_is_valid(struct schib *); |
| 153 | 154 | ||
| 154 | extern struct workqueue_struct *slow_path_wq; | 155 | extern struct workqueue_struct *cio_work_q; |
| 155 | void css_wait_for_slow_path(void); | 156 | void css_wait_for_slow_path(void); |
| 156 | void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo); | 157 | void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo); |
| 157 | #endif | 158 | #endif |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index a6c7d5426fb2..c6abb75c4615 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
| @@ -136,7 +136,6 @@ static int io_subchannel_sch_event(struct subchannel *, int); | |||
| 136 | static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, | 136 | static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, |
| 137 | int); | 137 | int); |
| 138 | static void recovery_func(unsigned long data); | 138 | static void recovery_func(unsigned long data); |
| 139 | struct workqueue_struct *ccw_device_work; | ||
| 140 | wait_queue_head_t ccw_device_init_wq; | 139 | wait_queue_head_t ccw_device_init_wq; |
| 141 | atomic_t ccw_device_init_count; | 140 | atomic_t ccw_device_init_count; |
| 142 | 141 | ||
| @@ -159,11 +158,16 @@ static int io_subchannel_prepare(struct subchannel *sch) | |||
| 159 | return 0; | 158 | return 0; |
| 160 | } | 159 | } |
| 161 | 160 | ||
| 162 | static void io_subchannel_settle(void) | 161 | static int io_subchannel_settle(void) |
| 163 | { | 162 | { |
| 164 | wait_event(ccw_device_init_wq, | 163 | int ret; |
| 165 | atomic_read(&ccw_device_init_count) == 0); | 164 | |
| 166 | flush_workqueue(ccw_device_work); | 165 | ret = wait_event_interruptible(ccw_device_init_wq, |
| 166 | atomic_read(&ccw_device_init_count) == 0); | ||
| 167 | if (ret) | ||
| 168 | return -EINTR; | ||
| 169 | flush_workqueue(cio_work_q); | ||
| 170 | return 0; | ||
| 167 | } | 171 | } |
| 168 | 172 | ||
| 169 | static struct css_driver io_subchannel_driver = { | 173 | static struct css_driver io_subchannel_driver = { |
| @@ -188,27 +192,13 @@ int __init io_subchannel_init(void) | |||
| 188 | atomic_set(&ccw_device_init_count, 0); | 192 | atomic_set(&ccw_device_init_count, 0); |
| 189 | setup_timer(&recovery_timer, recovery_func, 0); | 193 | setup_timer(&recovery_timer, recovery_func, 0); |
| 190 | 194 | ||
| 191 | ccw_device_work = create_singlethread_workqueue("cio"); | 195 | ret = bus_register(&ccw_bus_type); |
| 192 | if (!ccw_device_work) | 196 | if (ret) |
| 193 | return -ENOMEM; | 197 | return ret; |
| 194 | slow_path_wq = create_singlethread_workqueue("kslowcrw"); | ||
| 195 | if (!slow_path_wq) { | ||
| 196 | ret = -ENOMEM; | ||
| 197 | goto out_err; | ||
| 198 | } | ||
| 199 | if ((ret = bus_register (&ccw_bus_type))) | ||
| 200 | goto out_err; | ||
| 201 | |||
| 202 | ret = css_driver_register(&io_subchannel_driver); | 198 | ret = css_driver_register(&io_subchannel_driver); |
| 203 | if (ret) | 199 | if (ret) |
| 204 | goto out_err; | 200 | bus_unregister(&ccw_bus_type); |
| 205 | 201 | ||
| 206 | return 0; | ||
| 207 | out_err: | ||
| 208 | if (ccw_device_work) | ||
| 209 | destroy_workqueue(ccw_device_work); | ||
| 210 | if (slow_path_wq) | ||
| 211 | destroy_workqueue(slow_path_wq); | ||
| 212 | return ret; | 202 | return ret; |
| 213 | } | 203 | } |
| 214 | 204 | ||
| @@ -1348,7 +1338,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch) | |||
| 1348 | /* Not operational. */ | 1338 | /* Not operational. */ |
| 1349 | if (!cdev) | 1339 | if (!cdev) |
| 1350 | return IO_SCH_UNREG; | 1340 | return IO_SCH_UNREG; |
| 1351 | if (!ccw_device_notify(cdev, CIO_GONE)) | 1341 | if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) |
| 1352 | return IO_SCH_UNREG; | 1342 | return IO_SCH_UNREG; |
| 1353 | return IO_SCH_ORPH_UNREG; | 1343 | return IO_SCH_ORPH_UNREG; |
| 1354 | } | 1344 | } |
| @@ -1356,12 +1346,12 @@ static enum io_sch_action sch_get_action(struct subchannel *sch) | |||
| 1356 | if (!cdev) | 1346 | if (!cdev) |
| 1357 | return IO_SCH_ATTACH; | 1347 | return IO_SCH_ATTACH; |
| 1358 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { | 1348 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { |
| 1359 | if (!ccw_device_notify(cdev, CIO_GONE)) | 1349 | if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) |
| 1360 | return IO_SCH_UNREG_ATTACH; | 1350 | return IO_SCH_UNREG_ATTACH; |
| 1361 | return IO_SCH_ORPH_ATTACH; | 1351 | return IO_SCH_ORPH_ATTACH; |
| 1362 | } | 1352 | } |
| 1363 | if ((sch->schib.pmcw.pam & sch->opm) == 0) { | 1353 | if ((sch->schib.pmcw.pam & sch->opm) == 0) { |
| 1364 | if (!ccw_device_notify(cdev, CIO_NO_PATH)) | 1354 | if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) |
| 1365 | return IO_SCH_UNREG; | 1355 | return IO_SCH_UNREG; |
| 1366 | return IO_SCH_DISC; | 1356 | return IO_SCH_DISC; |
| 1367 | } | 1357 | } |
| @@ -1410,6 +1400,12 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) | |||
| 1410 | rc = 0; | 1400 | rc = 0; |
| 1411 | goto out_unlock; | 1401 | goto out_unlock; |
| 1412 | case IO_SCH_VERIFY: | 1402 | case IO_SCH_VERIFY: |
| 1403 | if (cdev->private->flags.resuming == 1) { | ||
| 1404 | if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) { | ||
| 1405 | ccw_device_set_notoper(cdev); | ||
| 1406 | break; | ||
| 1407 | } | ||
| 1408 | } | ||
| 1413 | /* Trigger path verification. */ | 1409 | /* Trigger path verification. */ |
| 1414 | io_subchannel_verify(sch); | 1410 | io_subchannel_verify(sch); |
| 1415 | rc = 0; | 1411 | rc = 0; |
| @@ -1448,7 +1444,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) | |||
| 1448 | break; | 1444 | break; |
| 1449 | case IO_SCH_UNREG_ATTACH: | 1445 | case IO_SCH_UNREG_ATTACH: |
| 1450 | /* Unregister ccw device. */ | 1446 | /* Unregister ccw device. */ |
| 1451 | ccw_device_unregister(cdev); | 1447 | if (!cdev->private->flags.resuming) |
| 1448 | ccw_device_unregister(cdev); | ||
| 1452 | break; | 1449 | break; |
| 1453 | default: | 1450 | default: |
| 1454 | break; | 1451 | break; |
| @@ -1457,7 +1454,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) | |||
| 1457 | switch (action) { | 1454 | switch (action) { |
| 1458 | case IO_SCH_ORPH_UNREG: | 1455 | case IO_SCH_ORPH_UNREG: |
| 1459 | case IO_SCH_UNREG: | 1456 | case IO_SCH_UNREG: |
| 1460 | css_sch_device_unregister(sch); | 1457 | if (!cdev || !cdev->private->flags.resuming) |
| 1458 | css_sch_device_unregister(sch); | ||
| 1461 | break; | 1459 | break; |
| 1462 | case IO_SCH_ORPH_ATTACH: | 1460 | case IO_SCH_ORPH_ATTACH: |
| 1463 | case IO_SCH_UNREG_ATTACH: | 1461 | case IO_SCH_UNREG_ATTACH: |
| @@ -1779,26 +1777,42 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev) | |||
| 1779 | { | 1777 | { |
| 1780 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | 1778 | struct subchannel *sch = to_subchannel(cdev->dev.parent); |
| 1781 | 1779 | ||
| 1782 | if (cio_is_console(sch->schid)) | 1780 | spin_lock_irq(sch->lock); |
| 1783 | goto out; | 1781 | if (cio_is_console(sch->schid)) { |
| 1782 | cio_enable_subchannel(sch, (u32)(addr_t)sch); | ||
| 1783 | goto out_unlock; | ||
| 1784 | } | ||
| 1784 | /* | 1785 | /* |
| 1785 | * While we were sleeping, devices may have gone or become | 1786 | * While we were sleeping, devices may have gone or become |
| 1786 | * available again. Kick re-detection. | 1787 | * available again. Kick re-detection. |
| 1787 | */ | 1788 | */ |
| 1788 | spin_lock_irq(sch->lock); | ||
| 1789 | cdev->private->flags.resuming = 1; | 1789 | cdev->private->flags.resuming = 1; |
| 1790 | css_schedule_eval(sch->schid); | ||
| 1791 | spin_unlock_irq(sch->lock); | ||
| 1792 | css_complete_work(); | ||
| 1793 | |||
| 1794 | /* cdev may have been moved to a different subchannel. */ | ||
| 1795 | sch = to_subchannel(cdev->dev.parent); | ||
| 1796 | spin_lock_irq(sch->lock); | ||
| 1797 | if (cdev->private->state != DEV_STATE_ONLINE && | ||
| 1798 | cdev->private->state != DEV_STATE_OFFLINE) | ||
| 1799 | goto out_unlock; | ||
| 1800 | |||
| 1790 | ccw_device_recognition(cdev); | 1801 | ccw_device_recognition(cdev); |
| 1791 | spin_unlock_irq(sch->lock); | 1802 | spin_unlock_irq(sch->lock); |
| 1792 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || | 1803 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || |
| 1793 | cdev->private->state == DEV_STATE_DISCONNECTED); | 1804 | cdev->private->state == DEV_STATE_DISCONNECTED); |
| 1794 | out: | 1805 | spin_lock_irq(sch->lock); |
| 1806 | |||
| 1807 | out_unlock: | ||
| 1795 | cdev->private->flags.resuming = 0; | 1808 | cdev->private->flags.resuming = 0; |
| 1809 | spin_unlock_irq(sch->lock); | ||
| 1796 | } | 1810 | } |
| 1797 | 1811 | ||
| 1798 | static int resume_handle_boxed(struct ccw_device *cdev) | 1812 | static int resume_handle_boxed(struct ccw_device *cdev) |
| 1799 | { | 1813 | { |
| 1800 | cdev->private->state = DEV_STATE_BOXED; | 1814 | cdev->private->state = DEV_STATE_BOXED; |
| 1801 | if (ccw_device_notify(cdev, CIO_BOXED)) | 1815 | if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK) |
| 1802 | return 0; | 1816 | return 0; |
| 1803 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); | 1817 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); |
| 1804 | return -ENODEV; | 1818 | return -ENODEV; |
| @@ -1807,7 +1821,7 @@ static int resume_handle_boxed(struct ccw_device *cdev) | |||
| 1807 | static int resume_handle_disc(struct ccw_device *cdev) | 1821 | static int resume_handle_disc(struct ccw_device *cdev) |
| 1808 | { | 1822 | { |
| 1809 | cdev->private->state = DEV_STATE_DISCONNECTED; | 1823 | cdev->private->state = DEV_STATE_DISCONNECTED; |
| 1810 | if (ccw_device_notify(cdev, CIO_GONE)) | 1824 | if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK) |
| 1811 | return 0; | 1825 | return 0; |
| 1812 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); | 1826 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); |
| 1813 | return -ENODEV; | 1827 | return -ENODEV; |
| @@ -1816,40 +1830,31 @@ static int resume_handle_disc(struct ccw_device *cdev) | |||
| 1816 | static int ccw_device_pm_restore(struct device *dev) | 1830 | static int ccw_device_pm_restore(struct device *dev) |
| 1817 | { | 1831 | { |
| 1818 | struct ccw_device *cdev = to_ccwdev(dev); | 1832 | struct ccw_device *cdev = to_ccwdev(dev); |
| 1819 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | 1833 | struct subchannel *sch; |
| 1820 | int ret = 0, cm_enabled; | 1834 | int ret = 0; |
| 1821 | 1835 | ||
| 1822 | __ccw_device_pm_restore(cdev); | 1836 | __ccw_device_pm_restore(cdev); |
| 1837 | sch = to_subchannel(cdev->dev.parent); | ||
| 1823 | spin_lock_irq(sch->lock); | 1838 | spin_lock_irq(sch->lock); |
| 1824 | if (cio_is_console(sch->schid)) { | 1839 | if (cio_is_console(sch->schid)) |
| 1825 | cio_enable_subchannel(sch, (u32)(addr_t)sch); | ||
| 1826 | spin_unlock_irq(sch->lock); | ||
| 1827 | goto out_restore; | 1840 | goto out_restore; |
| 1828 | } | 1841 | |
| 1829 | cdev->private->flags.donotify = 0; | ||
| 1830 | /* check recognition results */ | 1842 | /* check recognition results */ |
| 1831 | switch (cdev->private->state) { | 1843 | switch (cdev->private->state) { |
| 1832 | case DEV_STATE_OFFLINE: | 1844 | case DEV_STATE_OFFLINE: |
| 1845 | case DEV_STATE_ONLINE: | ||
| 1846 | cdev->private->flags.donotify = 0; | ||
| 1833 | break; | 1847 | break; |
| 1834 | case DEV_STATE_BOXED: | 1848 | case DEV_STATE_BOXED: |
| 1835 | ret = resume_handle_boxed(cdev); | 1849 | ret = resume_handle_boxed(cdev); |
| 1836 | spin_unlock_irq(sch->lock); | ||
| 1837 | if (ret) | 1850 | if (ret) |
| 1838 | goto out; | 1851 | goto out_unlock; |
| 1839 | goto out_restore; | 1852 | goto out_restore; |
| 1840 | case DEV_STATE_DISCONNECTED: | ||
| 1841 | goto out_disc_unlock; | ||
| 1842 | default: | 1853 | default: |
| 1843 | goto out_unreg_unlock; | 1854 | ret = resume_handle_disc(cdev); |
| 1844 | } | 1855 | if (ret) |
| 1845 | /* check if the device id has changed */ | 1856 | goto out_unlock; |
| 1846 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { | 1857 | goto out_restore; |
| 1847 | CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno " | ||
| 1848 | "changed from %04x to %04x)\n", | ||
| 1849 | sch->schid.ssid, sch->schid.sch_no, | ||
| 1850 | cdev->private->dev_id.devno, | ||
| 1851 | sch->schib.pmcw.dev); | ||
| 1852 | goto out_unreg_unlock; | ||
| 1853 | } | 1858 | } |
| 1854 | /* check if the device type has changed */ | 1859 | /* check if the device type has changed */ |
| 1855 | if (!ccw_device_test_sense_data(cdev)) { | 1860 | if (!ccw_device_test_sense_data(cdev)) { |
| @@ -1858,24 +1863,30 @@ static int ccw_device_pm_restore(struct device *dev) | |||
| 1858 | ret = -ENODEV; | 1863 | ret = -ENODEV; |
| 1859 | goto out_unlock; | 1864 | goto out_unlock; |
| 1860 | } | 1865 | } |
| 1861 | if (!cdev->online) { | 1866 | if (!cdev->online) |
| 1862 | ret = 0; | ||
| 1863 | goto out_unlock; | 1867 | goto out_unlock; |
| 1864 | } | ||
| 1865 | ret = ccw_device_online(cdev); | ||
| 1866 | if (ret) | ||
| 1867 | goto out_disc_unlock; | ||
| 1868 | 1868 | ||
| 1869 | cm_enabled = cdev->private->cmb != NULL; | 1869 | if (ccw_device_online(cdev)) { |
| 1870 | ret = resume_handle_disc(cdev); | ||
| 1871 | if (ret) | ||
| 1872 | goto out_unlock; | ||
| 1873 | goto out_restore; | ||
| 1874 | } | ||
| 1870 | spin_unlock_irq(sch->lock); | 1875 | spin_unlock_irq(sch->lock); |
| 1871 | |||
| 1872 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); | 1876 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); |
| 1873 | if (cdev->private->state != DEV_STATE_ONLINE) { | 1877 | spin_lock_irq(sch->lock); |
| 1874 | spin_lock_irq(sch->lock); | 1878 | |
| 1875 | goto out_disc_unlock; | 1879 | if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) { |
| 1880 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); | ||
| 1881 | ret = -ENODEV; | ||
| 1882 | goto out_unlock; | ||
| 1876 | } | 1883 | } |
| 1877 | if (cm_enabled) { | 1884 | |
| 1885 | /* reenable cmf, if needed */ | ||
| 1886 | if (cdev->private->cmb) { | ||
| 1887 | spin_unlock_irq(sch->lock); | ||
| 1878 | ret = ccw_set_cmf(cdev, 1); | 1888 | ret = ccw_set_cmf(cdev, 1); |
| 1889 | spin_lock_irq(sch->lock); | ||
| 1879 | if (ret) { | 1890 | if (ret) { |
| 1880 | CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed " | 1891 | CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed " |
| 1881 | "(rc=%d)\n", cdev->private->dev_id.ssid, | 1892 | "(rc=%d)\n", cdev->private->dev_id.ssid, |
| @@ -1885,21 +1896,11 @@ static int ccw_device_pm_restore(struct device *dev) | |||
| 1885 | } | 1896 | } |
| 1886 | 1897 | ||
| 1887 | out_restore: | 1898 | out_restore: |
| 1899 | spin_unlock_irq(sch->lock); | ||
| 1888 | if (cdev->online && cdev->drv && cdev->drv->restore) | 1900 | if (cdev->online && cdev->drv && cdev->drv->restore) |
| 1889 | ret = cdev->drv->restore(cdev); | 1901 | ret = cdev->drv->restore(cdev); |
| 1890 | out: | ||
| 1891 | return ret; | 1902 | return ret; |
| 1892 | 1903 | ||
| 1893 | out_disc_unlock: | ||
| 1894 | ret = resume_handle_disc(cdev); | ||
| 1895 | spin_unlock_irq(sch->lock); | ||
| 1896 | if (ret) | ||
| 1897 | return ret; | ||
| 1898 | goto out_restore; | ||
| 1899 | |||
| 1900 | out_unreg_unlock: | ||
| 1901 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL); | ||
| 1902 | ret = -ENODEV; | ||
| 1903 | out_unlock: | 1904 | out_unlock: |
| 1904 | spin_unlock_irq(sch->lock); | 1905 | spin_unlock_irq(sch->lock); |
| 1905 | return ret; | 1906 | return ret; |
| @@ -2028,7 +2029,7 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo) | |||
| 2028 | /* Get workqueue ref. */ | 2029 | /* Get workqueue ref. */ |
| 2029 | if (!get_device(&cdev->dev)) | 2030 | if (!get_device(&cdev->dev)) |
| 2030 | return; | 2031 | return; |
| 2031 | if (!queue_work(slow_path_wq, &cdev->private->todo_work)) { | 2032 | if (!queue_work(cio_work_q, &cdev->private->todo_work)) { |
| 2032 | /* Already queued, release workqueue ref. */ | 2033 | /* Already queued, release workqueue ref. */ |
| 2033 | put_device(&cdev->dev); | 2034 | put_device(&cdev->dev); |
| 2034 | } | 2035 | } |
| @@ -2041,5 +2042,4 @@ EXPORT_SYMBOL(ccw_driver_register); | |||
| 2041 | EXPORT_SYMBOL(ccw_driver_unregister); | 2042 | EXPORT_SYMBOL(ccw_driver_unregister); |
| 2042 | EXPORT_SYMBOL(get_ccwdev_by_busid); | 2043 | EXPORT_SYMBOL(get_ccwdev_by_busid); |
| 2043 | EXPORT_SYMBOL(ccw_bus_type); | 2044 | EXPORT_SYMBOL(ccw_bus_type); |
| 2044 | EXPORT_SYMBOL(ccw_device_work); | ||
| 2045 | EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); | 2045 | EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); |
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index bcfe13e42638..379de2d1ec49 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | #include <asm/ccwdev.h> | 4 | #include <asm/ccwdev.h> |
| 5 | #include <asm/atomic.h> | 5 | #include <asm/atomic.h> |
| 6 | #include <linux/wait.h> | 6 | #include <linux/wait.h> |
| 7 | 7 | #include <linux/notifier.h> | |
| 8 | #include "io_sch.h" | 8 | #include "io_sch.h" |
| 9 | 9 | ||
| 10 | /* | 10 | /* |
| @@ -71,7 +71,6 @@ dev_fsm_final_state(struct ccw_device *cdev) | |||
| 71 | cdev->private->state == DEV_STATE_BOXED); | 71 | cdev->private->state == DEV_STATE_BOXED); |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | extern struct workqueue_struct *ccw_device_work; | ||
| 75 | extern wait_queue_head_t ccw_device_init_wq; | 74 | extern wait_queue_head_t ccw_device_init_wq; |
| 76 | extern atomic_t ccw_device_init_count; | 75 | extern atomic_t ccw_device_init_count; |
| 77 | int __init io_subchannel_init(void); | 76 | int __init io_subchannel_init(void); |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index ae760658a131..c56ab94612f9 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
| @@ -313,21 +313,43 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err) | |||
| 313 | } | 313 | } |
| 314 | } | 314 | } |
| 315 | 315 | ||
| 316 | /** | ||
| 317 | * ccw_device_notify() - inform the device's driver about an event | ||
| 318 | * @cdev: device for which an event occurred | ||
| 319 | * @event: event that occurred | ||
| 320 | * | ||
| 321 | * Returns: | ||
| 322 | * -%EINVAL if the device is offline or has no driver. | ||
| 323 | * -%EOPNOTSUPP if the device's driver has no notifier registered. | ||
| 324 | * %NOTIFY_OK if the driver wants to keep the device. | ||
| 325 | * %NOTIFY_BAD if the driver doesn't want to keep the device. | ||
| 326 | */ | ||
| 316 | int ccw_device_notify(struct ccw_device *cdev, int event) | 327 | int ccw_device_notify(struct ccw_device *cdev, int event) |
| 317 | { | 328 | { |
| 329 | int ret = -EINVAL; | ||
| 330 | |||
| 318 | if (!cdev->drv) | 331 | if (!cdev->drv) |
| 319 | return 0; | 332 | goto out; |
| 320 | if (!cdev->online) | 333 | if (!cdev->online) |
| 321 | return 0; | 334 | goto out; |
| 322 | CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n", | 335 | CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n", |
| 323 | cdev->private->dev_id.ssid, cdev->private->dev_id.devno, | 336 | cdev->private->dev_id.ssid, cdev->private->dev_id.devno, |
| 324 | event); | 337 | event); |
| 325 | return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; | 338 | if (!cdev->drv->notify) { |
| 339 | ret = -EOPNOTSUPP; | ||
| 340 | goto out; | ||
| 341 | } | ||
| 342 | if (cdev->drv->notify(cdev, event)) | ||
| 343 | ret = NOTIFY_OK; | ||
| 344 | else | ||
| 345 | ret = NOTIFY_BAD; | ||
| 346 | out: | ||
| 347 | return ret; | ||
| 326 | } | 348 | } |
| 327 | 349 | ||
| 328 | static void ccw_device_oper_notify(struct ccw_device *cdev) | 350 | static void ccw_device_oper_notify(struct ccw_device *cdev) |
| 329 | { | 351 | { |
| 330 | if (ccw_device_notify(cdev, CIO_OPER)) { | 352 | if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) { |
| 331 | /* Reenable channel measurements, if needed. */ | 353 | /* Reenable channel measurements, if needed. */ |
| 332 | ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); | 354 | ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); |
| 333 | return; | 355 | return; |
| @@ -361,14 +383,15 @@ ccw_device_done(struct ccw_device *cdev, int state) | |||
| 361 | case DEV_STATE_BOXED: | 383 | case DEV_STATE_BOXED: |
| 362 | CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", | 384 | CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", |
| 363 | cdev->private->dev_id.devno, sch->schid.sch_no); | 385 | cdev->private->dev_id.devno, sch->schid.sch_no); |
| 364 | if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED)) | 386 | if (cdev->online && |
| 387 | ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK) | ||
| 365 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); | 388 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); |
| 366 | cdev->private->flags.donotify = 0; | 389 | cdev->private->flags.donotify = 0; |
| 367 | break; | 390 | break; |
| 368 | case DEV_STATE_NOT_OPER: | 391 | case DEV_STATE_NOT_OPER: |
| 369 | CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", | 392 | CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", |
| 370 | cdev->private->dev_id.devno, sch->schid.sch_no); | 393 | cdev->private->dev_id.devno, sch->schid.sch_no); |
| 371 | if (!ccw_device_notify(cdev, CIO_GONE)) | 394 | if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) |
| 372 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); | 395 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); |
| 373 | else | 396 | else |
| 374 | ccw_device_set_disconnected(cdev); | 397 | ccw_device_set_disconnected(cdev); |
| @@ -378,7 +401,7 @@ ccw_device_done(struct ccw_device *cdev, int state) | |||
| 378 | CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " | 401 | CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " |
| 379 | "%04x\n", cdev->private->dev_id.devno, | 402 | "%04x\n", cdev->private->dev_id.devno, |
| 380 | sch->schid.sch_no); | 403 | sch->schid.sch_no); |
| 381 | if (!ccw_device_notify(cdev, CIO_NO_PATH)) | 404 | if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) |
| 382 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); | 405 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); |
| 383 | else | 406 | else |
| 384 | ccw_device_set_disconnected(cdev); | 407 | ccw_device_set_disconnected(cdev); |
| @@ -586,7 +609,7 @@ ccw_device_offline(struct ccw_device *cdev) | |||
| 586 | static void ccw_device_generic_notoper(struct ccw_device *cdev, | 609 | static void ccw_device_generic_notoper(struct ccw_device *cdev, |
| 587 | enum dev_event dev_event) | 610 | enum dev_event dev_event) |
| 588 | { | 611 | { |
| 589 | if (!ccw_device_notify(cdev, CIO_GONE)) | 612 | if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) |
| 590 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); | 613 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); |
| 591 | else | 614 | else |
| 592 | ccw_device_set_disconnected(cdev); | 615 | ccw_device_set_disconnected(cdev); |
| @@ -667,7 +690,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 667 | struct irb *irb; | 690 | struct irb *irb; |
| 668 | int is_cmd; | 691 | int is_cmd; |
| 669 | 692 | ||
| 670 | irb = (struct irb *) __LC_IRB; | 693 | irb = (struct irb *)&S390_lowcore.irb; |
| 671 | is_cmd = !scsw_is_tm(&irb->scsw); | 694 | is_cmd = !scsw_is_tm(&irb->scsw); |
| 672 | /* Check for unsolicited interrupt. */ | 695 | /* Check for unsolicited interrupt. */ |
| 673 | if (!scsw_is_solicited(&irb->scsw)) { | 696 | if (!scsw_is_solicited(&irb->scsw)) { |
| @@ -732,7 +755,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 732 | { | 755 | { |
| 733 | struct irb *irb; | 756 | struct irb *irb; |
| 734 | 757 | ||
| 735 | irb = (struct irb *) __LC_IRB; | 758 | irb = (struct irb *)&S390_lowcore.irb; |
| 736 | /* Check for unsolicited interrupt. */ | 759 | /* Check for unsolicited interrupt. */ |
| 737 | if (scsw_stctl(&irb->scsw) == | 760 | if (scsw_stctl(&irb->scsw) == |
| 738 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 761 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
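With the reworked return convention, ccw_device_notify() no longer returns a bare 0/1: a driver's notify callback still returns nonzero to keep the device, which is mapped to NOTIFY_OK, while 0 becomes NOTIFY_BAD, and a missing driver or notifier yields -EINVAL or -EOPNOTSUPP. A sketch of both sides of that contract (the sample_* names are invented for illustration):

	/* driver side: nonzero means "keep the device" and maps to NOTIFY_OK */
	static int sample_notify(struct ccw_device *cdev, int event)
	{
		return event != CIO_GONE;
	}

	/* common-code side: callers now compare against NOTIFY_OK explicitly,
	 * as the hunks above do */
	static void sample_handle_no_path(struct ccw_device *cdev)
	{
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		else
			ccw_device_set_disconnected(cdev);
	}
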
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 44f2f6a97f33..48aa0647432b 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h | |||
| @@ -208,18 +208,27 @@ struct qdio_dev_perf_stat { | |||
| 208 | unsigned int eqbs_partial; | 208 | unsigned int eqbs_partial; |
| 209 | unsigned int sqbs; | 209 | unsigned int sqbs; |
| 210 | unsigned int sqbs_partial; | 210 | unsigned int sqbs_partial; |
| 211 | } ____cacheline_aligned; | ||
| 212 | |||
| 213 | struct qdio_queue_perf_stat { | ||
| 214 | /* | ||
| 215 | * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128. | ||
| 216 | * Since max. 127 SBALs are scanned reuse entry for 128 as queue full | ||
| 217 | * aka 127 SBALs found. | ||
| 218 | */ | ||
| 219 | unsigned int nr_sbals[8]; | ||
| 220 | unsigned int nr_sbal_error; | ||
| 221 | unsigned int nr_sbal_nop; | ||
| 222 | unsigned int nr_sbal_total; | ||
| 211 | }; | 223 | }; |
| 212 | 224 | ||
| 213 | struct qdio_input_q { | 225 | struct qdio_input_q { |
| 214 | /* input buffer acknowledgement flag */ | 226 | /* input buffer acknowledgement flag */ |
| 215 | int polling; | 227 | int polling; |
| 216 | |||
| 217 | /* first ACK'ed buffer */ | 228 | /* first ACK'ed buffer */ |
| 218 | int ack_start; | 229 | int ack_start; |
| 219 | |||
| 220 | /* how much sbals are acknowledged with qebsm */ | 230 | /* how much sbals are acknowledged with qebsm */ |
| 221 | int ack_count; | 231 | int ack_count; |
| 222 | |||
| 223 | /* last time of noticing incoming data */ | 232 | /* last time of noticing incoming data */ |
| 224 | u64 timestamp; | 233 | u64 timestamp; |
| 225 | }; | 234 | }; |
| @@ -227,40 +236,27 @@ struct qdio_input_q { | |||
| 227 | struct qdio_output_q { | 236 | struct qdio_output_q { |
| 228 | /* PCIs are enabled for the queue */ | 237 | /* PCIs are enabled for the queue */ |
| 229 | int pci_out_enabled; | 238 | int pci_out_enabled; |
| 230 | |||
| 231 | /* IQDIO: output multiple buffers (enhanced SIGA) */ | 239 | /* IQDIO: output multiple buffers (enhanced SIGA) */ |
| 232 | int use_enh_siga; | 240 | int use_enh_siga; |
| 233 | |||
| 234 | /* timer to check for more outbound work */ | 241 | /* timer to check for more outbound work */ |
| 235 | struct timer_list timer; | 242 | struct timer_list timer; |
| 236 | }; | 243 | }; |
| 237 | 244 | ||
| 245 | /* | ||
| 246 | * Note on cache alignment: grouped slsb and write mostly data at the beginning | ||
| 247 | * sbal[] is read-only and starts on a new cacheline followed by read mostly. | ||
| 248 | */ | ||
| 238 | struct qdio_q { | 249 | struct qdio_q { |
| 239 | struct slsb slsb; | 250 | struct slsb slsb; |
| 251 | |||
| 240 | union { | 252 | union { |
| 241 | struct qdio_input_q in; | 253 | struct qdio_input_q in; |
| 242 | struct qdio_output_q out; | 254 | struct qdio_output_q out; |
| 243 | } u; | 255 | } u; |
| 244 | 256 | ||
| 245 | /* queue number */ | ||
| 246 | int nr; | ||
| 247 | |||
| 248 | /* bitmask of queue number */ | ||
| 249 | int mask; | ||
| 250 | |||
| 251 | /* input or output queue */ | ||
| 252 | int is_input_q; | ||
| 253 | |||
| 254 | /* list of thinint input queues */ | ||
| 255 | struct list_head entry; | ||
| 256 | |||
| 257 | /* upper-layer program handler */ | ||
| 258 | qdio_handler_t (*handler); | ||
| 259 | |||
| 260 | /* | 257 | /* |
| 261 | * inbound: next buffer the program should check for | 258 | * inbound: next buffer the program should check for |
| 262 | * outbound: next buffer to check for having been processed | 259 | * outbound: next buffer to check if adapter processed it |
| 263 | * by the card | ||
| 264 | */ | 260 | */ |
| 265 | int first_to_check; | 261 | int first_to_check; |
| 266 | 262 | ||
| @@ -273,16 +269,32 @@ struct qdio_q { | |||
| 273 | /* number of buffers in use by the adapter */ | 269 | /* number of buffers in use by the adapter */ |
| 274 | atomic_t nr_buf_used; | 270 | atomic_t nr_buf_used; |
| 275 | 271 | ||
| 276 | struct qdio_irq *irq_ptr; | ||
| 277 | struct dentry *debugfs_q; | ||
| 278 | struct tasklet_struct tasklet; | ||
| 279 | |||
| 280 | /* error condition during a data transfer */ | 272 | /* error condition during a data transfer */ |
| 281 | unsigned int qdio_error; | 273 | unsigned int qdio_error; |
| 282 | 274 | ||
| 283 | struct sl *sl; | 275 | struct tasklet_struct tasklet; |
| 284 | struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; | 276 | struct qdio_queue_perf_stat q_stats; |
| 277 | |||
| 278 | struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned; | ||
| 279 | |||
| 280 | /* queue number */ | ||
| 281 | int nr; | ||
| 282 | |||
| 283 | /* bitmask of queue number */ | ||
| 284 | int mask; | ||
| 285 | |||
| 286 | /* input or output queue */ | ||
| 287 | int is_input_q; | ||
| 288 | |||
| 289 | /* list of thinint input queues */ | ||
| 290 | struct list_head entry; | ||
| 285 | 291 | ||
| 292 | /* upper-layer program handler */ | ||
| 293 | qdio_handler_t (*handler); | ||
| 294 | |||
| 295 | struct dentry *debugfs_q; | ||
| 296 | struct qdio_irq *irq_ptr; | ||
| 297 | struct sl *sl; | ||
| 286 | /* | 298 | /* |
| 287 | * Warning: Leave this member at the end so it won't be cleared in | 299 | * Warning: Leave this member at the end so it won't be cleared in |
| 288 | * qdio_fill_qs. A page is allocated under this pointer and used for | 300 | * qdio_fill_qs. A page is allocated under this pointer and used for |
| @@ -317,12 +329,8 @@ struct qdio_irq { | |||
| 317 | struct qdio_ssqd_desc ssqd_desc; | 329 | struct qdio_ssqd_desc ssqd_desc; |
| 318 | void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); | 330 | void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); |
| 319 | 331 | ||
| 320 | struct qdio_dev_perf_stat perf_stat; | ||
| 321 | int perf_stat_enabled; | 332 | int perf_stat_enabled; |
| 322 | /* | 333 | |
| 323 | * Warning: Leave these members together at the end so they won't be | ||
| 324 | * cleared in qdio_setup_irq. | ||
| 325 | */ | ||
| 326 | struct qdr *qdr; | 334 | struct qdr *qdr; |
| 327 | unsigned long chsc_page; | 335 | unsigned long chsc_page; |
| 328 | 336 | ||
| @@ -331,6 +339,7 @@ struct qdio_irq { | |||
| 331 | 339 | ||
| 332 | debug_info_t *debug_area; | 340 | debug_info_t *debug_area; |
| 333 | struct mutex setup_mutex; | 341 | struct mutex setup_mutex; |
| 342 | struct qdio_dev_perf_stat perf_stat; | ||
| 334 | }; | 343 | }; |
| 335 | 344 | ||
| 336 | /* helper functions */ | 345 | /* helper functions */ |
| @@ -341,9 +350,20 @@ struct qdio_irq { | |||
| 341 | (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ | 350 | (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ |
| 342 | css_general_characteristics.aif_osa) | 351 | css_general_characteristics.aif_osa) |
| 343 | 352 | ||
| 344 | #define qperf(qdev,attr) qdev->perf_stat.attr | 353 | #define qperf(__qdev, __attr) ((__qdev)->perf_stat.(__attr)) |
| 345 | #define qperf_inc(q,attr) if (q->irq_ptr->perf_stat_enabled) \ | 354 | |
| 346 | q->irq_ptr->perf_stat.attr++ | 355 | #define qperf_inc(__q, __attr) \ |
| 356 | ({ \ | ||
| 357 | struct qdio_irq *qdev = (__q)->irq_ptr; \ | ||
| 358 | if (qdev->perf_stat_enabled) \ | ||
| 359 | (qdev->perf_stat.__attr)++; \ | ||
| 360 | }) | ||
| 361 | |||
| 362 | static inline void account_sbals_error(struct qdio_q *q, int count) | ||
| 363 | { | ||
| 364 | q->q_stats.nr_sbal_error += count; | ||
| 365 | q->q_stats.nr_sbal_total += count; | ||
| 366 | } | ||
| 347 | 367 | ||
| 348 | /* the highest iqdio queue is used for multicast */ | 368 | /* the highest iqdio queue is used for multicast */ |
| 349 | static inline int multicast_outbound(struct qdio_q *q) | 369 | static inline int multicast_outbound(struct qdio_q *q) |
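The rewritten qperf_inc() is a statement expression that evaluates its queue argument once and touches the counter only while perf_stat_enabled is set. A fragment for illustration (not part of this patch), mirroring a call site in qdio_main.c, with its approximate expansion shown in the trailing comment block:

	if (atomic_sub(count, &q->nr_buf_used) == 0)
		qperf_inc(q, inbound_queue_full);

	/* which expands roughly to:
	 *
	 *	({
	 *		struct qdio_irq *qdev = (q)->irq_ptr;
	 *		if (qdev->perf_stat_enabled)
	 *			(qdev->perf_stat.inbound_queue_full)++;
	 *	});
	 */
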
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index f49761ff9a00..c94eb2a0fa2e 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c | |||
| @@ -60,7 +60,7 @@ static int qstat_show(struct seq_file *m, void *v) | |||
| 60 | seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move); | 60 | seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move); |
| 61 | seq_printf(m, "polling: %d ack start: %d ack count: %d\n", | 61 | seq_printf(m, "polling: %d ack start: %d ack count: %d\n", |
| 62 | q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); | 62 | q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); |
| 63 | seq_printf(m, "slsb buffer states:\n"); | 63 | seq_printf(m, "SBAL states:\n"); |
| 64 | seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); | 64 | seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); |
| 65 | 65 | ||
| 66 | for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { | 66 | for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { |
| @@ -97,6 +97,20 @@ static int qstat_show(struct seq_file *m, void *v) | |||
| 97 | } | 97 | } |
| 98 | seq_printf(m, "\n"); | 98 | seq_printf(m, "\n"); |
| 99 | seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n"); | 99 | seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n"); |
| 100 | |||
| 101 | seq_printf(m, "\nSBAL statistics:"); | ||
| 102 | if (!q->irq_ptr->perf_stat_enabled) { | ||
| 103 | seq_printf(m, " disabled\n"); | ||
| 104 | return 0; | ||
| 105 | } | ||
| 106 | |||
| 107 | seq_printf(m, "\n1 2.. 4.. 8.. " | ||
| 108 | "16.. 32.. 64.. 127\n"); | ||
| 109 | for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++) | ||
| 110 | seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]); | ||
| 111 | seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n", | ||
| 112 | q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop, | ||
| 113 | q->q_stats.nr_sbal_total); | ||
| 100 | return 0; | 114 | return 0; |
| 101 | } | 115 | } |
| 102 | 116 | ||
| @@ -181,9 +195,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf, | |||
| 181 | { | 195 | { |
| 182 | struct seq_file *seq = file->private_data; | 196 | struct seq_file *seq = file->private_data; |
| 183 | struct qdio_irq *irq_ptr = seq->private; | 197 | struct qdio_irq *irq_ptr = seq->private; |
| 198 | struct qdio_q *q; | ||
| 184 | unsigned long val; | 199 | unsigned long val; |
| 185 | char buf[8]; | 200 | char buf[8]; |
| 186 | int ret; | 201 | int ret, i; |
| 187 | 202 | ||
| 188 | if (!irq_ptr) | 203 | if (!irq_ptr) |
| 189 | return 0; | 204 | return 0; |
| @@ -201,6 +216,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf, | |||
| 201 | case 0: | 216 | case 0: |
| 202 | irq_ptr->perf_stat_enabled = 0; | 217 | irq_ptr->perf_stat_enabled = 0; |
| 203 | memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); | 218 | memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); |
| 219 | for_each_input_queue(irq_ptr, q, i) | ||
| 220 | memset(&q->q_stats, 0, sizeof(q->q_stats)); | ||
| 221 | for_each_output_queue(irq_ptr, q, i) | ||
| 222 | memset(&q->q_stats, 0, sizeof(q->q_stats)); | ||
| 204 | break; | 223 | break; |
| 205 | case 1: | 224 | case 1: |
| 206 | irq_ptr->perf_stat_enabled = 1; | 225 | irq_ptr->perf_stat_enabled = 1; |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 62b654af9237..232ef047ba34 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
| @@ -392,6 +392,20 @@ static inline void qdio_stop_polling(struct qdio_q *q) | |||
| 392 | set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); | 392 | set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); |
| 393 | } | 393 | } |
| 394 | 394 | ||
| 395 | static inline void account_sbals(struct qdio_q *q, int count) | ||
| 396 | { | ||
| 397 | int pos = 0; | ||
| 398 | |||
| 399 | q->q_stats.nr_sbal_total += count; | ||
| 400 | if (count == QDIO_MAX_BUFFERS_MASK) { | ||
| 401 | q->q_stats.nr_sbals[7]++; | ||
| 402 | return; | ||
| 403 | } | ||
| 404 | while (count >>= 1) | ||
| 405 | pos++; | ||
| 406 | q->q_stats.nr_sbals[pos]++; | ||
| 407 | } | ||
| 408 | |||
| 395 | static void announce_buffer_error(struct qdio_q *q, int count) | 409 | static void announce_buffer_error(struct qdio_q *q, int count) |
| 396 | { | 410 | { |
| 397 | q->qdio_error |= QDIO_ERROR_SLSB_STATE; | 411 | q->qdio_error |= QDIO_ERROR_SLSB_STATE; |
| @@ -487,16 +501,22 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) | |||
| 487 | q->first_to_check = add_buf(q->first_to_check, count); | 501 | q->first_to_check = add_buf(q->first_to_check, count); |
| 488 | if (atomic_sub(count, &q->nr_buf_used) == 0) | 502 | if (atomic_sub(count, &q->nr_buf_used) == 0) |
| 489 | qperf_inc(q, inbound_queue_full); | 503 | qperf_inc(q, inbound_queue_full); |
| 504 | if (q->irq_ptr->perf_stat_enabled) | ||
| 505 | account_sbals(q, count); | ||
| 490 | break; | 506 | break; |
| 491 | case SLSB_P_INPUT_ERROR: | 507 | case SLSB_P_INPUT_ERROR: |
| 492 | announce_buffer_error(q, count); | 508 | announce_buffer_error(q, count); |
| 493 | /* process the buffer, the upper layer will take care of it */ | 509 | /* process the buffer, the upper layer will take care of it */ |
| 494 | q->first_to_check = add_buf(q->first_to_check, count); | 510 | q->first_to_check = add_buf(q->first_to_check, count); |
| 495 | atomic_sub(count, &q->nr_buf_used); | 511 | atomic_sub(count, &q->nr_buf_used); |
| 512 | if (q->irq_ptr->perf_stat_enabled) | ||
| 513 | account_sbals_error(q, count); | ||
| 496 | break; | 514 | break; |
| 497 | case SLSB_CU_INPUT_EMPTY: | 515 | case SLSB_CU_INPUT_EMPTY: |
| 498 | case SLSB_P_INPUT_NOT_INIT: | 516 | case SLSB_P_INPUT_NOT_INIT: |
| 499 | case SLSB_P_INPUT_ACK: | 517 | case SLSB_P_INPUT_ACK: |
| 518 | if (q->irq_ptr->perf_stat_enabled) | ||
| 519 | q->q_stats.nr_sbal_nop++; | ||
| 500 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); | 520 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); |
| 501 | break; | 521 | break; |
| 502 | default: | 522 | default: |
| @@ -514,7 +534,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q) | |||
| 514 | 534 | ||
| 515 | if ((bufnr != q->last_move) || q->qdio_error) { | 535 | if ((bufnr != q->last_move) || q->qdio_error) { |
| 516 | q->last_move = bufnr; | 536 | q->last_move = bufnr; |
| 517 | if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM) | 537 | if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) |
| 518 | q->u.in.timestamp = get_usecs(); | 538 | q->u.in.timestamp = get_usecs(); |
| 519 | return 1; | 539 | return 1; |
| 520 | } else | 540 | } else |
| @@ -643,15 +663,21 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) | |||
| 643 | 663 | ||
| 644 | atomic_sub(count, &q->nr_buf_used); | 664 | atomic_sub(count, &q->nr_buf_used); |
| 645 | q->first_to_check = add_buf(q->first_to_check, count); | 665 | q->first_to_check = add_buf(q->first_to_check, count); |
| 666 | if (q->irq_ptr->perf_stat_enabled) | ||
| 667 | account_sbals(q, count); | ||
| 646 | break; | 668 | break; |
| 647 | case SLSB_P_OUTPUT_ERROR: | 669 | case SLSB_P_OUTPUT_ERROR: |
| 648 | announce_buffer_error(q, count); | 670 | announce_buffer_error(q, count); |
| 649 | /* process the buffer, the upper layer will take care of it */ | 671 | /* process the buffer, the upper layer will take care of it */ |
| 650 | q->first_to_check = add_buf(q->first_to_check, count); | 672 | q->first_to_check = add_buf(q->first_to_check, count); |
| 651 | atomic_sub(count, &q->nr_buf_used); | 673 | atomic_sub(count, &q->nr_buf_used); |
| 674 | if (q->irq_ptr->perf_stat_enabled) | ||
| 675 | account_sbals_error(q, count); | ||
| 652 | break; | 676 | break; |
| 653 | case SLSB_CU_OUTPUT_PRIMED: | 677 | case SLSB_CU_OUTPUT_PRIMED: |
| 654 | /* the adapter has not fetched the output yet */ | 678 | /* the adapter has not fetched the output yet */ |
| 679 | if (q->irq_ptr->perf_stat_enabled) | ||
| 680 | q->q_stats.nr_sbal_nop++; | ||
| 655 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); | 681 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); |
| 656 | break; | 682 | break; |
| 657 | case SLSB_P_OUTPUT_NOT_INIT: | 683 | case SLSB_P_OUTPUT_NOT_INIT: |
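account_sbals() sorts SBAL counts into the order-2 buckets declared in qdio.h; since at most 127 SBALs are scanned per pass, a full scan reuses the last bucket. A standalone userspace sketch of the same arithmetic (QDIO_MAX_BUFFERS_MASK is assumed to be 127, as in asm/qdio.h; not part of this patch):

	#include <stdio.h>

	#define QDIO_MAX_BUFFERS_MASK	127	/* assumed from asm/qdio.h */

	/* same bucketing as account_sbals(): 1, 2-3, 4-7, ..., 64-126, 127 */
	static int sbal_bucket(int count)
	{
		int pos = 0;

		if (count == QDIO_MAX_BUFFERS_MASK)
			return 7;	/* "queue full" bucket */
		while (count >>= 1)
			pos++;
		return pos;
	}

	int main(void)
	{
		int samples[] = { 1, 3, 5, 64, 126, 127 };
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("count %3d -> bucket %d\n",
			       samples[i], sbal_bucket(samples[i]));
		return 0;	/* prints buckets 0, 1, 2, 6, 6, 7 */
	}
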
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 8c2dea5fa2b4..7f4a75465140 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c | |||
| @@ -333,10 +333,10 @@ static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr, | |||
| 333 | irq_ptr->qdr->qdf0[i + nr].slsba = | 333 | irq_ptr->qdr->qdf0[i + nr].slsba = |
| 334 | (unsigned long)&irq_ptr_qs[i]->slsb.val[0]; | 334 | (unsigned long)&irq_ptr_qs[i]->slsb.val[0]; |
| 335 | 335 | ||
| 336 | irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY; | 336 | irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4; |
| 337 | irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY; | 337 | irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4; |
| 338 | irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY; | 338 | irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4; |
| 339 | irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY; | 339 | irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4; |
| 340 | } | 340 | } |
| 341 | 341 | ||
| 342 | static void setup_qdr(struct qdio_irq *irq_ptr, | 342 | static void setup_qdr(struct qdio_irq *irq_ptr, |
| @@ -350,7 +350,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr, | |||
| 350 | irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ | 350 | irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ |
| 351 | irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4; | 351 | irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4; |
| 352 | irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib; | 352 | irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib; |
| 353 | irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY; | 353 | irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4; |
| 354 | 354 | ||
| 355 | for (i = 0; i < qdio_init->no_input_qs; i++) | 355 | for (i = 0; i < qdio_init->no_input_qs; i++) |
| 356 | __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0); | 356 | __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0); |
| @@ -382,7 +382,15 @@ int qdio_setup_irq(struct qdio_initialize *init_data) | |||
| 382 | struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; | 382 | struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; |
| 383 | int rc; | 383 | int rc; |
| 384 | 384 | ||
| 385 | memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr)); | 385 | memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); |
| 386 | memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); | ||
| 387 | memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw)); | ||
| 388 | memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc)); | ||
| 389 | memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); | ||
| 390 | |||
| 391 | irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL; | ||
| 392 | irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0; | ||
| 393 | |||
| 386 | /* wipes qib.ac, required by ar7063 */ | 394 | /* wipes qib.ac, required by ar7063 */ |
| 387 | memset(irq_ptr->qdr, 0, sizeof(struct qdr)); | 395 | memset(irq_ptr->qdr, 0, sizeof(struct qdr)); |
| 388 | 396 | ||
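The key fields touched here (akey/bkey/ckey/dkey, qkey, and the scssc ks/kc fields in qdio_thinint.c below) are 4-bit storage-key fields, whereas PAGE_DEFAULT_KEY, assuming the usual asm/page.h definition, carries the access key pre-shifted into bits 4-7; shifting right by 4 therefore stores the plain key value rather than key << 4. A short illustration under that assumption (not part of this patch):

	/* assumed definitions from arch/s390/include/asm/page.h */
	#define PAGE_DEFAULT_ACC	0
	#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)

	/* the 4-bit qdesfmt0 field gets the key itself, not key << 4 */
	irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
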
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 091d904d3182..9942c1031b25 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c | |||
| @@ -198,8 +198,8 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) | |||
| 198 | .code = 0x0021, | 198 | .code = 0x0021, |
| 199 | }; | 199 | }; |
| 200 | scssc_area->operation_code = 0; | 200 | scssc_area->operation_code = 0; |
| 201 | scssc_area->ks = PAGE_DEFAULT_KEY; | 201 | scssc_area->ks = PAGE_DEFAULT_KEY >> 4; |
| 202 | scssc_area->kc = PAGE_DEFAULT_KEY; | 202 | scssc_area->kc = PAGE_DEFAULT_KEY >> 4; |
| 203 | scssc_area->isc = QDIO_AIRQ_ISC; | 203 | scssc_area->isc = QDIO_AIRQ_ISC; |
| 204 | scssc_area->schid = irq_ptr->schid; | 204 | scssc_area->schid = irq_ptr->schid; |
| 205 | 205 | ||
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index c68be24e27d9..ba50fe02e572 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/miscdevice.h> | 33 | #include <linux/miscdevice.h> |
| 34 | #include <linux/fs.h> | 34 | #include <linux/fs.h> |
| 35 | #include <linux/proc_fs.h> | 35 | #include <linux/proc_fs.h> |
| 36 | #include <linux/seq_file.h> | ||
| 36 | #include <linux/compat.h> | 37 | #include <linux/compat.h> |
| 37 | #include <linux/smp_lock.h> | 38 | #include <linux/smp_lock.h> |
| 38 | #include <asm/atomic.h> | 39 | #include <asm/atomic.h> |
| @@ -912,126 +913,105 @@ static struct miscdevice zcrypt_misc_device = { | |||
| 912 | */ | 913 | */ |
| 913 | static struct proc_dir_entry *zcrypt_entry; | 914 | static struct proc_dir_entry *zcrypt_entry; |
| 914 | 915 | ||
| 915 | static int sprintcl(unsigned char *outaddr, unsigned char *addr, | 916 | static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len) |
| 916 | unsigned int len) | ||
| 917 | { | 917 | { |
| 918 | int hl, i; | 918 | int i; |
| 919 | 919 | ||
| 920 | hl = 0; | ||
| 921 | for (i = 0; i < len; i++) | 920 | for (i = 0; i < len; i++) |
| 922 | hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]); | 921 | seq_printf(m, "%01x", (unsigned int) addr[i]); |
| 923 | hl += sprintf(outaddr+hl, " "); | 922 | seq_putc(m, ' '); |
| 924 | return hl; | ||
| 925 | } | 923 | } |
| 926 | 924 | ||
| 927 | static int sprintrw(unsigned char *outaddr, unsigned char *addr, | 925 | static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len) |
| 928 | unsigned int len) | ||
| 929 | { | 926 | { |
| 930 | int hl, inl, c, cx; | 927 | int inl, c, cx; |
| 931 | 928 | ||
| 932 | hl = sprintf(outaddr, " "); | 929 | seq_printf(m, " "); |
| 933 | inl = 0; | 930 | inl = 0; |
| 934 | for (c = 0; c < (len / 16); c++) { | 931 | for (c = 0; c < (len / 16); c++) { |
| 935 | hl += sprintcl(outaddr+hl, addr+inl, 16); | 932 | sprintcl(m, addr+inl, 16); |
| 936 | inl += 16; | 933 | inl += 16; |
| 937 | } | 934 | } |
| 938 | cx = len%16; | 935 | cx = len%16; |
| 939 | if (cx) { | 936 | if (cx) { |
| 940 | hl += sprintcl(outaddr+hl, addr+inl, cx); | 937 | sprintcl(m, addr+inl, cx); |
| 941 | inl += cx; | 938 | inl += cx; |
| 942 | } | 939 | } |
| 943 | hl += sprintf(outaddr+hl, "\n"); | 940 | seq_putc(m, '\n'); |
| 944 | return hl; | ||
| 945 | } | 941 | } |
| 946 | 942 | ||
| 947 | static int sprinthx(unsigned char *title, unsigned char *outaddr, | 943 | static void sprinthx(unsigned char *title, struct seq_file *m, |
| 948 | unsigned char *addr, unsigned int len) | 944 | unsigned char *addr, unsigned int len) |
| 949 | { | 945 | { |
| 950 | int hl, inl, r, rx; | 946 | int inl, r, rx; |
| 951 | 947 | ||
| 952 | hl = sprintf(outaddr, "\n%s\n", title); | 948 | seq_printf(m, "\n%s\n", title); |
| 953 | inl = 0; | 949 | inl = 0; |
| 954 | for (r = 0; r < (len / 64); r++) { | 950 | for (r = 0; r < (len / 64); r++) { |
| 955 | hl += sprintrw(outaddr+hl, addr+inl, 64); | 951 | sprintrw(m, addr+inl, 64); |
| 956 | inl += 64; | 952 | inl += 64; |
| 957 | } | 953 | } |
| 958 | rx = len % 64; | 954 | rx = len % 64; |
| 959 | if (rx) { | 955 | if (rx) { |
| 960 | hl += sprintrw(outaddr+hl, addr+inl, rx); | 956 | sprintrw(m, addr+inl, rx); |
| 961 | inl += rx; | 957 | inl += rx; |
| 962 | } | 958 | } |
| 963 | hl += sprintf(outaddr+hl, "\n"); | 959 | seq_putc(m, '\n'); |
| 964 | return hl; | ||
| 965 | } | 960 | } |
| 966 | 961 | ||
| 967 | static int sprinthx4(unsigned char *title, unsigned char *outaddr, | 962 | static void sprinthx4(unsigned char *title, struct seq_file *m, |
| 968 | unsigned int *array, unsigned int len) | 963 | unsigned int *array, unsigned int len) |
| 969 | { | 964 | { |
| 970 | int hl, r; | 965 | int r; |
| 971 | 966 | ||
| 972 | hl = sprintf(outaddr, "\n%s\n", title); | 967 | seq_printf(m, "\n%s\n", title); |
| 973 | for (r = 0; r < len; r++) { | 968 | for (r = 0; r < len; r++) { |
| 974 | if ((r % 8) == 0) | 969 | if ((r % 8) == 0) |
| 975 | hl += sprintf(outaddr+hl, " "); | 970 | seq_printf(m, " "); |
| 976 | hl += sprintf(outaddr+hl, "%08X ", array[r]); | 971 | seq_printf(m, "%08X ", array[r]); |
| 977 | if ((r % 8) == 7) | 972 | if ((r % 8) == 7) |
| 978 | hl += sprintf(outaddr+hl, "\n"); | 973 | seq_putc(m, '\n'); |
| 979 | } | 974 | } |
| 980 | hl += sprintf(outaddr+hl, "\n"); | 975 | seq_putc(m, '\n'); |
| 981 | return hl; | ||
| 982 | } | 976 | } |
| 983 | 977 | ||
| 984 | static int zcrypt_status_read(char *resp_buff, char **start, off_t offset, | 978 | static int zcrypt_proc_show(struct seq_file *m, void *v) |
| 985 | int count, int *eof, void *data) | ||
| 986 | { | 979 | { |
| 987 | unsigned char *workarea; | 980 | char workarea[sizeof(int) * AP_DEVICES]; |
| 988 | int len; | 981 | |
| 989 | 982 | seq_printf(m, "\nzcrypt version: %d.%d.%d\n", | |
| 990 | len = 0; | 983 | ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); |
| 991 | 984 | seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index); | |
| 992 | /* resp_buff is a page. Use the right half for a work area */ | 985 | seq_printf(m, "Total device count: %d\n", zcrypt_device_count); |
| 993 | workarea = resp_buff + 2000; | 986 | seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA)); |
| 994 | len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n", | 987 | seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC)); |
| 995 | ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); | 988 | seq_printf(m, "PCIXCC MCL2 count: %d\n", |
| 996 | len += sprintf(resp_buff + len, "Cryptographic domain: %d\n", | 989 | zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); |
| 997 | ap_domain_index); | 990 | seq_printf(m, "PCIXCC MCL3 count: %d\n", |
| 998 | len += sprintf(resp_buff + len, "Total device count: %d\n", | 991 | zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); |
| 999 | zcrypt_device_count); | 992 | seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C)); |
| 1000 | len += sprintf(resp_buff + len, "PCICA count: %d\n", | 993 | seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A)); |
| 1001 | zcrypt_count_type(ZCRYPT_PCICA)); | 994 | seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C)); |
| 1002 | len += sprintf(resp_buff + len, "PCICC count: %d\n", | 995 | seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A)); |
| 1003 | zcrypt_count_type(ZCRYPT_PCICC)); | 996 | seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count()); |
| 1004 | len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n", | 997 | seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count()); |
| 1005 | zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); | 998 | seq_printf(m, "Total open handles: %d\n\n", |
| 1006 | len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n", | 999 | atomic_read(&zcrypt_open_count)); |
| 1007 | zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); | ||
| 1008 | len += sprintf(resp_buff + len, "CEX2C count: %d\n", | ||
| 1009 | zcrypt_count_type(ZCRYPT_CEX2C)); | ||
| 1010 | len += sprintf(resp_buff + len, "CEX2A count: %d\n", | ||
| 1011 | zcrypt_count_type(ZCRYPT_CEX2A)); | ||
| 1012 | len += sprintf(resp_buff + len, "CEX3C count: %d\n", | ||
| 1013 | zcrypt_count_type(ZCRYPT_CEX3C)); | ||
| 1014 | len += sprintf(resp_buff + len, "CEX3A count: %d\n", | ||
| 1015 | zcrypt_count_type(ZCRYPT_CEX3A)); | ||
| 1016 | len += sprintf(resp_buff + len, "requestq count: %d\n", | ||
| 1017 | zcrypt_requestq_count()); | ||
| 1018 | len += sprintf(resp_buff + len, "pendingq count: %d\n", | ||
| 1019 | zcrypt_pendingq_count()); | ||
| 1020 | len += sprintf(resp_buff + len, "Total open handles: %d\n\n", | ||
| 1021 | atomic_read(&zcrypt_open_count)); | ||
| 1022 | zcrypt_status_mask(workarea); | 1000 | zcrypt_status_mask(workarea); |
| 1023 | len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " | 1001 | sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " |
| 1024 | "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A", | 1002 | "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A", |
| 1025 | resp_buff+len, workarea, AP_DEVICES); | 1003 | m, workarea, AP_DEVICES); |
| 1026 | zcrypt_qdepth_mask(workarea); | 1004 | zcrypt_qdepth_mask(workarea); |
| 1027 | len += sprinthx("Waiting work element counts", | 1005 | sprinthx("Waiting work element counts", m, workarea, AP_DEVICES); |
| 1028 | resp_buff+len, workarea, AP_DEVICES); | ||
| 1029 | zcrypt_perdev_reqcnt((int *) workarea); | 1006 | zcrypt_perdev_reqcnt((int *) workarea); |
| 1030 | len += sprinthx4("Per-device successfully completed request counts", | 1007 | sprinthx4("Per-device successfully completed request counts", |
| 1031 | resp_buff+len,(unsigned int *) workarea, AP_DEVICES); | 1008 | m, (unsigned int *) workarea, AP_DEVICES); |
| 1032 | *eof = 1; | 1009 | return 0; |
| 1033 | memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int)); | 1010 | } |
| 1034 | return len; | 1011 | |
| 1012 | static int zcrypt_proc_open(struct inode *inode, struct file *file) | ||
| 1013 | { | ||
| 1014 | return single_open(file, zcrypt_proc_show, NULL); | ||
| 1035 | } | 1015 | } |
| 1036 | 1016 | ||
| 1037 | static void zcrypt_disable_card(int index) | 1017 | static void zcrypt_disable_card(int index) |
| @@ -1061,11 +1041,11 @@ static void zcrypt_enable_card(int index) | |||
| 1061 | spin_unlock_bh(&zcrypt_device_lock); | 1041 | spin_unlock_bh(&zcrypt_device_lock); |
| 1062 | } | 1042 | } |
| 1063 | 1043 | ||
| 1064 | static int zcrypt_status_write(struct file *file, const char __user *buffer, | 1044 | static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer, |
| 1065 | unsigned long count, void *data) | 1045 | size_t count, loff_t *pos) |
| 1066 | { | 1046 | { |
| 1067 | unsigned char *lbuf, *ptr; | 1047 | unsigned char *lbuf, *ptr; |
| 1068 | unsigned long local_count; | 1048 | size_t local_count; |
| 1069 | int j; | 1049 | int j; |
| 1070 | 1050 | ||
| 1071 | if (count <= 0) | 1051 | if (count <= 0) |
| @@ -1115,6 +1095,15 @@ out: | |||
| 1115 | return count; | 1095 | return count; |
| 1116 | } | 1096 | } |
| 1117 | 1097 | ||
| 1098 | static const struct file_operations zcrypt_proc_fops = { | ||
| 1099 | .owner = THIS_MODULE, | ||
| 1100 | .open = zcrypt_proc_open, | ||
| 1101 | .read = seq_read, | ||
| 1102 | .llseek = seq_lseek, | ||
| 1103 | .release = single_release, | ||
| 1104 | .write = zcrypt_proc_write, | ||
| 1105 | }; | ||
| 1106 | |||
| 1118 | static int zcrypt_rng_device_count; | 1107 | static int zcrypt_rng_device_count; |
| 1119 | static u32 *zcrypt_rng_buffer; | 1108 | static u32 *zcrypt_rng_buffer; |
| 1120 | static int zcrypt_rng_buffer_index; | 1109 | static int zcrypt_rng_buffer_index; |
| @@ -1197,14 +1186,11 @@ int __init zcrypt_api_init(void) | |||
| 1197 | goto out; | 1186 | goto out; |
| 1198 | 1187 | ||
| 1199 | /* Set up the proc file system */ | 1188 | /* Set up the proc file system */ |
| 1200 | zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); | 1189 | zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops); |
| 1201 | if (!zcrypt_entry) { | 1190 | if (!zcrypt_entry) { |
| 1202 | rc = -ENOMEM; | 1191 | rc = -ENOMEM; |
| 1203 | goto out_misc; | 1192 | goto out_misc; |
| 1204 | } | 1193 | } |
| 1205 | zcrypt_entry->data = NULL; | ||
| 1206 | zcrypt_entry->read_proc = zcrypt_status_read; | ||
| 1207 | zcrypt_entry->write_proc = zcrypt_status_write; | ||
| 1208 | 1194 | ||
| 1209 | return 0; | 1195 | return 0; |
| 1210 | 1196 | ||
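The z90crypt proc entry moves from the removed read_proc/write_proc hooks to the single_open()/seq_file pattern shown above. A minimal, self-contained sketch of that pattern for an arbitrary entry (all demo_* names are invented; only the kernel APIs are real, and this is not part of the patch):

	#include <linux/module.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int demo_proc_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "hello from seq_file\n");	/* rendered into the seq buffer */
		return 0;
	}

	static int demo_proc_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_proc_show, NULL);
	}

	static const struct file_operations demo_proc_fops = {
		.owner		= THIS_MODULE,
		.open		= demo_proc_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	static int __init demo_proc_init(void)
	{
		/* mirrors the proc_create() call in zcrypt_api_init() above */
		return proc_create("driver/demo", 0444, NULL, &demo_proc_fops) ?
			0 : -ENOMEM;
	}
	module_init(demo_proc_init);

	static void __exit demo_proc_exit(void)
	{
		remove_proc_entry("driver/demo", NULL);
	}
	module_exit(demo_proc_exit);
	MODULE_LICENSE("GPL");
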
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index 2930fc763ac5..b2fc4fd63f7f 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c | |||
| @@ -340,11 +340,11 @@ static void kvm_extint_handler(u16 code) | |||
| 340 | return; | 340 | return; |
| 341 | 341 | ||
| 342 | /* The LSB might be overloaded, we have to mask it */ | 342 | /* The LSB might be overloaded, we have to mask it */ |
| 343 | vq = (struct virtqueue *) ((*(long *) __LC_PFAULT_INTPARM) & ~1UL); | 343 | vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL); |
| 344 | 344 | ||
| 345 | /* We use the LSB of extparam, to decide, if this interrupt is a config | 345 | /* We use the LSB of extparam, to decide, if this interrupt is a config |
| 346 | * change or a "standard" interrupt */ | 346 | * change or a "standard" interrupt */ |
| 347 | config_changed = (*(int *) __LC_EXT_PARAMS & 1); | 347 | config_changed = S390_lowcore.ext_params & 1; |
| 348 | 348 | ||
| 349 | if (config_changed) { | 349 | if (config_changed) { |
| 350 | struct virtio_driver *drv; | 350 | struct virtio_driver *drv; |
