133 files changed, 6863 insertions, 3083 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-css b/Documentation/ABI/testing/sysfs-bus-css
new file mode 100644
index 000000000000..b585ec258a08
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-css
| @@ -0,0 +1,35 @@ | |||
| 1 | What: /sys/bus/css/devices/.../type | ||
| 2 | Date: March 2008 | ||
| 3 | Contact: Cornelia Huck <cornelia.huck@de.ibm.com> | ||
| 4 | linux-s390@vger.kernel.org | ||
| 5 | Description: Contains the subchannel type, as reported by the hardware. | ||
| 6 | This attribute is present for all subchannel types. | ||
| 7 | |||
| 8 | What: /sys/bus/css/devices/.../modalias | ||
| 9 | Date: March 2008 | ||
| 10 | Contact: Cornelia Huck <cornelia.huck@de.ibm.com> | ||
| 11 | linux-s390@vger.kernel.org | ||
| 12 | Description: Contains the module alias as reported with uevents. | ||
| 13 | It is of the format css:t<type> and present for all | ||
| 14 | subchannel types. | ||
| 15 | |||
| 16 | What: /sys/bus/css/drivers/io_subchannel/.../chpids | ||
| 17 | Date: December 2002 | ||
| 18 | Contact: Cornelia Huck <cornelia.huck@de.ibm.com> | ||
| 19 | linux-s390@vger.kernel.org | ||
| 20 | Description: Contains the ids of the channel paths used by this | ||
| 21 | subchannel, as reported by the channel subsystem | ||
| 22 | during subchannel recognition. | ||
| 23 | Note: This is an I/O-subchannel specific attribute. | ||
| 24 | Users: s390-tools, HAL | ||
| 25 | |||
| 26 | What: /sys/bus/css/drivers/io_subchannel/.../pimpampom | ||
| 27 | Date: December 2002 | ||
| 28 | Contact: Cornelia Huck <cornelia.huck@de.ibm.com> | ||
| 29 | linux-s390@vger.kernel.org | ||
| 30 | Description: Contains the PIM/PAM/POM values, as reported by the | ||
| 31 | channel subsystem when last queried by the common I/O | ||
| 32 | layer (this implies that this attribute is not necessarily | ||
| 33 | in sync with the values current in the channel subsystem). | ||
| 34 | Note: This is an I/O-subchannel specific attribute. | ||
| 35 | Users: s390-tools, HAL | ||
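Note on the new ABI file: these attributes are plain text files under sysfs. A minimal userspace sketch that dumps the two css-wide attributes for one subchannel could look as follows; the bus ID 0.0.0000 is a made-up example device, not something defined by this patch.

#include <stdio.h>

static void dump_attr(const char *path)
{
	char buf[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);	/* sysfs values include a trailing newline */
	fclose(f);
}

int main(void)
{
	/* "0.0.0000" is a hypothetical subchannel bus ID */
	dump_attr("/sys/bus/css/devices/0.0.0000/type");
	dump_attr("/sys/bus/css/devices/0.0.0000/modalias");
	return 0;
}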
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt
index 240ce7a56c40..3bb5f466a90d 100644
--- a/Documentation/ioctl-number.txt
+++ b/Documentation/ioctl-number.txt
| @@ -117,6 +117,7 @@ Code Seq# Include File Comments | |||
| 117 | <mailto:natalia@nikhefk.nikhef.nl> | 117 | <mailto:natalia@nikhefk.nikhef.nl> |
| 118 | 'c' 00-7F linux/comstats.h conflict! | 118 | 'c' 00-7F linux/comstats.h conflict! |
| 119 | 'c' 00-7F linux/coda.h conflict! | 119 | 'c' 00-7F linux/coda.h conflict! |
| 120 | 'c' 80-9F asm-s390/chsc.h | ||
| 120 | 'd' 00-FF linux/char/drm/drm/h conflict! | 121 | 'd' 00-FF linux/char/drm/drm/h conflict! |
| 121 | 'd' 00-DF linux/video_decoder.h conflict! | 122 | 'd' 00-DF linux/video_decoder.h conflict! |
| 122 | 'd' F0-FF linux/digi1.h | 123 | 'd' F0-FF linux/digi1.h |
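The table entry above only reserves the magic 'c' with sequence numbers 0x80-0x9F; the real request codes live in asm-s390/chsc.h, which is not part of this hunk. Purely as an illustration of how a number in that range is composed, a hypothetical request might be defined like this (name and payload invented for the example):

#include <linux/ioctl.h>

/* Hypothetical request in the reserved 'c' 0x80-0x9F range; the real
 * definitions live in asm-s390/chsc.h and are not reproduced here. */
struct example_chsc_request {
	unsigned char area[4096];
};

#define EXAMPLE_CHSC_IOC	_IOWR('c', 0x80, struct example_chsc_request)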
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 107e492cb47e..5dc8f8028d52 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
| @@ -146,6 +146,7 @@ config MATHEMU | |||
| 146 | config COMPAT | 146 | config COMPAT |
| 147 | bool "Kernel support for 31 bit emulation" | 147 | bool "Kernel support for 31 bit emulation" |
| 148 | depends on 64BIT | 148 | depends on 64BIT |
| 149 | select COMPAT_BINFMT_ELF | ||
| 149 | help | 150 | help |
| 150 | Select this option if you want to enable your system kernel to | 151 | Select this option if you want to enable your system kernel to |
| 151 | handle system-calls from ELF binaries for 31 bit ESA. This option | 152 | handle system-calls from ELF binaries for 31 bit ESA. This option |
| @@ -312,6 +313,10 @@ config ARCH_SPARSEMEM_DEFAULT | |||
| 312 | config ARCH_SELECT_MEMORY_MODEL | 313 | config ARCH_SELECT_MEMORY_MODEL |
| 313 | def_bool y | 314 | def_bool y |
| 314 | 315 | ||
| 316 | config ARCH_ENABLE_MEMORY_HOTPLUG | ||
| 317 | def_bool y | ||
| 318 | depends on SPARSEMEM | ||
| 319 | |||
| 315 | source "mm/Kconfig" | 320 | source "mm/Kconfig" |
| 316 | 321 | ||
| 317 | comment "I/O subsystem configuration" | 322 | comment "I/O subsystem configuration" |
| @@ -344,6 +349,22 @@ config QDIO_DEBUG | |||
| 344 | 349 | ||
| 345 | If unsure, say N. | 350 | If unsure, say N. |
| 346 | 351 | ||
| 352 | config CHSC_SCH | ||
| 353 | tristate "Support for CHSC subchannels" | ||
| 354 | help | ||
| 355 | This driver allows usage of CHSC subchannels. A CHSC subchannel | ||
| 356 | is usually present on LPAR only. | ||
| 357 | The driver creates a device /dev/chsc, which may be used to | ||
| 358 | obtain I/O configuration information about the machine and | ||
| 359 | to issue asynchronous chsc commands (DANGEROUS). | ||
| 360 | You will usually only want to use this interface on a special | ||
| 361 | LPAR designated for system management. | ||
| 362 | |||
| 363 | To compile this driver as a module, choose M here: the | ||
| 364 | module will be called chsc_sch. | ||
| 365 | |||
| 366 | If unsure, say N. | ||
| 367 | |||
| 347 | comment "Misc" | 368 | comment "Misc" |
| 348 | 369 | ||
| 349 | config IPL | 370 | config IPL |
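As the CHSC_SCH help text says, the resulting /dev/chsc node is only meant for a management LPAR. A minimal userspace check that the chsc_sch module is loaded and the node can be opened might look like this; the actual ioctl definitions are in asm-s390/chsc.h and are intentionally not used here:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* chsc_sch (the module named in the help text) provides /dev/chsc */
	int fd = open("/dev/chsc", O_RDWR);

	if (fd < 0) {
		perror("/dev/chsc");
		return 1;
	}
	puts("/dev/chsc opened; CHSC ioctls from asm-s390/chsc.h could be issued here");
	close(fd);
	return 0;
}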
diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h
index db3ae8505103..17a2636fec0a 100644
--- a/arch/s390/appldata/appldata.h
+++ b/arch/s390/appldata/appldata.h
| @@ -3,13 +3,11 @@ | |||
| 3 | * | 3 | * |
| 4 | * Definitions and interface for Linux - z/VM Monitor Stream. | 4 | * Definitions and interface for Linux - z/VM Monitor Stream. |
| 5 | * | 5 | * |
| 6 | * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH. | 6 | * Copyright IBM Corp. 2003, 2008 |
| 7 | * | 7 | * |
| 8 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> | 8 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | //#define APPLDATA_DEBUG /* Debug messages on/off */ | ||
| 12 | |||
| 13 | #define APPLDATA_MAX_REC_SIZE 4024 /* Maximum size of the */ | 11 | #define APPLDATA_MAX_REC_SIZE 4024 /* Maximum size of the */ |
| 14 | /* data buffer */ | 12 | /* data buffer */ |
| 15 | #define APPLDATA_MAX_PROCS 100 | 13 | #define APPLDATA_MAX_PROCS 100 |
| @@ -32,12 +30,6 @@ | |||
| 32 | #define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x) | 30 | #define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x) |
| 33 | #define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x) | 31 | #define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x) |
| 34 | 32 | ||
| 35 | #ifdef APPLDATA_DEBUG | ||
| 36 | #define P_DEBUG(x...) printk(KERN_DEBUG MY_PRINT_NAME " debug: " x) | ||
| 37 | #else | ||
| 38 | #define P_DEBUG(x...) do {} while (0) | ||
| 39 | #endif | ||
| 40 | |||
| 41 | struct appldata_ops { | 33 | struct appldata_ops { |
| 42 | struct list_head list; | 34 | struct list_head list; |
| 43 | struct ctl_table_header *sysctl_header; | 35 | struct ctl_table_header *sysctl_header; |
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index ad40729bec3d..9cb3d92447a3 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
| @@ -5,7 +5,7 @@ | |||
| 5 | * Exports appldata_register_ops() and appldata_unregister_ops() for the | 5 | * Exports appldata_register_ops() and appldata_unregister_ops() for the |
| 6 | * data gathering modules. | 6 | * data gathering modules. |
| 7 | * | 7 | * |
| 8 | * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH. | 8 | * Copyright IBM Corp. 2003, 2008 |
| 9 | * | 9 | * |
| 10 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> | 10 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> |
| 11 | */ | 11 | */ |
| @@ -108,9 +108,6 @@ static LIST_HEAD(appldata_ops_list); | |||
| 108 | */ | 108 | */ |
| 109 | static void appldata_timer_function(unsigned long data) | 109 | static void appldata_timer_function(unsigned long data) |
| 110 | { | 110 | { |
| 111 | P_DEBUG(" -= Timer =-\n"); | ||
| 112 | P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(), | ||
| 113 | atomic_read(&appldata_expire_count)); | ||
| 114 | if (atomic_dec_and_test(&appldata_expire_count)) { | 111 | if (atomic_dec_and_test(&appldata_expire_count)) { |
| 115 | atomic_set(&appldata_expire_count, num_online_cpus()); | 112 | atomic_set(&appldata_expire_count, num_online_cpus()); |
| 116 | queue_work(appldata_wq, (struct work_struct *) data); | 113 | queue_work(appldata_wq, (struct work_struct *) data); |
| @@ -128,14 +125,11 @@ static void appldata_work_fn(struct work_struct *work) | |||
| 128 | struct appldata_ops *ops; | 125 | struct appldata_ops *ops; |
| 129 | int i; | 126 | int i; |
| 130 | 127 | ||
| 131 | P_DEBUG(" -= Work Queue =-\n"); | ||
| 132 | i = 0; | 128 | i = 0; |
| 133 | get_online_cpus(); | 129 | get_online_cpus(); |
| 134 | spin_lock(&appldata_ops_lock); | 130 | spin_lock(&appldata_ops_lock); |
| 135 | list_for_each(lh, &appldata_ops_list) { | 131 | list_for_each(lh, &appldata_ops_list) { |
| 136 | ops = list_entry(lh, struct appldata_ops, list); | 132 | ops = list_entry(lh, struct appldata_ops, list); |
| 137 | P_DEBUG("list_for_each loop: %i) active = %u, name = %s\n", | ||
| 138 | ++i, ops->active, ops->name); | ||
| 139 | if (ops->active == 1) { | 133 | if (ops->active == 1) { |
| 140 | ops->callback(ops->data); | 134 | ops->callback(ops->data); |
| 141 | } | 135 | } |
| @@ -212,7 +206,6 @@ __appldata_vtimer_setup(int cmd) | |||
| 212 | 0, 1); | 206 | 0, 1); |
| 213 | } | 207 | } |
| 214 | appldata_timer_active = 1; | 208 | appldata_timer_active = 1; |
| 215 | P_INFO("Monitoring timer started.\n"); | ||
| 216 | break; | 209 | break; |
| 217 | case APPLDATA_DEL_TIMER: | 210 | case APPLDATA_DEL_TIMER: |
| 218 | for_each_online_cpu(i) | 211 | for_each_online_cpu(i) |
| @@ -221,7 +214,6 @@ __appldata_vtimer_setup(int cmd) | |||
| 221 | break; | 214 | break; |
| 222 | appldata_timer_active = 0; | 215 | appldata_timer_active = 0; |
| 223 | atomic_set(&appldata_expire_count, num_online_cpus()); | 216 | atomic_set(&appldata_expire_count, num_online_cpus()); |
| 224 | P_INFO("Monitoring timer stopped.\n"); | ||
| 225 | break; | 217 | break; |
| 226 | case APPLDATA_MOD_TIMER: | 218 | case APPLDATA_MOD_TIMER: |
| 227 | per_cpu_interval = (u64) (appldata_interval*1000 / | 219 | per_cpu_interval = (u64) (appldata_interval*1000 / |
| @@ -313,10 +305,8 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp, | |||
| 313 | } | 305 | } |
| 314 | interval = 0; | 306 | interval = 0; |
| 315 | sscanf(buf, "%i", &interval); | 307 | sscanf(buf, "%i", &interval); |
| 316 | if (interval <= 0) { | 308 | if (interval <= 0) |
| 317 | P_ERROR("Timer CPU interval has to be > 0!\n"); | ||
| 318 | return -EINVAL; | 309 | return -EINVAL; |
| 319 | } | ||
| 320 | 310 | ||
| 321 | get_online_cpus(); | 311 | get_online_cpus(); |
| 322 | spin_lock(&appldata_timer_lock); | 312 | spin_lock(&appldata_timer_lock); |
| @@ -324,9 +314,6 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp, | |||
| 324 | __appldata_vtimer_setup(APPLDATA_MOD_TIMER); | 314 | __appldata_vtimer_setup(APPLDATA_MOD_TIMER); |
| 325 | spin_unlock(&appldata_timer_lock); | 315 | spin_unlock(&appldata_timer_lock); |
| 326 | put_online_cpus(); | 316 | put_online_cpus(); |
| 327 | |||
| 328 | P_INFO("Monitoring CPU interval set to %u milliseconds.\n", | ||
| 329 | interval); | ||
| 330 | out: | 317 | out: |
| 331 | *lenp = len; | 318 | *lenp = len; |
| 332 | *ppos += len; | 319 | *ppos += len; |
| @@ -406,23 +393,16 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp, | |||
| 406 | P_ERROR("START DIAG 0xDC for %s failed, " | 393 | P_ERROR("START DIAG 0xDC for %s failed, " |
| 407 | "return code: %d\n", ops->name, rc); | 394 | "return code: %d\n", ops->name, rc); |
| 408 | module_put(ops->owner); | 395 | module_put(ops->owner); |
| 409 | } else { | 396 | } else |
| 410 | P_INFO("Monitoring %s data enabled, " | ||
| 411 | "DIAG 0xDC started.\n", ops->name); | ||
| 412 | ops->active = 1; | 397 | ops->active = 1; |
| 413 | } | ||
| 414 | } else if ((buf[0] == '0') && (ops->active == 1)) { | 398 | } else if ((buf[0] == '0') && (ops->active == 1)) { |
| 415 | ops->active = 0; | 399 | ops->active = 0; |
| 416 | rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, | 400 | rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, |
| 417 | (unsigned long) ops->data, ops->size, | 401 | (unsigned long) ops->data, ops->size, |
| 418 | ops->mod_lvl); | 402 | ops->mod_lvl); |
| 419 | if (rc != 0) { | 403 | if (rc != 0) |
| 420 | P_ERROR("STOP DIAG 0xDC for %s failed, " | 404 | P_ERROR("STOP DIAG 0xDC for %s failed, " |
| 421 | "return code: %d\n", ops->name, rc); | 405 | "return code: %d\n", ops->name, rc); |
| 422 | } else { | ||
| 423 | P_INFO("Monitoring %s data disabled, " | ||
| 424 | "DIAG 0xDC stopped.\n", ops->name); | ||
| 425 | } | ||
| 426 | module_put(ops->owner); | 406 | module_put(ops->owner); |
| 427 | } | 407 | } |
| 428 | spin_unlock(&appldata_ops_lock); | 408 | spin_unlock(&appldata_ops_lock); |
| @@ -468,7 +448,6 @@ int appldata_register_ops(struct appldata_ops *ops) | |||
| 468 | ops->sysctl_header = register_sysctl_table(ops->ctl_table); | 448 | ops->sysctl_header = register_sysctl_table(ops->ctl_table); |
| 469 | if (!ops->sysctl_header) | 449 | if (!ops->sysctl_header) |
| 470 | goto out; | 450 | goto out; |
| 471 | P_INFO("%s-ops registered!\n", ops->name); | ||
| 472 | return 0; | 451 | return 0; |
| 473 | out: | 452 | out: |
| 474 | spin_lock(&appldata_ops_lock); | 453 | spin_lock(&appldata_ops_lock); |
| @@ -490,7 +469,6 @@ void appldata_unregister_ops(struct appldata_ops *ops) | |||
| 490 | spin_unlock(&appldata_ops_lock); | 469 | spin_unlock(&appldata_ops_lock); |
| 491 | unregister_sysctl_table(ops->sysctl_header); | 470 | unregister_sysctl_table(ops->sysctl_header); |
| 492 | kfree(ops->ctl_table); | 471 | kfree(ops->ctl_table); |
| 493 | P_INFO("%s-ops unregistered!\n", ops->name); | ||
| 494 | } | 472 | } |
| 495 | /********************** module-ops management <END> **************************/ | 473 | /********************** module-ops management <END> **************************/ |
| 496 | 474 | ||
| @@ -553,14 +531,9 @@ static int __init appldata_init(void) | |||
| 553 | { | 531 | { |
| 554 | int i; | 532 | int i; |
| 555 | 533 | ||
| 556 | P_DEBUG("sizeof(parameter_list) = %lu\n", | ||
| 557 | sizeof(struct appldata_parameter_list)); | ||
| 558 | |||
| 559 | appldata_wq = create_singlethread_workqueue("appldata"); | 534 | appldata_wq = create_singlethread_workqueue("appldata"); |
| 560 | if (!appldata_wq) { | 535 | if (!appldata_wq) |
| 561 | P_ERROR("Could not create work queue\n"); | ||
| 562 | return -ENOMEM; | 536 | return -ENOMEM; |
| 563 | } | ||
| 564 | 537 | ||
| 565 | get_online_cpus(); | 538 | get_online_cpus(); |
| 566 | for_each_online_cpu(i) | 539 | for_each_online_cpu(i) |
| @@ -571,8 +544,6 @@ static int __init appldata_init(void) | |||
| 571 | register_hotcpu_notifier(&appldata_nb); | 544 | register_hotcpu_notifier(&appldata_nb); |
| 572 | 545 | ||
| 573 | appldata_sysctl_header = register_sysctl_table(appldata_dir_table); | 546 | appldata_sysctl_header = register_sysctl_table(appldata_dir_table); |
| 574 | |||
| 575 | P_DEBUG("Base interface initialized.\n"); | ||
| 576 | return 0; | 547 | return 0; |
| 577 | } | 548 | } |
| 578 | 549 | ||
| @@ -584,7 +555,9 @@ EXPORT_SYMBOL_GPL(appldata_register_ops); | |||
| 584 | EXPORT_SYMBOL_GPL(appldata_unregister_ops); | 555 | EXPORT_SYMBOL_GPL(appldata_unregister_ops); |
| 585 | EXPORT_SYMBOL_GPL(appldata_diag); | 556 | EXPORT_SYMBOL_GPL(appldata_diag); |
| 586 | 557 | ||
| 558 | #ifdef CONFIG_SWAP | ||
| 587 | EXPORT_SYMBOL_GPL(si_swapinfo); | 559 | EXPORT_SYMBOL_GPL(si_swapinfo); |
| 560 | #endif | ||
| 588 | EXPORT_SYMBOL_GPL(nr_threads); | 561 | EXPORT_SYMBOL_GPL(nr_threads); |
| 589 | EXPORT_SYMBOL_GPL(nr_running); | 562 | EXPORT_SYMBOL_GPL(nr_running); |
| 590 | EXPORT_SYMBOL_GPL(nr_iowait); | 563 | EXPORT_SYMBOL_GPL(nr_iowait); |
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 51181ccdb87b..3ed56b7d1b2f 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
| @@ -14,14 +14,13 @@ | |||
| 14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
| 15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
| 16 | #include <linux/kernel_stat.h> | 16 | #include <linux/kernel_stat.h> |
| 17 | #include <asm/io.h> | ||
| 18 | #include <linux/pagemap.h> | 17 | #include <linux/pagemap.h> |
| 19 | #include <linux/swap.h> | 18 | #include <linux/swap.h> |
| 19 | #include <asm/io.h> | ||
| 20 | 20 | ||
| 21 | #include "appldata.h" | 21 | #include "appldata.h" |
| 22 | 22 | ||
| 23 | 23 | ||
| 24 | #define MY_PRINT_NAME "appldata_mem" /* for debug messages, etc. */ | ||
| 25 | #define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */ | 24 | #define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */ |
| 26 | 25 | ||
| 27 | /* | 26 | /* |
| @@ -70,30 +69,6 @@ static struct appldata_mem_data { | |||
| 70 | } __attribute__((packed)) appldata_mem_data; | 69 | } __attribute__((packed)) appldata_mem_data; |
| 71 | 70 | ||
| 72 | 71 | ||
| 73 | static inline void appldata_debug_print(struct appldata_mem_data *mem_data) | ||
| 74 | { | ||
| 75 | P_DEBUG("--- MEM - RECORD ---\n"); | ||
| 76 | P_DEBUG("pgpgin = %8lu KB\n", mem_data->pgpgin); | ||
| 77 | P_DEBUG("pgpgout = %8lu KB\n", mem_data->pgpgout); | ||
| 78 | P_DEBUG("pswpin = %8lu Pages\n", mem_data->pswpin); | ||
| 79 | P_DEBUG("pswpout = %8lu Pages\n", mem_data->pswpout); | ||
| 80 | P_DEBUG("pgalloc = %8lu \n", mem_data->pgalloc); | ||
| 81 | P_DEBUG("pgfault = %8lu \n", mem_data->pgfault); | ||
| 82 | P_DEBUG("pgmajfault = %8lu \n", mem_data->pgmajfault); | ||
| 83 | P_DEBUG("sharedram = %8lu KB\n", mem_data->sharedram); | ||
| 84 | P_DEBUG("totalram = %8lu KB\n", mem_data->totalram); | ||
| 85 | P_DEBUG("freeram = %8lu KB\n", mem_data->freeram); | ||
| 86 | P_DEBUG("totalhigh = %8lu KB\n", mem_data->totalhigh); | ||
| 87 | P_DEBUG("freehigh = %8lu KB\n", mem_data->freehigh); | ||
| 88 | P_DEBUG("bufferram = %8lu KB\n", mem_data->bufferram); | ||
| 89 | P_DEBUG("cached = %8lu KB\n", mem_data->cached); | ||
| 90 | P_DEBUG("totalswap = %8lu KB\n", mem_data->totalswap); | ||
| 91 | P_DEBUG("freeswap = %8lu KB\n", mem_data->freeswap); | ||
| 92 | P_DEBUG("sync_count_1 = %u\n", mem_data->sync_count_1); | ||
| 93 | P_DEBUG("sync_count_2 = %u\n", mem_data->sync_count_2); | ||
| 94 | P_DEBUG("timestamp = %lX\n", mem_data->timestamp); | ||
| 95 | } | ||
| 96 | |||
| 97 | /* | 72 | /* |
| 98 | * appldata_get_mem_data() | 73 | * appldata_get_mem_data() |
| 99 | * | 74 | * |
| @@ -140,9 +115,6 @@ static void appldata_get_mem_data(void *data) | |||
| 140 | 115 | ||
| 141 | mem_data->timestamp = get_clock(); | 116 | mem_data->timestamp = get_clock(); |
| 142 | mem_data->sync_count_2++; | 117 | mem_data->sync_count_2++; |
| 143 | #ifdef APPLDATA_DEBUG | ||
| 144 | appldata_debug_print(mem_data); | ||
| 145 | #endif | ||
| 146 | } | 118 | } |
| 147 | 119 | ||
| 148 | 120 | ||
| @@ -164,17 +136,7 @@ static struct appldata_ops ops = { | |||
| 164 | */ | 136 | */ |
| 165 | static int __init appldata_mem_init(void) | 137 | static int __init appldata_mem_init(void) |
| 166 | { | 138 | { |
| 167 | int rc; | 139 | return appldata_register_ops(&ops); |
| 168 | |||
| 169 | P_DEBUG("sizeof(mem) = %lu\n", sizeof(struct appldata_mem_data)); | ||
| 170 | |||
| 171 | rc = appldata_register_ops(&ops); | ||
| 172 | if (rc != 0) { | ||
| 173 | P_ERROR("Error registering ops, rc = %i\n", rc); | ||
| 174 | } else { | ||
| 175 | P_DEBUG("%s-ops registered!\n", ops.name); | ||
| 176 | } | ||
| 177 | return rc; | ||
| 178 | } | 140 | } |
| 179 | 141 | ||
| 180 | /* | 142 | /* |
| @@ -185,7 +147,6 @@ static int __init appldata_mem_init(void) | |||
| 185 | static void __exit appldata_mem_exit(void) | 147 | static void __exit appldata_mem_exit(void) |
| 186 | { | 148 | { |
| 187 | appldata_unregister_ops(&ops); | 149 | appldata_unregister_ops(&ops); |
| 188 | P_DEBUG("%s-ops unregistered!\n", ops.name); | ||
| 189 | } | 150 | } |
| 190 | 151 | ||
| 191 | 152 | ||
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 4d8344336001..3b746556e1a3 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
| @@ -21,9 +21,6 @@ | |||
| 21 | #include "appldata.h" | 21 | #include "appldata.h" |
| 22 | 22 | ||
| 23 | 23 | ||
| 24 | #define MY_PRINT_NAME "appldata_net_sum" /* for debug messages, etc. */ | ||
| 25 | |||
| 26 | |||
| 27 | /* | 24 | /* |
| 28 | * Network data | 25 | * Network data |
| 29 | * | 26 | * |
| @@ -60,26 +57,6 @@ static struct appldata_net_sum_data { | |||
| 60 | } __attribute__((packed)) appldata_net_sum_data; | 57 | } __attribute__((packed)) appldata_net_sum_data; |
| 61 | 58 | ||
| 62 | 59 | ||
| 63 | static inline void appldata_print_debug(struct appldata_net_sum_data *net_data) | ||
| 64 | { | ||
| 65 | P_DEBUG("--- NET - RECORD ---\n"); | ||
| 66 | |||
| 67 | P_DEBUG("nr_interfaces = %u\n", net_data->nr_interfaces); | ||
| 68 | P_DEBUG("rx_packets = %8lu\n", net_data->rx_packets); | ||
| 69 | P_DEBUG("tx_packets = %8lu\n", net_data->tx_packets); | ||
| 70 | P_DEBUG("rx_bytes = %8lu\n", net_data->rx_bytes); | ||
| 71 | P_DEBUG("tx_bytes = %8lu\n", net_data->tx_bytes); | ||
| 72 | P_DEBUG("rx_errors = %8lu\n", net_data->rx_errors); | ||
| 73 | P_DEBUG("tx_errors = %8lu\n", net_data->tx_errors); | ||
| 74 | P_DEBUG("rx_dropped = %8lu\n", net_data->rx_dropped); | ||
| 75 | P_DEBUG("tx_dropped = %8lu\n", net_data->tx_dropped); | ||
| 76 | P_DEBUG("collisions = %8lu\n", net_data->collisions); | ||
| 77 | |||
| 78 | P_DEBUG("sync_count_1 = %u\n", net_data->sync_count_1); | ||
| 79 | P_DEBUG("sync_count_2 = %u\n", net_data->sync_count_2); | ||
| 80 | P_DEBUG("timestamp = %lX\n", net_data->timestamp); | ||
| 81 | } | ||
| 82 | |||
| 83 | /* | 60 | /* |
| 84 | * appldata_get_net_sum_data() | 61 | * appldata_get_net_sum_data() |
| 85 | * | 62 | * |
| @@ -135,9 +112,6 @@ static void appldata_get_net_sum_data(void *data) | |||
| 135 | 112 | ||
| 136 | net_data->timestamp = get_clock(); | 113 | net_data->timestamp = get_clock(); |
| 137 | net_data->sync_count_2++; | 114 | net_data->sync_count_2++; |
| 138 | #ifdef APPLDATA_DEBUG | ||
| 139 | appldata_print_debug(net_data); | ||
| 140 | #endif | ||
| 141 | } | 115 | } |
| 142 | 116 | ||
| 143 | 117 | ||
| @@ -159,17 +133,7 @@ static struct appldata_ops ops = { | |||
| 159 | */ | 133 | */ |
| 160 | static int __init appldata_net_init(void) | 134 | static int __init appldata_net_init(void) |
| 161 | { | 135 | { |
| 162 | int rc; | 136 | return appldata_register_ops(&ops); |
| 163 | |||
| 164 | P_DEBUG("sizeof(net) = %lu\n", sizeof(struct appldata_net_sum_data)); | ||
| 165 | |||
| 166 | rc = appldata_register_ops(&ops); | ||
| 167 | if (rc != 0) { | ||
| 168 | P_ERROR("Error registering ops, rc = %i\n", rc); | ||
| 169 | } else { | ||
| 170 | P_DEBUG("%s-ops registered!\n", ops.name); | ||
| 171 | } | ||
| 172 | return rc; | ||
| 173 | } | 137 | } |
| 174 | 138 | ||
| 175 | /* | 139 | /* |
| @@ -180,7 +144,6 @@ static int __init appldata_net_init(void) | |||
| 180 | static void __exit appldata_net_exit(void) | 144 | static void __exit appldata_net_exit(void) |
| 181 | { | 145 | { |
| 182 | appldata_unregister_ops(&ops); | 146 | appldata_unregister_ops(&ops); |
| 183 | P_DEBUG("%s-ops unregistered!\n", ops.name); | ||
| 184 | } | 147 | } |
| 185 | 148 | ||
| 186 | 149 | ||
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 6b3eafe10453..eb44f9f8ab91 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
| @@ -89,44 +89,6 @@ static struct appldata_ops ops = { | |||
| 89 | }; | 89 | }; |
| 90 | 90 | ||
| 91 | 91 | ||
| 92 | static inline void appldata_print_debug(struct appldata_os_data *os_data) | ||
| 93 | { | ||
| 94 | int a0, a1, a2, i; | ||
| 95 | |||
| 96 | P_DEBUG("--- OS - RECORD ---\n"); | ||
| 97 | P_DEBUG("nr_threads = %u\n", os_data->nr_threads); | ||
| 98 | P_DEBUG("nr_running = %u\n", os_data->nr_running); | ||
| 99 | P_DEBUG("nr_iowait = %u\n", os_data->nr_iowait); | ||
| 100 | P_DEBUG("avenrun(int) = %8x / %8x / %8x\n", os_data->avenrun[0], | ||
| 101 | os_data->avenrun[1], os_data->avenrun[2]); | ||
| 102 | a0 = os_data->avenrun[0]; | ||
| 103 | a1 = os_data->avenrun[1]; | ||
| 104 | a2 = os_data->avenrun[2]; | ||
| 105 | P_DEBUG("avenrun(float) = %d.%02d / %d.%02d / %d.%02d\n", | ||
| 106 | LOAD_INT(a0), LOAD_FRAC(a0), LOAD_INT(a1), LOAD_FRAC(a1), | ||
| 107 | LOAD_INT(a2), LOAD_FRAC(a2)); | ||
| 108 | |||
| 109 | P_DEBUG("nr_cpus = %u\n", os_data->nr_cpus); | ||
| 110 | for (i = 0; i < os_data->nr_cpus; i++) { | ||
| 111 | P_DEBUG("cpu%u : user = %u, nice = %u, system = %u, " | ||
| 112 | "idle = %u, irq = %u, softirq = %u, iowait = %u, " | ||
| 113 | "steal = %u\n", | ||
| 114 | os_data->os_cpu[i].cpu_id, | ||
| 115 | os_data->os_cpu[i].per_cpu_user, | ||
| 116 | os_data->os_cpu[i].per_cpu_nice, | ||
| 117 | os_data->os_cpu[i].per_cpu_system, | ||
| 118 | os_data->os_cpu[i].per_cpu_idle, | ||
| 119 | os_data->os_cpu[i].per_cpu_irq, | ||
| 120 | os_data->os_cpu[i].per_cpu_softirq, | ||
| 121 | os_data->os_cpu[i].per_cpu_iowait, | ||
| 122 | os_data->os_cpu[i].per_cpu_steal); | ||
| 123 | } | ||
| 124 | |||
| 125 | P_DEBUG("sync_count_1 = %u\n", os_data->sync_count_1); | ||
| 126 | P_DEBUG("sync_count_2 = %u\n", os_data->sync_count_2); | ||
| 127 | P_DEBUG("timestamp = %lX\n", os_data->timestamp); | ||
| 128 | } | ||
| 129 | |||
| 130 | /* | 92 | /* |
| 131 | * appldata_get_os_data() | 93 | * appldata_get_os_data() |
| 132 | * | 94 | * |
| @@ -180,13 +142,10 @@ static void appldata_get_os_data(void *data) | |||
| 180 | APPLDATA_START_INTERVAL_REC, | 142 | APPLDATA_START_INTERVAL_REC, |
| 181 | (unsigned long) ops.data, new_size, | 143 | (unsigned long) ops.data, new_size, |
| 182 | ops.mod_lvl); | 144 | ops.mod_lvl); |
| 183 | if (rc != 0) { | 145 | if (rc != 0) |
| 184 | P_ERROR("os: START NEW DIAG 0xDC failed, " | 146 | P_ERROR("os: START NEW DIAG 0xDC failed, " |
| 185 | "return code: %d, new size = %i\n", rc, | 147 | "return code: %d, new size = %i\n", rc, |
| 186 | new_size); | 148 | new_size); |
| 187 | P_INFO("os: stopping old record now\n"); | ||
| 188 | } else | ||
| 189 | P_INFO("os: new record size = %i\n", new_size); | ||
| 190 | 149 | ||
| 191 | rc = appldata_diag(APPLDATA_RECORD_OS_ID, | 150 | rc = appldata_diag(APPLDATA_RECORD_OS_ID, |
| 192 | APPLDATA_STOP_REC, | 151 | APPLDATA_STOP_REC, |
| @@ -204,9 +163,6 @@ static void appldata_get_os_data(void *data) | |||
| 204 | } | 163 | } |
| 205 | os_data->timestamp = get_clock(); | 164 | os_data->timestamp = get_clock(); |
| 206 | os_data->sync_count_2++; | 165 | os_data->sync_count_2++; |
| 207 | #ifdef APPLDATA_DEBUG | ||
| 208 | appldata_print_debug(os_data); | ||
| 209 | #endif | ||
| 210 | } | 166 | } |
| 211 | 167 | ||
| 212 | 168 | ||
| @@ -227,12 +183,9 @@ static int __init appldata_os_init(void) | |||
| 227 | rc = -ENOMEM; | 183 | rc = -ENOMEM; |
| 228 | goto out; | 184 | goto out; |
| 229 | } | 185 | } |
| 230 | P_DEBUG("max. sizeof(os) = %i, sizeof(os_cpu) = %lu\n", max_size, | ||
| 231 | sizeof(struct appldata_os_per_cpu)); | ||
| 232 | 186 | ||
| 233 | appldata_os_data = kzalloc(max_size, GFP_DMA); | 187 | appldata_os_data = kzalloc(max_size, GFP_DMA); |
| 234 | if (appldata_os_data == NULL) { | 188 | if (appldata_os_data == NULL) { |
| 235 | P_ERROR("No memory for %s!\n", ops.name); | ||
| 236 | rc = -ENOMEM; | 189 | rc = -ENOMEM; |
| 237 | goto out; | 190 | goto out; |
| 238 | } | 191 | } |
| @@ -240,17 +193,12 @@ static int __init appldata_os_init(void) | |||
| 240 | appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu); | 193 | appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu); |
| 241 | appldata_os_data->cpu_offset = offsetof(struct appldata_os_data, | 194 | appldata_os_data->cpu_offset = offsetof(struct appldata_os_data, |
| 242 | os_cpu); | 195 | os_cpu); |
| 243 | P_DEBUG("cpu offset = %u\n", appldata_os_data->cpu_offset); | ||
| 244 | 196 | ||
| 245 | ops.data = appldata_os_data; | 197 | ops.data = appldata_os_data; |
| 246 | ops.callback = &appldata_get_os_data; | 198 | ops.callback = &appldata_get_os_data; |
| 247 | rc = appldata_register_ops(&ops); | 199 | rc = appldata_register_ops(&ops); |
| 248 | if (rc != 0) { | 200 | if (rc != 0) |
| 249 | P_ERROR("Error registering ops, rc = %i\n", rc); | ||
| 250 | kfree(appldata_os_data); | 201 | kfree(appldata_os_data); |
| 251 | } else { | ||
| 252 | P_DEBUG("%s-ops registered!\n", ops.name); | ||
| 253 | } | ||
| 254 | out: | 202 | out: |
| 255 | return rc; | 203 | return rc; |
| 256 | } | 204 | } |
| @@ -264,7 +212,6 @@ static void __exit appldata_os_exit(void) | |||
| 264 | { | 212 | { |
| 265 | appldata_unregister_ops(&ops); | 213 | appldata_unregister_ops(&ops); |
| 266 | kfree(appldata_os_data); | 214 | kfree(appldata_os_data); |
| 267 | P_DEBUG("%s-ops unregistered!\n", ops.name); | ||
| 268 | } | 215 | } |
| 269 | 216 | ||
| 270 | 217 | ||
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 0cfefddd8375..6a4300b3ff52 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
| @@ -185,11 +185,8 @@ static int __init prng_init(void) | |||
| 185 | prng_seed(16); | 185 | prng_seed(16); |
| 186 | 186 | ||
| 187 | ret = misc_register(&prng_dev); | 187 | ret = misc_register(&prng_dev); |
| 188 | if (ret) { | 188 | if (ret) |
| 189 | printk(KERN_WARNING | ||
| 190 | "Could not register misc device for PRNG.\n"); | ||
| 191 | goto out_buf; | 189 | goto out_buf; |
| 192 | } | ||
| 193 | return 0; | 190 | return 0; |
| 194 | 191 | ||
| 195 | out_buf: | 192 | out_buf: |
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 4b010ff814c9..7383781f3e6a 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
| @@ -150,33 +150,24 @@ static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov, | |||
| 150 | unsigned long nr_segs, loff_t offset) | 150 | unsigned long nr_segs, loff_t offset) |
| 151 | { | 151 | { |
| 152 | char *data; | 152 | char *data; |
| 153 | size_t len; | 153 | ssize_t ret; |
| 154 | struct file *filp = iocb->ki_filp; | 154 | struct file *filp = iocb->ki_filp; |
| 155 | /* XXX: temporary */ | 155 | /* XXX: temporary */ |
| 156 | char __user *buf = iov[0].iov_base; | 156 | char __user *buf = iov[0].iov_base; |
| 157 | size_t count = iov[0].iov_len; | 157 | size_t count = iov[0].iov_len; |
| 158 | 158 | ||
| 159 | if (nr_segs != 1) { | 159 | if (nr_segs != 1) |
| 160 | count = -EINVAL; | 160 | return -EINVAL; |
| 161 | goto out; | ||
| 162 | } | ||
| 163 | 161 | ||
| 164 | data = filp->private_data; | 162 | data = filp->private_data; |
| 165 | len = strlen(data); | 163 | ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data)); |
| 166 | if (offset > len) { | 164 | if (ret <= 0) |
| 167 | count = 0; | 165 | return ret; |
| 168 | goto out; | 166 | |
| 169 | } | 167 | iocb->ki_pos += ret; |
| 170 | if (count > len - offset) | ||
| 171 | count = len - offset; | ||
| 172 | if (copy_to_user(buf, data + offset, count)) { | ||
| 173 | count = -EFAULT; | ||
| 174 | goto out; | ||
| 175 | } | ||
| 176 | iocb->ki_pos += count; | ||
| 177 | file_accessed(filp); | 168 | file_accessed(filp); |
| 178 | out: | 169 | |
| 179 | return count; | 170 | return ret; |
| 180 | } | 171 | } |
| 181 | static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, | 172 | static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, |
| 182 | unsigned long nr_segs, loff_t offset) | 173 | unsigned long nr_segs, loff_t offset) |
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 6302f5082588..50f657e77344 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
| @@ -7,9 +7,14 @@ | |||
| 7 | # | 7 | # |
| 8 | CFLAGS_smp.o := -Wno-nonnull | 8 | CFLAGS_smp.o := -Wno-nonnull |
| 9 | 9 | ||
| 10 | # | ||
| 11 | # Pass UTS_MACHINE for user_regset definition | ||
| 12 | # | ||
| 13 | CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' | ||
| 14 | |||
| 10 | obj-y := bitmap.o traps.o time.o process.o base.o early.o \ | 15 | obj-y := bitmap.o traps.o time.o process.o base.o early.o \ |
| 11 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ | 16 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ |
| 12 | s390_ext.o debug.o irq.o ipl.o dis.o diag.o | 17 | s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o |
| 13 | 18 | ||
| 14 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | 19 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) |
| 15 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | 20 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) |
| @@ -23,7 +28,7 @@ obj-$(CONFIG_AUDIT) += audit.o | |||
| 23 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o | 28 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o |
| 24 | obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ | 29 | obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ |
| 25 | compat_wrapper.o compat_exec_domain.o \ | 30 | compat_wrapper.o compat_exec_domain.o \ |
| 26 | binfmt_elf32.o $(compat-obj-y) | 31 | $(compat-obj-y) |
| 27 | 32 | ||
| 28 | obj-$(CONFIG_VIRT_TIMER) += vtime.o | 33 | obj-$(CONFIG_VIRT_TIMER) += vtime.o |
| 29 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 34 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
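On the UTS_MACHINE hunk above: the value ends up in the user_regset view that ptrace.c defines, which names the register sets after the machine string. A rough sketch of that use, with a placeholder regset array rather than the real s390 one:

#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/regset.h>

/* Placeholder regset array -- the real s390 one describes GPRs, ACRs,
 * FP regs and so on; only the .name line is the point here. */
static const struct user_regset example_regsets[1];

static const struct user_regset_view example_view = {
	.name		= UTS_MACHINE,	/* supplied via CFLAGS_ptrace.o above */
	.e_machine	= EM_S390,
	.regsets	= example_regsets,
	.n		= ARRAY_SIZE(example_regsets),
};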
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
deleted file mode 100644
index 3e1c315b736d..000000000000
--- a/arch/s390/kernel/binfmt_elf32.c
+++ /dev/null
| @@ -1,214 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Support for 32-bit Linux for S390 ELF binaries. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
| 5 | * Author(s): Gerhard Tonn (ton@de.ibm.com) | ||
| 6 | * | ||
| 7 | * Heavily inspired by the 32-bit Sparc compat code which is | ||
| 8 | * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com) | ||
| 9 | * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz) | ||
| 10 | */ | ||
| 11 | |||
| 12 | #define __ASMS390_ELF_H | ||
| 13 | |||
| 14 | #include <linux/time.h> | ||
| 15 | |||
| 16 | /* | ||
| 17 | * These are used to set parameters in the core dumps. | ||
| 18 | */ | ||
| 19 | #define ELF_CLASS ELFCLASS32 | ||
| 20 | #define ELF_DATA ELFDATA2MSB | ||
| 21 | #define ELF_ARCH EM_S390 | ||
| 22 | |||
| 23 | /* | ||
| 24 | * This is used to ensure we don't load something for the wrong architecture. | ||
| 25 | */ | ||
| 26 | #define elf_check_arch(x) \ | ||
| 27 | (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \ | ||
| 28 | && (x)->e_ident[EI_CLASS] == ELF_CLASS) | ||
| 29 | |||
| 30 | /* ELF register definitions */ | ||
| 31 | #define NUM_GPRS 16 | ||
| 32 | #define NUM_FPRS 16 | ||
| 33 | #define NUM_ACRS 16 | ||
| 34 | |||
| 35 | /* For SVR4/S390 the function pointer to be registered with `atexit` is | ||
| 36 | passed in R14. */ | ||
| 37 | #define ELF_PLAT_INIT(_r, load_addr) \ | ||
| 38 | do { \ | ||
| 39 | _r->gprs[14] = 0; \ | ||
| 40 | } while(0) | ||
| 41 | |||
| 42 | #define USE_ELF_CORE_DUMP | ||
| 43 | #define ELF_EXEC_PAGESIZE 4096 | ||
| 44 | |||
| 45 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical | ||
| 46 | use of this is to invoke "./ld.so someprog" to test out a new version of | ||
| 47 | the loader. We need to make sure that it is out of the way of the program | ||
| 48 | that it will "exec", and that there is sufficient room for the brk. */ | ||
| 49 | |||
| 50 | #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) | ||
| 51 | |||
| 52 | /* Wow, the "main" arch needs arch dependent functions too.. :) */ | ||
| 53 | |||
| 54 | /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is | ||
| 55 | now struct_user_regs, they are different) */ | ||
| 56 | |||
| 57 | #define ELF_CORE_COPY_REGS(pr_reg, regs) dump_regs32(regs, &pr_reg); | ||
| 58 | |||
| 59 | #define ELF_CORE_COPY_TASK_REGS(tsk, regs) dump_task_regs32(tsk, regs) | ||
| 60 | |||
| 61 | #define ELF_CORE_COPY_FPREGS(tsk, fpregs) dump_task_fpu(tsk, fpregs) | ||
| 62 | |||
| 63 | /* This yields a mask that user programs can use to figure out what | ||
| 64 | instruction set this CPU supports. */ | ||
| 65 | |||
| 66 | #define ELF_HWCAP (0) | ||
| 67 | |||
| 68 | /* This yields a string that ld.so will use to load implementation | ||
| 69 | specific libraries for optimization. This is more specific in | ||
| 70 | intent than poking at uname or /proc/cpuinfo. | ||
| 71 | |||
| 72 | For the moment, we have only optimizations for the Intel generations, | ||
| 73 | but that could change... */ | ||
| 74 | |||
| 75 | #define ELF_PLATFORM (NULL) | ||
| 76 | |||
| 77 | #define SET_PERSONALITY(ex, ibcs2) \ | ||
| 78 | do { \ | ||
| 79 | if (ibcs2) \ | ||
| 80 | set_personality(PER_SVR4); \ | ||
| 81 | else if (current->personality != PER_LINUX32) \ | ||
| 82 | set_personality(PER_LINUX); \ | ||
| 83 | set_thread_flag(TIF_31BIT); \ | ||
| 84 | } while (0) | ||
| 85 | |||
| 86 | #include "compat_linux.h" | ||
| 87 | |||
| 88 | typedef _s390_fp_regs32 elf_fpregset_t; | ||
| 89 | |||
| 90 | typedef struct | ||
| 91 | { | ||
| 92 | |||
| 93 | _psw_t32 psw; | ||
| 94 | __u32 gprs[__NUM_GPRS]; | ||
| 95 | __u32 acrs[__NUM_ACRS]; | ||
| 96 | __u32 orig_gpr2; | ||
| 97 | } s390_regs32; | ||
| 98 | typedef s390_regs32 elf_gregset_t; | ||
| 99 | |||
| 100 | static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs) | ||
| 101 | { | ||
| 102 | int i; | ||
| 103 | |||
| 104 | memcpy(®s->psw.mask, &ptregs->psw.mask, 4); | ||
| 105 | memcpy(®s->psw.addr, (char *)&ptregs->psw.addr + 4, 4); | ||
| 106 | for (i = 0; i < NUM_GPRS; i++) | ||
| 107 | regs->gprs[i] = ptregs->gprs[i]; | ||
| 108 | save_access_regs(regs->acrs); | ||
| 109 | regs->orig_gpr2 = ptregs->orig_gpr2; | ||
| 110 | return 1; | ||
| 111 | } | ||
| 112 | |||
| 113 | static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs) | ||
| 114 | { | ||
| 115 | struct pt_regs *ptregs = task_pt_regs(tsk); | ||
| 116 | int i; | ||
| 117 | |||
| 118 | memcpy(®s->psw.mask, &ptregs->psw.mask, 4); | ||
| 119 | memcpy(®s->psw.addr, (char *)&ptregs->psw.addr + 4, 4); | ||
| 120 | for (i = 0; i < NUM_GPRS; i++) | ||
| 121 | regs->gprs[i] = ptregs->gprs[i]; | ||
| 122 | memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs)); | ||
| 123 | regs->orig_gpr2 = ptregs->orig_gpr2; | ||
| 124 | return 1; | ||
| 125 | } | ||
| 126 | |||
| 127 | static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs) | ||
| 128 | { | ||
| 129 | if (tsk == current) | ||
| 130 | save_fp_regs((s390_fp_regs *) fpregs); | ||
| 131 | else | ||
| 132 | memcpy(fpregs, &tsk->thread.fp_regs, sizeof(elf_fpregset_t)); | ||
| 133 | return 1; | ||
| 134 | } | ||
| 135 | |||
| 136 | #include <asm/processor.h> | ||
| 137 | #include <asm/pgalloc.h> | ||
| 138 | #include <linux/module.h> | ||
| 139 | #include <linux/elfcore.h> | ||
| 140 | #include <linux/binfmts.h> | ||
| 141 | #include <linux/compat.h> | ||
| 142 | |||
| 143 | #define elf_prstatus elf_prstatus32 | ||
| 144 | struct elf_prstatus32 | ||
| 145 | { | ||
| 146 | struct elf_siginfo pr_info; /* Info associated with signal */ | ||
| 147 | short pr_cursig; /* Current signal */ | ||
| 148 | u32 pr_sigpend; /* Set of pending signals */ | ||
| 149 | u32 pr_sighold; /* Set of held signals */ | ||
| 150 | pid_t pr_pid; | ||
| 151 | pid_t pr_ppid; | ||
| 152 | pid_t pr_pgrp; | ||
| 153 | pid_t pr_sid; | ||
| 154 | struct compat_timeval pr_utime; /* User time */ | ||
| 155 | struct compat_timeval pr_stime; /* System time */ | ||
| 156 | struct compat_timeval pr_cutime; /* Cumulative user time */ | ||
| 157 | struct compat_timeval pr_cstime; /* Cumulative system time */ | ||
| 158 | elf_gregset_t pr_reg; /* GP registers */ | ||
| 159 | int pr_fpvalid; /* True if math co-processor being used. */ | ||
| 160 | }; | ||
| 161 | |||
| 162 | #define elf_prpsinfo elf_prpsinfo32 | ||
| 163 | struct elf_prpsinfo32 | ||
| 164 | { | ||
| 165 | char pr_state; /* numeric process state */ | ||
| 166 | char pr_sname; /* char for pr_state */ | ||
| 167 | char pr_zomb; /* zombie */ | ||
| 168 | char pr_nice; /* nice val */ | ||
| 169 | u32 pr_flag; /* flags */ | ||
| 170 | u16 pr_uid; | ||
| 171 | u16 pr_gid; | ||
| 172 | pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; | ||
| 173 | /* Lots missing */ | ||
| 174 | char pr_fname[16]; /* filename of executable */ | ||
| 175 | char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ | ||
| 176 | }; | ||
| 177 | |||
| 178 | #include <linux/highuid.h> | ||
| 179 | |||
| 180 | /* | ||
| 181 | #define init_elf_binfmt init_elf32_binfmt | ||
| 182 | */ | ||
| 183 | |||
| 184 | #undef start_thread | ||
| 185 | #define start_thread start_thread31 | ||
| 186 | |||
| 187 | static inline void start_thread31(struct pt_regs *regs, unsigned long new_psw, | ||
| 188 | unsigned long new_stackp) | ||
| 189 | { | ||
| 190 | set_fs(USER_DS); | ||
| 191 | regs->psw.mask = psw_user32_bits; | ||
| 192 | regs->psw.addr = new_psw; | ||
| 193 | regs->gprs[15] = new_stackp; | ||
| 194 | crst_table_downgrade(current->mm, 1UL << 31); | ||
| 195 | } | ||
| 196 | |||
| 197 | MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries," | ||
| 198 | " Copyright 2000 IBM Corporation"); | ||
| 199 | MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>"); | ||
| 200 | |||
| 201 | #undef MODULE_DESCRIPTION | ||
| 202 | #undef MODULE_AUTHOR | ||
| 203 | |||
| 204 | #undef cputime_to_timeval | ||
| 205 | #define cputime_to_timeval cputime_to_compat_timeval | ||
| 206 | static inline void | ||
| 207 | cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) | ||
| 208 | { | ||
| 209 | value->tv_usec = cputime % 1000000; | ||
| 210 | value->tv_sec = cputime / 1000000; | ||
| 211 | } | ||
| 212 | |||
| 213 | #include "../../../fs/binfmt_elf.c" | ||
| 214 | |||
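The deleted file implemented the 31-bit loader by redefining the ELF macros and then including fs/binfmt_elf.c verbatim; the select COMPAT_BINFMT_ELF added in the Kconfig hunk switches s390 to the generic compat loader, which is built around the same build-the-common-code-twice idea. A toy illustration of that technique (plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Toy version of the technique: generate the "same" loader twice with
 * different word sizes, the way binfmt_elf.c is built once natively and
 * once with the compat definitions. */
#define DEFINE_LOADER(name, word_t)					\
	static void name(void)						\
	{								\
		printf(#name ": ELF word size is %zu bytes\n",		\
		       sizeof(word_t));					\
	}

DEFINE_LOADER(load_elf_native, uint64_t)
DEFINE_LOADER(load_elf_compat, uint32_t)

int main(void)
{
	load_elf_native();
	load_elf_compat();
	return 0;
}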
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
index 419aef913ee1..cde81fa64f89 100644
--- a/arch/s390/kernel/compat_ptrace.h
+++ b/arch/s390/kernel/compat_ptrace.h
| @@ -1,7 +1,7 @@ | |||
| 1 | #ifndef _PTRACE32_H | 1 | #ifndef _PTRACE32_H |
| 2 | #define _PTRACE32_H | 2 | #define _PTRACE32_H |
| 3 | 3 | ||
| 4 | #include "compat_linux.h" /* needed for _psw_t32 */ | 4 | #include "compat_linux.h" /* needed for psw_compat_t */ |
| 5 | 5 | ||
| 6 | typedef struct { | 6 | typedef struct { |
| 7 | __u32 cr[3]; | 7 | __u32 cr[3]; |
| @@ -38,7 +38,7 @@ typedef struct { | |||
| 38 | 38 | ||
| 39 | struct user_regs_struct32 | 39 | struct user_regs_struct32 |
| 40 | { | 40 | { |
| 41 | _psw_t32 psw; | 41 | psw_compat_t psw; |
| 42 | u32 gprs[NUM_GPRS]; | 42 | u32 gprs[NUM_GPRS]; |
| 43 | u32 acrs[NUM_ACRS]; | 43 | u32 acrs[NUM_ACRS]; |
| 44 | u32 orig_gpr2; | 44 | u32 orig_gpr2; |
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index c93d1296cc0a..d80fcd4a7fe1 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
| @@ -1079,7 +1079,6 @@ __init debug_init(void) | |||
| 1079 | s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table); | 1079 | s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table); |
| 1080 | mutex_lock(&debug_mutex); | 1080 | mutex_lock(&debug_mutex); |
| 1081 | debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL); | 1081 | debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL); |
| 1082 | printk(KERN_INFO "debug: Initialization complete\n"); | ||
| 1083 | initialized = 1; | 1082 | initialized = 1; |
| 1084 | mutex_unlock(&debug_mutex); | 1083 | mutex_unlock(&debug_mutex); |
| 1085 | 1084 | ||
| @@ -1193,7 +1192,6 @@ debug_get_uint(char *buf) | |||
| 1193 | for(; isspace(*buf); buf++); | 1192 | for(; isspace(*buf); buf++); |
| 1194 | rc = simple_strtoul(buf, &buf, 10); | 1193 | rc = simple_strtoul(buf, &buf, 10); |
| 1195 | if(*buf){ | 1194 | if(*buf){ |
| 1196 | printk("debug: no integer specified!\n"); | ||
| 1197 | rc = -EINVAL; | 1195 | rc = -EINVAL; |
| 1198 | } | 1196 | } |
| 1199 | return rc; | 1197 | return rc; |
| @@ -1340,19 +1338,12 @@ static void debug_flush(debug_info_t* id, int area) | |||
| 1340 | memset(id->areas[i][j], 0, PAGE_SIZE); | 1338 | memset(id->areas[i][j], 0, PAGE_SIZE); |
| 1341 | } | 1339 | } |
| 1342 | } | 1340 | } |
| 1343 | printk(KERN_INFO "debug: %s: all areas flushed\n",id->name); | ||
| 1344 | } else if(area >= 0 && area < id->nr_areas) { | 1341 | } else if(area >= 0 && area < id->nr_areas) { |
| 1345 | id->active_entries[area] = 0; | 1342 | id->active_entries[area] = 0; |
| 1346 | id->active_pages[area] = 0; | 1343 | id->active_pages[area] = 0; |
| 1347 | for(i = 0; i < id->pages_per_area; i++) { | 1344 | for(i = 0; i < id->pages_per_area; i++) { |
| 1348 | memset(id->areas[area][i],0,PAGE_SIZE); | 1345 | memset(id->areas[area][i],0,PAGE_SIZE); |
| 1349 | } | 1346 | } |
| 1350 | printk(KERN_INFO "debug: %s: area %i has been flushed\n", | ||
| 1351 | id->name, area); | ||
| 1352 | } else { | ||
| 1353 | printk(KERN_INFO | ||
| 1354 | "debug: %s: area %i cannot be flushed (range: %i - %i)\n", | ||
| 1355 | id->name, area, 0, id->nr_areas-1); | ||
| 1356 | } | 1347 | } |
| 1357 | spin_unlock_irqrestore(&id->lock,flags); | 1348 | spin_unlock_irqrestore(&id->lock,flags); |
| 1358 | } | 1349 | } |
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index d0e09684b9ce..2a2ca268b1dd 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 15 | #include <linux/pfn.h> | 15 | #include <linux/pfn.h> |
| 16 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
| 17 | #include <asm/ebcdic.h> | ||
| 17 | #include <asm/ipl.h> | 18 | #include <asm/ipl.h> |
| 18 | #include <asm/lowcore.h> | 19 | #include <asm/lowcore.h> |
| 19 | #include <asm/processor.h> | 20 | #include <asm/processor.h> |
| @@ -26,12 +27,40 @@ | |||
| 26 | /* | 27 | /* |
| 27 | * Create a Kernel NSS if the SAVESYS= parameter is defined | 28 | * Create a Kernel NSS if the SAVESYS= parameter is defined |
| 28 | */ | 29 | */ |
| 29 | #define DEFSYS_CMD_SIZE 96 | 30 | #define DEFSYS_CMD_SIZE 128 |
| 30 | #define SAVESYS_CMD_SIZE 32 | 31 | #define SAVESYS_CMD_SIZE 32 |
| 31 | 32 | ||
| 32 | char kernel_nss_name[NSS_NAME_SIZE + 1]; | 33 | char kernel_nss_name[NSS_NAME_SIZE + 1]; |
| 33 | 34 | ||
| 35 | static void __init setup_boot_command_line(void); | ||
| 36 | |||
| 37 | |||
| 34 | #ifdef CONFIG_SHARED_KERNEL | 38 | #ifdef CONFIG_SHARED_KERNEL |
| 39 | int __init savesys_ipl_nss(char *cmd, const int cmdlen); | ||
| 40 | |||
| 41 | asm( | ||
| 42 | " .section .init.text,\"ax\",@progbits\n" | ||
| 43 | " .align 4\n" | ||
| 44 | " .type savesys_ipl_nss, @function\n" | ||
| 45 | "savesys_ipl_nss:\n" | ||
| 46 | #ifdef CONFIG_64BIT | ||
| 47 | " stmg 6,15,48(15)\n" | ||
| 48 | " lgr 14,3\n" | ||
| 49 | " sam31\n" | ||
| 50 | " diag 2,14,0x8\n" | ||
| 51 | " sam64\n" | ||
| 52 | " lgr 2,14\n" | ||
| 53 | " lmg 6,15,48(15)\n" | ||
| 54 | #else | ||
| 55 | " stm 6,15,24(15)\n" | ||
| 56 | " lr 14,3\n" | ||
| 57 | " diag 2,14,0x8\n" | ||
| 58 | " lr 2,14\n" | ||
| 59 | " lm 6,15,24(15)\n" | ||
| 60 | #endif | ||
| 61 | " br 14\n" | ||
| 62 | " .size savesys_ipl_nss, .-savesys_ipl_nss\n"); | ||
| 63 | |||
| 35 | static noinline __init void create_kernel_nss(void) | 64 | static noinline __init void create_kernel_nss(void) |
| 36 | { | 65 | { |
| 37 | unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; | 66 | unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; |
| @@ -39,6 +68,7 @@ static noinline __init void create_kernel_nss(void) | |||
| 39 | unsigned int sinitrd_pfn, einitrd_pfn; | 68 | unsigned int sinitrd_pfn, einitrd_pfn; |
| 40 | #endif | 69 | #endif |
| 41 | int response; | 70 | int response; |
| 71 | size_t len; | ||
| 42 | char *savesys_ptr; | 72 | char *savesys_ptr; |
| 43 | char upper_command_line[COMMAND_LINE_SIZE]; | 73 | char upper_command_line[COMMAND_LINE_SIZE]; |
| 44 | char defsys_cmd[DEFSYS_CMD_SIZE]; | 74 | char defsys_cmd[DEFSYS_CMD_SIZE]; |
| @@ -49,8 +79,8 @@ static noinline __init void create_kernel_nss(void) | |||
| 49 | return; | 79 | return; |
| 50 | 80 | ||
| 51 | /* Convert COMMAND_LINE to upper case */ | 81 | /* Convert COMMAND_LINE to upper case */ |
| 52 | for (i = 0; i < strlen(COMMAND_LINE); i++) | 82 | for (i = 0; i < strlen(boot_command_line); i++) |
| 53 | upper_command_line[i] = toupper(COMMAND_LINE[i]); | 83 | upper_command_line[i] = toupper(boot_command_line[i]); |
| 54 | 84 | ||
| 55 | savesys_ptr = strstr(upper_command_line, "SAVESYS="); | 85 | savesys_ptr = strstr(upper_command_line, "SAVESYS="); |
| 56 | 86 | ||
| @@ -83,7 +113,8 @@ static noinline __init void create_kernel_nss(void) | |||
| 83 | } | 113 | } |
| 84 | #endif | 114 | #endif |
| 85 | 115 | ||
| 86 | sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size); | 116 | sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK PARMREGS=0-13", |
| 117 | defsys_cmd, min_size); | ||
| 87 | sprintf(savesys_cmd, "SAVESYS %s \n IPL %s", | 118 | sprintf(savesys_cmd, "SAVESYS %s \n IPL %s", |
| 88 | kernel_nss_name, kernel_nss_name); | 119 | kernel_nss_name, kernel_nss_name); |
| 89 | 120 | ||
| @@ -94,13 +125,24 @@ static noinline __init void create_kernel_nss(void) | |||
| 94 | return; | 125 | return; |
| 95 | } | 126 | } |
| 96 | 127 | ||
| 97 | __cpcmd(savesys_cmd, NULL, 0, &response); | 128 | len = strlen(savesys_cmd); |
| 129 | ASCEBC(savesys_cmd, len); | ||
| 130 | response = savesys_ipl_nss(savesys_cmd, len); | ||
| 98 | 131 | ||
| 99 | if (response != strlen(savesys_cmd)) { | 132 | /* On success: response is equal to the command size, |
| 133 | * max SAVESYS_CMD_SIZE | ||
| 134 | * On error: response contains the numeric portion of cp error message. | ||
| 135 | * for SAVESYS it will be >= 263 | ||
| 136 | */ | ||
| 137 | if (response > SAVESYS_CMD_SIZE) { | ||
| 100 | kernel_nss_name[0] = '\0'; | 138 | kernel_nss_name[0] = '\0'; |
| 101 | return; | 139 | return; |
| 102 | } | 140 | } |
| 103 | 141 | ||
| 142 | /* re-setup boot command line with new ipl vm parms */ | ||
| 143 | ipl_update_parameters(); | ||
| 144 | setup_boot_command_line(); | ||
| 145 | |||
| 104 | ipl_flags = IPL_NSS_VALID; | 146 | ipl_flags = IPL_NSS_VALID; |
| 105 | } | 147 | } |
| 106 | 148 | ||
| @@ -141,109 +183,11 @@ static noinline __init void detect_machine_type(void) | |||
| 141 | if (cpuinfo->cpu_id.version == 0xff) | 183 | if (cpuinfo->cpu_id.version == 0xff) |
| 142 | machine_flags |= MACHINE_FLAG_VM; | 184 | machine_flags |= MACHINE_FLAG_VM; |
| 143 | 185 | ||
| 144 | /* Running on a P/390 ? */ | ||
| 145 | if (cpuinfo->cpu_id.machine == 0x7490) | ||
| 146 | machine_flags |= MACHINE_FLAG_P390; | ||
| 147 | |||
| 148 | /* Running under KVM ? */ | 186 | /* Running under KVM ? */ |
| 149 | if (cpuinfo->cpu_id.version == 0xfe) | 187 | if (cpuinfo->cpu_id.version == 0xfe) |
| 150 | machine_flags |= MACHINE_FLAG_KVM; | 188 | machine_flags |= MACHINE_FLAG_KVM; |
| 151 | } | 189 | } |
| 152 | 190 | ||
| 153 | #ifdef CONFIG_64BIT | ||
| 154 | static noinline __init int memory_fast_detect(void) | ||
| 155 | { | ||
| 156 | unsigned long val0 = 0; | ||
| 157 | unsigned long val1 = 0xc; | ||
| 158 | int ret = -ENOSYS; | ||
| 159 | |||
| 160 | if (ipl_flags & IPL_NSS_VALID) | ||
| 161 | return -ENOSYS; | ||
| 162 | |||
| 163 | asm volatile( | ||
| 164 | " diag %1,%2,0x260\n" | ||
| 165 | "0: lhi %0,0\n" | ||
| 166 | "1:\n" | ||
| 167 | EX_TABLE(0b,1b) | ||
| 168 | : "+d" (ret), "+d" (val0), "+d" (val1) : : "cc"); | ||
| 169 | |||
| 170 | if (ret || val0 != val1) | ||
| 171 | return -ENOSYS; | ||
| 172 | |||
| 173 | memory_chunk[0].size = val0 + 1; | ||
| 174 | return 0; | ||
| 175 | } | ||
| 176 | #else | ||
| 177 | static inline int memory_fast_detect(void) | ||
| 178 | { | ||
| 179 | return -ENOSYS; | ||
| 180 | } | ||
| 181 | #endif | ||
| 182 | |||
| 183 | static inline __init unsigned long __tprot(unsigned long addr) | ||
| 184 | { | ||
| 185 | int cc = -1; | ||
| 186 | |||
| 187 | asm volatile( | ||
| 188 | " tprot 0(%1),0\n" | ||
| 189 | "0: ipm %0\n" | ||
| 190 | " srl %0,28\n" | ||
| 191 | "1:\n" | ||
| 192 | EX_TABLE(0b,1b) | ||
| 193 | : "+d" (cc) : "a" (addr) : "cc"); | ||
| 194 | return (unsigned long)cc; | ||
| 195 | } | ||
| 196 | |||
| 197 | /* Checking memory in 128KB increments. */ | ||
| 198 | #define CHUNK_INCR (1UL << 17) | ||
| 199 | #define ADDR2G (1UL << 31) | ||
| 200 | |||
| 201 | static noinline __init void find_memory_chunks(unsigned long memsize) | ||
| 202 | { | ||
| 203 | unsigned long addr = 0, old_addr = 0; | ||
| 204 | unsigned long old_cc = CHUNK_READ_WRITE; | ||
| 205 | unsigned long cc; | ||
| 206 | int chunk = 0; | ||
| 207 | |||
| 208 | while (chunk < MEMORY_CHUNKS) { | ||
| 209 | cc = __tprot(addr); | ||
| 210 | while (cc == old_cc) { | ||
| 211 | addr += CHUNK_INCR; | ||
| 212 | if (memsize && addr >= memsize) | ||
| 213 | break; | ||
| 214 | #ifndef CONFIG_64BIT | ||
| 215 | if (addr == ADDR2G) | ||
| 216 | break; | ||
| 217 | #endif | ||
| 218 | cc = __tprot(addr); | ||
| 219 | } | ||
| 220 | |||
| 221 | if (old_addr != addr && | ||
| 222 | (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) { | ||
| 223 | memory_chunk[chunk].addr = old_addr; | ||
| 224 | memory_chunk[chunk].size = addr - old_addr; | ||
| 225 | memory_chunk[chunk].type = old_cc; | ||
| 226 | chunk++; | ||
| 227 | } | ||
| 228 | |||
| 229 | old_addr = addr; | ||
| 230 | old_cc = cc; | ||
| 231 | |||
| 232 | #ifndef CONFIG_64BIT | ||
| 233 | if (addr == ADDR2G) | ||
| 234 | break; | ||
| 235 | #endif | ||
| 236 | /* | ||
| 237 | * Finish memory detection at the first hole | ||
| 238 | * if storage size is unknown. | ||
| 239 | */ | ||
| 240 | if (cc == -1UL && !memsize) | ||
| 241 | break; | ||
| 242 | if (memsize && addr >= memsize) | ||
| 243 | break; | ||
| 244 | } | ||
| 245 | } | ||
| 246 | |||
| 247 | static __init void early_pgm_check_handler(void) | 191 | static __init void early_pgm_check_handler(void) |
| 248 | { | 192 | { |
| 249 | unsigned long addr; | 193 | unsigned long addr; |
| @@ -380,23 +324,61 @@ static __init void detect_machine_facilities(void) | |||
| 380 | #endif | 324 | #endif |
| 381 | } | 325 | } |
| 382 | 326 | ||
| 327 | static __init void rescue_initrd(void) | ||
| 328 | { | ||
| 329 | #ifdef CONFIG_BLK_DEV_INITRD | ||
| 330 | /* | ||
| 331 | * Move the initrd right behind the bss section in case it starts | ||
| 332 | * within the bss section. So we don't overwrite it when the bss | ||
| 333 | * section gets cleared. | ||
| 334 | */ | ||
| 335 | if (!INITRD_START || !INITRD_SIZE) | ||
| 336 | return; | ||
| 337 | if (INITRD_START >= (unsigned long) __bss_stop) | ||
| 338 | return; | ||
| 339 | memmove(__bss_stop, (void *) INITRD_START, INITRD_SIZE); | ||
| 340 | INITRD_START = (unsigned long) __bss_stop; | ||
| 341 | #endif | ||
| 342 | } | ||
| 343 | |||
| 344 | /* Set up boot command line */ | ||
| 345 | static void __init setup_boot_command_line(void) | ||
| 346 | { | ||
| 347 | char *parm = NULL; | ||
| 348 | |||
| 349 | /* copy arch command line */ | ||
| 350 | strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); | ||
| 351 | boot_command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0; | ||
| 352 | |||
| 353 | /* append IPL PARM data to the boot command line */ | ||
| 354 | if (MACHINE_IS_VM) { | ||
| 355 | parm = boot_command_line + strlen(boot_command_line); | ||
| 356 | *parm++ = ' '; | ||
| 357 | get_ipl_vmparm(parm); | ||
| 358 | if (parm[0] == '=') | ||
| 359 | memmove(boot_command_line, parm + 1, strlen(parm)); | ||
| 360 | } | ||
| 361 | } | ||
| 362 | |||
| 363 | |||
| 383 | /* | 364 | /* |
| 384 | * Save ipl parameters, clear bss memory, initialize storage keys | 365 | * Save ipl parameters, clear bss memory, initialize storage keys |
| 385 | * and create a kernel NSS at startup if the SAVESYS= parm is defined | 366 | * and create a kernel NSS at startup if the SAVESYS= parm is defined |
| 386 | */ | 367 | */ |
| 387 | void __init startup_init(void) | 368 | void __init startup_init(void) |
| 388 | { | 369 | { |
| 389 | unsigned long long memsize; | ||
| 390 | |||
| 391 | ipl_save_parameters(); | 370 | ipl_save_parameters(); |
| 371 | rescue_initrd(); | ||
| 392 | clear_bss_section(); | 372 | clear_bss_section(); |
| 393 | init_kernel_storage_key(); | 373 | init_kernel_storage_key(); |
| 394 | lockdep_init(); | 374 | lockdep_init(); |
| 395 | lockdep_off(); | 375 | lockdep_off(); |
| 396 | detect_machine_type(); | ||
| 397 | create_kernel_nss(); | ||
| 398 | sort_main_extable(); | 376 | sort_main_extable(); |
| 399 | setup_lowcore_early(); | 377 | setup_lowcore_early(); |
| 378 | detect_machine_type(); | ||
| 379 | ipl_update_parameters(); | ||
| 380 | setup_boot_command_line(); | ||
| 381 | create_kernel_nss(); | ||
| 400 | detect_mvpg(); | 382 | detect_mvpg(); |
| 401 | detect_ieee(); | 383 | detect_ieee(); |
| 402 | detect_csp(); | 384 | detect_csp(); |
| @@ -404,18 +386,7 @@ void __init startup_init(void) | |||
| 404 | detect_diag44(); | 386 | detect_diag44(); |
| 405 | detect_machine_facilities(); | 387 | detect_machine_facilities(); |
| 406 | setup_hpage(); | 388 | setup_hpage(); |
| 407 | sclp_read_info_early(); | ||
| 408 | sclp_facilities_detect(); | 389 | sclp_facilities_detect(); |
| 409 | memsize = sclp_memory_detect(); | 390 | detect_memory_layout(memory_chunk); |
| 410 | #ifndef CONFIG_64BIT | ||
| 411 | /* | ||
| 412 | * Can't deal with more than 2G in 31 bit addressing mode, so | ||
| 413 | * limit the value in order to avoid strange side effects. | ||
| 414 | */ | ||
| 415 | if (memsize > ADDR2G) | ||
| 416 | memsize = ADDR2G; | ||
| 417 | #endif | ||
| 418 | if (memory_fast_detect() < 0) | ||
| 419 | find_memory_chunks((unsigned long) memsize); | ||
| 420 | lockdep_on(); | 391 | lockdep_on(); |
| 421 | } | 392 | } |
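With the reordered startup_init() above, the boot command line is assembled before the kernel NSS is created: under z/VM the IPL PARM string is appended to the built-in command line, or replaces it entirely when it starts with '='. A minimal userspace sketch of that string handling follows; the buffer size and the sample PARM value are invented for illustration, and fake_get_ipl_vmparm() merely stands in for the kernel's get_ipl_vmparm().

    #include <stdio.h>
    #include <string.h>

    #define CMDLINE_SIZE 896   /* invented size, stands in for ARCH_COMMAND_LINE_SIZE */

    /* Stand-in for the kernel's get_ipl_vmparm(): returns the decoded VM PARM. */
    static void fake_get_ipl_vmparm(char *dest)
    {
        strcpy(dest, "=root=/dev/dasda1 dasd=0150");
    }

    int main(void)
    {
        char boot_command_line[CMDLINE_SIZE] = "dasd=0150 root=/dev/dasda1";
        char *parm;

        /* append the IPL PARM data, as setup_boot_command_line() does under z/VM */
        parm = boot_command_line + strlen(boot_command_line);
        *parm++ = ' ';
        fake_get_ipl_vmparm(parm);

        /* a leading '=' means "replace the built-in command line entirely" */
        if (parm[0] == '=')
            memmove(boot_command_line, parm + 1, strlen(parm));

        printf("%s\n", boot_command_line);
        return 0;
    }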
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 532542447d66..54b2779b5e2f 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
| 15 | #include <linux/reboot.h> | 15 | #include <linux/reboot.h> |
| 16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
| 17 | #include <linux/fs.h> | ||
| 17 | #include <asm/ipl.h> | 18 | #include <asm/ipl.h> |
| 18 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
| 19 | #include <asm/setup.h> | 20 | #include <asm/setup.h> |
| @@ -22,6 +23,7 @@ | |||
| 22 | #include <asm/ebcdic.h> | 23 | #include <asm/ebcdic.h> |
| 23 | #include <asm/reset.h> | 24 | #include <asm/reset.h> |
| 24 | #include <asm/sclp.h> | 25 | #include <asm/sclp.h> |
| 26 | #include <asm/setup.h> | ||
| 25 | 27 | ||
| 26 | #define IPL_PARM_BLOCK_VERSION 0 | 28 | #define IPL_PARM_BLOCK_VERSION 0 |
| 27 | 29 | ||
| @@ -121,6 +123,7 @@ enum ipl_method { | |||
| 121 | REIPL_METHOD_FCP_RO_VM, | 123 | REIPL_METHOD_FCP_RO_VM, |
| 122 | REIPL_METHOD_FCP_DUMP, | 124 | REIPL_METHOD_FCP_DUMP, |
| 123 | REIPL_METHOD_NSS, | 125 | REIPL_METHOD_NSS, |
| 126 | REIPL_METHOD_NSS_DIAG, | ||
| 124 | REIPL_METHOD_DEFAULT, | 127 | REIPL_METHOD_DEFAULT, |
| 125 | }; | 128 | }; |
| 126 | 129 | ||
| @@ -134,14 +137,15 @@ enum dump_method { | |||
| 134 | 137 | ||
| 135 | static int diag308_set_works = 0; | 138 | static int diag308_set_works = 0; |
| 136 | 139 | ||
| 140 | static struct ipl_parameter_block ipl_block; | ||
| 141 | |||
| 137 | static int reipl_capabilities = IPL_TYPE_UNKNOWN; | 142 | static int reipl_capabilities = IPL_TYPE_UNKNOWN; |
| 138 | 143 | ||
| 139 | static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; | 144 | static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; |
| 140 | static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT; | 145 | static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT; |
| 141 | static struct ipl_parameter_block *reipl_block_fcp; | 146 | static struct ipl_parameter_block *reipl_block_fcp; |
| 142 | static struct ipl_parameter_block *reipl_block_ccw; | 147 | static struct ipl_parameter_block *reipl_block_ccw; |
| 143 | 148 | static struct ipl_parameter_block *reipl_block_nss; | |
| 144 | static char reipl_nss_name[NSS_NAME_SIZE + 1]; | ||
| 145 | 149 | ||
| 146 | static int dump_capabilities = DUMP_TYPE_NONE; | 150 | static int dump_capabilities = DUMP_TYPE_NONE; |
| 147 | static enum dump_type dump_type = DUMP_TYPE_NONE; | 151 | static enum dump_type dump_type = DUMP_TYPE_NONE; |
| @@ -263,6 +267,56 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr, | |||
| 263 | 267 | ||
| 264 | static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); | 268 | static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); |
| 265 | 269 | ||
| 270 | /* VM IPL PARM routines */ | ||
| 271 | static void reipl_get_ascii_vmparm(char *dest, | ||
| 272 | const struct ipl_parameter_block *ipb) | ||
| 273 | { | ||
| 274 | int i; | ||
| 275 | int len = 0; | ||
| 276 | char has_lowercase = 0; | ||
| 277 | |||
| 278 | if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) && | ||
| 279 | (ipb->ipl_info.ccw.vm_parm_len > 0)) { | ||
| 280 | |||
| 281 | len = ipb->ipl_info.ccw.vm_parm_len; | ||
| 282 | memcpy(dest, ipb->ipl_info.ccw.vm_parm, len); | ||
| 283 | /* If at least one character is lowercase, we assume mixed | ||
| 284 | * case; otherwise we convert everything to lowercase. | ||
| 285 | */ | ||
| 286 | for (i = 0; i < len; i++) | ||
| 287 | if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */ | ||
| 288 | (dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */ | ||
| 289 | (dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */ | ||
| 290 | has_lowercase = 1; | ||
| 291 | break; | ||
| 292 | } | ||
| 293 | if (!has_lowercase) | ||
| 294 | EBC_TOLOWER(dest, len); | ||
| 295 | EBCASC(dest, len); | ||
| 296 | } | ||
| 297 | dest[len] = 0; | ||
| 298 | } | ||
| 299 | |||
| 300 | void get_ipl_vmparm(char *dest) | ||
| 301 | { | ||
| 302 | if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)) | ||
| 303 | reipl_get_ascii_vmparm(dest, &ipl_block); | ||
| 304 | else | ||
| 305 | dest[0] = 0; | ||
| 306 | } | ||
| 307 | |||
| 308 | static ssize_t ipl_vm_parm_show(struct kobject *kobj, | ||
| 309 | struct kobj_attribute *attr, char *page) | ||
| 310 | { | ||
| 311 | char parm[DIAG308_VMPARM_SIZE + 1] = {}; | ||
| 312 | |||
| 313 | get_ipl_vmparm(parm); | ||
| 314 | return sprintf(page, "%s\n", parm); | ||
| 315 | } | ||
| 316 | |||
| 317 | static struct kobj_attribute sys_ipl_vm_parm_attr = | ||
| 318 | __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL); | ||
| 319 | |||
| 266 | static ssize_t sys_ipl_device_show(struct kobject *kobj, | 320 | static ssize_t sys_ipl_device_show(struct kobject *kobj, |
| 267 | struct kobj_attribute *attr, char *page) | 321 | struct kobj_attribute *attr, char *page) |
| 268 | { | 322 | { |
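The new "parm" attribute exposes the decoded z/VM PARM string the running system was IPLed with. A hedged read sketch follows; the /sys/firmware/ipl/parm path is assumed from the ipl kset being registered under the firmware kobject and may differ on other setups.

    #include <stdio.h>

    /* Path assumed from the ipl kset living below /sys/firmware. */
    #define IPL_PARM_PATH "/sys/firmware/ipl/parm"

    int main(void)
    {
        char buf[128];
        FILE *f = fopen(IPL_PARM_PATH, "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("IPL PARM: %s", buf);
        fclose(f);
        return 0;
    }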
| @@ -285,14 +339,8 @@ static struct kobj_attribute sys_ipl_device_attr = | |||
| 285 | static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr, | 339 | static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr, |
| 286 | char *buf, loff_t off, size_t count) | 340 | char *buf, loff_t off, size_t count) |
| 287 | { | 341 | { |
| 288 | unsigned int size = IPL_PARMBLOCK_SIZE; | 342 | return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START, |
| 289 | 343 | IPL_PARMBLOCK_SIZE); | |
| 290 | if (off > size) | ||
| 291 | return 0; | ||
| 292 | if (off + count > size) | ||
| 293 | count = size - off; | ||
| 294 | memcpy(buf, (void *)IPL_PARMBLOCK_START + off, count); | ||
| 295 | return count; | ||
| 296 | } | 344 | } |
| 297 | 345 | ||
| 298 | static struct bin_attribute ipl_parameter_attr = { | 346 | static struct bin_attribute ipl_parameter_attr = { |
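Both binary attributes are converted to memory_read_from_buffer(), which centralizes the offset and length clamping that was previously open-coded here. Below is a userspace sketch of those semantics, written on the assumption that the helper behaves like the code it replaces (negative-offset handling omitted for brevity).

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    /* Sketch of memory_read_from_buffer(): copy at most count bytes from
     * from[available] starting at *ppos, clamp at the buffer end, and
     * advance *ppos; reads past the end return 0. */
    static ssize_t read_from_buffer(void *to, size_t count, off_t *ppos,
                                    const void *from, size_t available)
    {
        off_t pos = *ppos;

        if (pos >= (off_t) available)
            return 0;
        if (count > available - pos)
            count = available - pos;
        memcpy(to, (const char *) from + pos, count);
        *ppos = pos + count;
        return count;
    }

    int main(void)
    {
        const char blob[] = "IPL parameter block";
        char buf[8];
        off_t off = 16;
        ssize_t n = read_from_buffer(buf, sizeof(buf), &off,
                                     blob, sizeof(blob) - 1);

        printf("read %zd bytes: %.*s\n", n, (int) n, buf);
        return 0;
    }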
| @@ -310,12 +358,7 @@ static ssize_t ipl_scp_data_read(struct kobject *kobj, struct bin_attribute *att | |||
| 310 | unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len; | 358 | unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len; |
| 311 | void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data; | 359 | void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data; |
| 312 | 360 | ||
| 313 | if (off > size) | 361 | return memory_read_from_buffer(buf, count, &off, scp_data, size); |
| 314 | return 0; | ||
| 315 | if (off + count > size) | ||
| 316 | count = size - off; | ||
| 317 | memcpy(buf, scp_data + off, count); | ||
| 318 | return count; | ||
| 319 | } | 362 | } |
| 320 | 363 | ||
| 321 | static struct bin_attribute ipl_scp_data_attr = { | 364 | static struct bin_attribute ipl_scp_data_attr = { |
| @@ -370,15 +413,27 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj, | |||
| 370 | static struct kobj_attribute sys_ipl_ccw_loadparm_attr = | 413 | static struct kobj_attribute sys_ipl_ccw_loadparm_attr = |
| 371 | __ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL); | 414 | __ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL); |
| 372 | 415 | ||
| 373 | static struct attribute *ipl_ccw_attrs[] = { | 416 | static struct attribute *ipl_ccw_attrs_vm[] = { |
| 374 | &sys_ipl_type_attr.attr, | 417 | &sys_ipl_type_attr.attr, |
| 375 | &sys_ipl_device_attr.attr, | 418 | &sys_ipl_device_attr.attr, |
| 376 | &sys_ipl_ccw_loadparm_attr.attr, | 419 | &sys_ipl_ccw_loadparm_attr.attr, |
| 420 | &sys_ipl_vm_parm_attr.attr, | ||
| 377 | NULL, | 421 | NULL, |
| 378 | }; | 422 | }; |
| 379 | 423 | ||
| 380 | static struct attribute_group ipl_ccw_attr_group = { | 424 | static struct attribute *ipl_ccw_attrs_lpar[] = { |
| 381 | .attrs = ipl_ccw_attrs, | 425 | &sys_ipl_type_attr.attr, |
| 426 | &sys_ipl_device_attr.attr, | ||
| 427 | &sys_ipl_ccw_loadparm_attr.attr, | ||
| 428 | NULL, | ||
| 429 | }; | ||
| 430 | |||
| 431 | static struct attribute_group ipl_ccw_attr_group_vm = { | ||
| 432 | .attrs = ipl_ccw_attrs_vm, | ||
| 433 | }; | ||
| 434 | |||
| 435 | static struct attribute_group ipl_ccw_attr_group_lpar = { | ||
| 436 | .attrs = ipl_ccw_attrs_lpar, | ||
| 382 | }; | 437 | }; |
| 383 | 438 | ||
| 384 | /* NSS ipl device attributes */ | 439 | /* NSS ipl device attributes */ |
| @@ -388,6 +443,8 @@ DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name); | |||
| 388 | static struct attribute *ipl_nss_attrs[] = { | 443 | static struct attribute *ipl_nss_attrs[] = { |
| 389 | &sys_ipl_type_attr.attr, | 444 | &sys_ipl_type_attr.attr, |
| 390 | &sys_ipl_nss_name_attr.attr, | 445 | &sys_ipl_nss_name_attr.attr, |
| 446 | &sys_ipl_ccw_loadparm_attr.attr, | ||
| 447 | &sys_ipl_vm_parm_attr.attr, | ||
| 391 | NULL, | 448 | NULL, |
| 392 | }; | 449 | }; |
| 393 | 450 | ||
| @@ -450,7 +507,12 @@ static int __init ipl_init(void) | |||
| 450 | } | 507 | } |
| 451 | switch (ipl_info.type) { | 508 | switch (ipl_info.type) { |
| 452 | case IPL_TYPE_CCW: | 509 | case IPL_TYPE_CCW: |
| 453 | rc = sysfs_create_group(&ipl_kset->kobj, &ipl_ccw_attr_group); | 510 | if (MACHINE_IS_VM) |
| 511 | rc = sysfs_create_group(&ipl_kset->kobj, | ||
| 512 | &ipl_ccw_attr_group_vm); | ||
| 513 | else | ||
| 514 | rc = sysfs_create_group(&ipl_kset->kobj, | ||
| 515 | &ipl_ccw_attr_group_lpar); | ||
| 454 | break; | 516 | break; |
| 455 | case IPL_TYPE_FCP: | 517 | case IPL_TYPE_FCP: |
| 456 | case IPL_TYPE_FCP_DUMP: | 518 | case IPL_TYPE_FCP_DUMP: |
| @@ -481,6 +543,83 @@ static struct shutdown_action __refdata ipl_action = { | |||
| 481 | * reipl shutdown action: Reboot Linux on shutdown. | 543 | * reipl shutdown action: Reboot Linux on shutdown. |
| 482 | */ | 544 | */ |
| 483 | 545 | ||
| 546 | /* VM IPL PARM attributes */ | ||
| 547 | static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb, | ||
| 548 | char *page) | ||
| 549 | { | ||
| 550 | char vmparm[DIAG308_VMPARM_SIZE + 1] = {}; | ||
| 551 | |||
| 552 | reipl_get_ascii_vmparm(vmparm, ipb); | ||
| 553 | return sprintf(page, "%s\n", vmparm); | ||
| 554 | } | ||
| 555 | |||
| 556 | static ssize_t reipl_generic_vmparm_store(struct ipl_parameter_block *ipb, | ||
| 557 | size_t vmparm_max, | ||
| 558 | const char *buf, size_t len) | ||
| 559 | { | ||
| 560 | int i, ip_len; | ||
| 561 | |||
| 562 | /* ignore trailing newline */ | ||
| 563 | ip_len = len; | ||
| 564 | if ((len > 0) && (buf[len - 1] == '\n')) | ||
| 565 | ip_len--; | ||
| 566 | |||
| 567 | if (ip_len > vmparm_max) | ||
| 568 | return -EINVAL; | ||
| 569 | |||
| 570 | /* parm is used to store kernel options; check for common chars */ | ||
| 571 | for (i = 0; i < ip_len; i++) | ||
| 572 | if (!(isalnum(buf[i]) || isascii(buf[i]) || isprint(buf[i]))) | ||
| 573 | return -EINVAL; | ||
| 574 | |||
| 575 | memset(ipb->ipl_info.ccw.vm_parm, 0, DIAG308_VMPARM_SIZE); | ||
| 576 | ipb->ipl_info.ccw.vm_parm_len = ip_len; | ||
| 577 | if (ip_len > 0) { | ||
| 578 | ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID; | ||
| 579 | memcpy(ipb->ipl_info.ccw.vm_parm, buf, ip_len); | ||
| 580 | ASCEBC(ipb->ipl_info.ccw.vm_parm, ip_len); | ||
| 581 | } else { | ||
| 582 | ipb->ipl_info.ccw.vm_flags &= ~DIAG308_VM_FLAGS_VP_VALID; | ||
| 583 | } | ||
| 584 | |||
| 585 | return len; | ||
| 586 | } | ||
| 587 | |||
| 588 | /* NSS wrapper */ | ||
| 589 | static ssize_t reipl_nss_vmparm_show(struct kobject *kobj, | ||
| 590 | struct kobj_attribute *attr, char *page) | ||
| 591 | { | ||
| 592 | return reipl_generic_vmparm_show(reipl_block_nss, page); | ||
| 593 | } | ||
| 594 | |||
| 595 | static ssize_t reipl_nss_vmparm_store(struct kobject *kobj, | ||
| 596 | struct kobj_attribute *attr, | ||
| 597 | const char *buf, size_t len) | ||
| 598 | { | ||
| 599 | return reipl_generic_vmparm_store(reipl_block_nss, 56, buf, len); | ||
| 600 | } | ||
| 601 | |||
| 602 | /* CCW wrapper */ | ||
| 603 | static ssize_t reipl_ccw_vmparm_show(struct kobject *kobj, | ||
| 604 | struct kobj_attribute *attr, char *page) | ||
| 605 | { | ||
| 606 | return reipl_generic_vmparm_show(reipl_block_ccw, page); | ||
| 607 | } | ||
| 608 | |||
| 609 | static ssize_t reipl_ccw_vmparm_store(struct kobject *kobj, | ||
| 610 | struct kobj_attribute *attr, | ||
| 611 | const char *buf, size_t len) | ||
| 612 | { | ||
| 613 | return reipl_generic_vmparm_store(reipl_block_ccw, 64, buf, len); | ||
| 614 | } | ||
| 615 | |||
| 616 | static struct kobj_attribute sys_reipl_nss_vmparm_attr = | ||
| 617 | __ATTR(parm, S_IRUGO | S_IWUSR, reipl_nss_vmparm_show, | ||
| 618 | reipl_nss_vmparm_store); | ||
| 619 | static struct kobj_attribute sys_reipl_ccw_vmparm_attr = | ||
| 620 | __ATTR(parm, S_IRUGO | S_IWUSR, reipl_ccw_vmparm_show, | ||
| 621 | reipl_ccw_vmparm_store); | ||
| 622 | |||
| 484 | /* FCP reipl device attributes */ | 623 | /* FCP reipl device attributes */ |
| 485 | 624 | ||
| 486 | DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", | 625 | DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", |
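With the store routines above in place, the PARM string used for the next re-IPL can be set from userspace. A hedged sketch follows; the /sys/firmware/reipl/ccw/parm path is assumed from the reipl kset and the attribute group names in this patch, and reipl_ccw_vmparm_store() rejects strings longer than 64 characters.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Path assumed from the reipl kset and attribute names added here. */
    #define REIPL_CCW_PARM "/sys/firmware/reipl/ccw/parm"

    int main(void)
    {
        const char *parm = "root=/dev/dasda1 selinux=0\n";
        int fd = open(REIPL_CCW_PARM, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* the CCW store routine caps the string at 64 characters */
        if (write(fd, parm, strlen(parm)) < 0)
            perror("write");
        close(fd);
        return 0;
    }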
| @@ -513,27 +652,26 @@ static struct attribute_group reipl_fcp_attr_group = { | |||
| 513 | DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", | 652 | DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", |
| 514 | reipl_block_ccw->ipl_info.ccw.devno); | 653 | reipl_block_ccw->ipl_info.ccw.devno); |
| 515 | 654 | ||
| 516 | static void reipl_get_ascii_loadparm(char *loadparm) | 655 | static void reipl_get_ascii_loadparm(char *loadparm, |
| 656 | struct ipl_parameter_block *ipb) | ||
| 517 | { | 657 | { |
| 518 | memcpy(loadparm, &reipl_block_ccw->ipl_info.ccw.load_param, | 658 | memcpy(loadparm, ipb->ipl_info.ccw.load_parm, LOADPARM_LEN); |
| 519 | LOADPARM_LEN); | ||
| 520 | EBCASC(loadparm, LOADPARM_LEN); | 659 | EBCASC(loadparm, LOADPARM_LEN); |
| 521 | loadparm[LOADPARM_LEN] = 0; | 660 | loadparm[LOADPARM_LEN] = 0; |
| 522 | strstrip(loadparm); | 661 | strstrip(loadparm); |
| 523 | } | 662 | } |
| 524 | 663 | ||
| 525 | static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj, | 664 | static ssize_t reipl_generic_loadparm_show(struct ipl_parameter_block *ipb, |
| 526 | struct kobj_attribute *attr, char *page) | 665 | char *page) |
| 527 | { | 666 | { |
| 528 | char buf[LOADPARM_LEN + 1]; | 667 | char buf[LOADPARM_LEN + 1]; |
| 529 | 668 | ||
| 530 | reipl_get_ascii_loadparm(buf); | 669 | reipl_get_ascii_loadparm(buf, ipb); |
| 531 | return sprintf(page, "%s\n", buf); | 670 | return sprintf(page, "%s\n", buf); |
| 532 | } | 671 | } |
| 533 | 672 | ||
| 534 | static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj, | 673 | static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb, |
| 535 | struct kobj_attribute *attr, | 674 | const char *buf, size_t len) |
| 536 | const char *buf, size_t len) | ||
| 537 | { | 675 | { |
| 538 | int i, lp_len; | 676 | int i, lp_len; |
| 539 | 677 | ||
| @@ -552,35 +690,128 @@ static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj, | |||
| 552 | return -EINVAL; | 690 | return -EINVAL; |
| 553 | } | 691 | } |
| 554 | /* initialize loadparm with blanks */ | 692 | /* initialize loadparm with blanks */ |
| 555 | memset(&reipl_block_ccw->ipl_info.ccw.load_param, ' ', LOADPARM_LEN); | 693 | memset(ipb->ipl_info.ccw.load_parm, ' ', LOADPARM_LEN); |
| 556 | /* copy and convert to ebcdic */ | 694 | /* copy and convert to ebcdic */ |
| 557 | memcpy(&reipl_block_ccw->ipl_info.ccw.load_param, buf, lp_len); | 695 | memcpy(ipb->ipl_info.ccw.load_parm, buf, lp_len); |
| 558 | ASCEBC(reipl_block_ccw->ipl_info.ccw.load_param, LOADPARM_LEN); | 696 | ASCEBC(ipb->ipl_info.ccw.load_parm, LOADPARM_LEN); |
| 559 | return len; | 697 | return len; |
| 560 | } | 698 | } |
| 561 | 699 | ||
| 700 | /* NSS wrapper */ | ||
| 701 | static ssize_t reipl_nss_loadparm_show(struct kobject *kobj, | ||
| 702 | struct kobj_attribute *attr, char *page) | ||
| 703 | { | ||
| 704 | return reipl_generic_loadparm_show(reipl_block_nss, page); | ||
| 705 | } | ||
| 706 | |||
| 707 | static ssize_t reipl_nss_loadparm_store(struct kobject *kobj, | ||
| 708 | struct kobj_attribute *attr, | ||
| 709 | const char *buf, size_t len) | ||
| 710 | { | ||
| 711 | return reipl_generic_loadparm_store(reipl_block_nss, buf, len); | ||
| 712 | } | ||
| 713 | |||
| 714 | /* CCW wrapper */ | ||
| 715 | static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj, | ||
| 716 | struct kobj_attribute *attr, char *page) | ||
| 717 | { | ||
| 718 | return reipl_generic_loadparm_show(reipl_block_ccw, page); | ||
| 719 | } | ||
| 720 | |||
| 721 | static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj, | ||
| 722 | struct kobj_attribute *attr, | ||
| 723 | const char *buf, size_t len) | ||
| 724 | { | ||
| 725 | return reipl_generic_loadparm_store(reipl_block_ccw, buf, len); | ||
| 726 | } | ||
| 727 | |||
| 562 | static struct kobj_attribute sys_reipl_ccw_loadparm_attr = | 728 | static struct kobj_attribute sys_reipl_ccw_loadparm_attr = |
| 563 | __ATTR(loadparm, 0644, reipl_ccw_loadparm_show, | 729 | __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_ccw_loadparm_show, |
| 564 | reipl_ccw_loadparm_store); | 730 | reipl_ccw_loadparm_store); |
| 565 | 731 | ||
| 566 | static struct attribute *reipl_ccw_attrs[] = { | 732 | static struct attribute *reipl_ccw_attrs_vm[] = { |
| 567 | &sys_reipl_ccw_device_attr.attr, | 733 | &sys_reipl_ccw_device_attr.attr, |
| 568 | &sys_reipl_ccw_loadparm_attr.attr, | 734 | &sys_reipl_ccw_loadparm_attr.attr, |
| 735 | &sys_reipl_ccw_vmparm_attr.attr, | ||
| 569 | NULL, | 736 | NULL, |
| 570 | }; | 737 | }; |
| 571 | 738 | ||
| 572 | static struct attribute_group reipl_ccw_attr_group = { | 739 | static struct attribute *reipl_ccw_attrs_lpar[] = { |
| 740 | &sys_reipl_ccw_device_attr.attr, | ||
| 741 | &sys_reipl_ccw_loadparm_attr.attr, | ||
| 742 | NULL, | ||
| 743 | }; | ||
| 744 | |||
| 745 | static struct attribute_group reipl_ccw_attr_group_vm = { | ||
| 746 | .name = IPL_CCW_STR, | ||
| 747 | .attrs = reipl_ccw_attrs_vm, | ||
| 748 | }; | ||
| 749 | |||
| 750 | static struct attribute_group reipl_ccw_attr_group_lpar = { | ||
| 573 | .name = IPL_CCW_STR, | 751 | .name = IPL_CCW_STR, |
| 574 | .attrs = reipl_ccw_attrs, | 752 | .attrs = reipl_ccw_attrs_lpar, |
| 575 | }; | 753 | }; |
| 576 | 754 | ||
| 577 | 755 | ||
| 578 | /* NSS reipl device attributes */ | 756 | /* NSS reipl device attributes */ |
| 757 | static void reipl_get_ascii_nss_name(char *dst, | ||
| 758 | struct ipl_parameter_block *ipb) | ||
| 759 | { | ||
| 760 | memcpy(dst, ipb->ipl_info.ccw.nss_name, NSS_NAME_SIZE); | ||
| 761 | EBCASC(dst, NSS_NAME_SIZE); | ||
| 762 | dst[NSS_NAME_SIZE] = 0; | ||
| 763 | } | ||
| 764 | |||
| 765 | static ssize_t reipl_nss_name_show(struct kobject *kobj, | ||
| 766 | struct kobj_attribute *attr, char *page) | ||
| 767 | { | ||
| 768 | char nss_name[NSS_NAME_SIZE + 1] = {}; | ||
| 579 | 769 | ||
| 580 | DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name); | 770 | reipl_get_ascii_nss_name(nss_name, reipl_block_nss); |
| 771 | return sprintf(page, "%s\n", nss_name); | ||
| 772 | } | ||
| 773 | |||
| 774 | static ssize_t reipl_nss_name_store(struct kobject *kobj, | ||
| 775 | struct kobj_attribute *attr, | ||
| 776 | const char *buf, size_t len) | ||
| 777 | { | ||
| 778 | int nss_len; | ||
| 779 | |||
| 780 | /* ignore trailing newline */ | ||
| 781 | nss_len = len; | ||
| 782 | if ((len > 0) && (buf[len - 1] == '\n')) | ||
| 783 | nss_len--; | ||
| 784 | |||
| 785 | if (nss_len > NSS_NAME_SIZE) | ||
| 786 | return -EINVAL; | ||
| 787 | |||
| 788 | memset(reipl_block_nss->ipl_info.ccw.nss_name, 0x40, NSS_NAME_SIZE); | ||
| 789 | if (nss_len > 0) { | ||
| 790 | reipl_block_nss->ipl_info.ccw.vm_flags |= | ||
| 791 | DIAG308_VM_FLAGS_NSS_VALID; | ||
| 792 | memcpy(reipl_block_nss->ipl_info.ccw.nss_name, buf, nss_len); | ||
| 793 | ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, nss_len); | ||
| 794 | EBC_TOUPPER(reipl_block_nss->ipl_info.ccw.nss_name, nss_len); | ||
| 795 | } else { | ||
| 796 | reipl_block_nss->ipl_info.ccw.vm_flags &= | ||
| 797 | ~DIAG308_VM_FLAGS_NSS_VALID; | ||
| 798 | } | ||
| 799 | |||
| 800 | return len; | ||
| 801 | } | ||
| 802 | |||
| 803 | static struct kobj_attribute sys_reipl_nss_name_attr = | ||
| 804 | __ATTR(name, S_IRUGO | S_IWUSR, reipl_nss_name_show, | ||
| 805 | reipl_nss_name_store); | ||
| 806 | |||
| 807 | static struct kobj_attribute sys_reipl_nss_loadparm_attr = | ||
| 808 | __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_nss_loadparm_show, | ||
| 809 | reipl_nss_loadparm_store); | ||
| 581 | 810 | ||
| 582 | static struct attribute *reipl_nss_attrs[] = { | 811 | static struct attribute *reipl_nss_attrs[] = { |
| 583 | &sys_reipl_nss_name_attr.attr, | 812 | &sys_reipl_nss_name_attr.attr, |
| 813 | &sys_reipl_nss_loadparm_attr.attr, | ||
| 814 | &sys_reipl_nss_vmparm_attr.attr, | ||
| 584 | NULL, | 815 | NULL, |
| 585 | }; | 816 | }; |
| 586 | 817 | ||
| @@ -617,7 +848,10 @@ static int reipl_set_type(enum ipl_type type) | |||
| 617 | reipl_method = REIPL_METHOD_FCP_DUMP; | 848 | reipl_method = REIPL_METHOD_FCP_DUMP; |
| 618 | break; | 849 | break; |
| 619 | case IPL_TYPE_NSS: | 850 | case IPL_TYPE_NSS: |
| 620 | reipl_method = REIPL_METHOD_NSS; | 851 | if (diag308_set_works) |
| 852 | reipl_method = REIPL_METHOD_NSS_DIAG; | ||
| 853 | else | ||
| 854 | reipl_method = REIPL_METHOD_NSS; | ||
| 621 | break; | 855 | break; |
| 622 | case IPL_TYPE_UNKNOWN: | 856 | case IPL_TYPE_UNKNOWN: |
| 623 | reipl_method = REIPL_METHOD_DEFAULT; | 857 | reipl_method = REIPL_METHOD_DEFAULT; |
| @@ -655,11 +889,38 @@ static struct kobj_attribute reipl_type_attr = | |||
| 655 | 889 | ||
| 656 | static struct kset *reipl_kset; | 890 | static struct kset *reipl_kset; |
| 657 | 891 | ||
| 892 | static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, | ||
| 893 | const enum ipl_method m) | ||
| 894 | { | ||
| 895 | char loadparm[LOADPARM_LEN + 1] = {}; | ||
| 896 | char vmparm[DIAG308_VMPARM_SIZE + 1] = {}; | ||
| 897 | char nss_name[NSS_NAME_SIZE + 1] = {}; | ||
| 898 | size_t pos = 0; | ||
| 899 | |||
| 900 | reipl_get_ascii_loadparm(loadparm, ipb); | ||
| 901 | reipl_get_ascii_nss_name(nss_name, ipb); | ||
| 902 | reipl_get_ascii_vmparm(vmparm, ipb); | ||
| 903 | |||
| 904 | switch (m) { | ||
| 905 | case REIPL_METHOD_CCW_VM: | ||
| 906 | pos = sprintf(dst, "IPL %X CLEAR", ipb->ipl_info.ccw.devno); | ||
| 907 | break; | ||
| 908 | case REIPL_METHOD_NSS: | ||
| 909 | pos = sprintf(dst, "IPL %s", nss_name); | ||
| 910 | break; | ||
| 911 | default: | ||
| 912 | break; | ||
| 913 | } | ||
| 914 | if (strlen(loadparm) > 0) | ||
| 915 | pos += sprintf(dst + pos, " LOADPARM '%s'", loadparm); | ||
| 916 | if (strlen(vmparm) > 0) | ||
| 917 | sprintf(dst + pos, " PARM %s", vmparm); | ||
| 918 | } | ||
| 919 | |||
| 658 | static void reipl_run(struct shutdown_trigger *trigger) | 920 | static void reipl_run(struct shutdown_trigger *trigger) |
| 659 | { | 921 | { |
| 660 | struct ccw_dev_id devid; | 922 | struct ccw_dev_id devid; |
| 661 | static char buf[100]; | 923 | static char buf[128]; |
| 662 | char loadparm[LOADPARM_LEN + 1]; | ||
| 663 | 924 | ||
| 664 | switch (reipl_method) { | 925 | switch (reipl_method) { |
| 665 | case REIPL_METHOD_CCW_CIO: | 926 | case REIPL_METHOD_CCW_CIO: |
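get_ipl_string() assembles the CP command that reipl_run() later passes to __cpcmd() for the CCW and NSS re-IPL methods under z/VM. The following standalone sketch reproduces the string construction; the device number, LOADPARM and PARM values are invented for illustration.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char dst[128];
        unsigned int devno = 0x0150;            /* made-up CCW device number */
        const char *loadparm = "AUTO";          /* made-up LOADPARM */
        const char *vmparm = "root=/dev/dasda1"; /* made-up VM PARM */
        size_t pos;

        /* same construction as the CCW branch of get_ipl_string() */
        pos = sprintf(dst, "IPL %X CLEAR", devno);
        if (strlen(loadparm) > 0)
            pos += sprintf(dst + pos, " LOADPARM '%s'", loadparm);
        if (strlen(vmparm) > 0)
            sprintf(dst + pos, " PARM %s", vmparm);

        /* prints: IPL 150 CLEAR LOADPARM 'AUTO' PARM root=/dev/dasda1 */
        printf("%s\n", dst);
        return 0;
    }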
| @@ -668,13 +929,7 @@ static void reipl_run(struct shutdown_trigger *trigger) | |||
| 668 | reipl_ccw_dev(&devid); | 929 | reipl_ccw_dev(&devid); |
| 669 | break; | 930 | break; |
| 670 | case REIPL_METHOD_CCW_VM: | 931 | case REIPL_METHOD_CCW_VM: |
| 671 | reipl_get_ascii_loadparm(loadparm); | 932 | get_ipl_string(buf, reipl_block_ccw, REIPL_METHOD_CCW_VM); |
| 672 | if (strlen(loadparm) == 0) | ||
| 673 | sprintf(buf, "IPL %X CLEAR", | ||
| 674 | reipl_block_ccw->ipl_info.ccw.devno); | ||
| 675 | else | ||
| 676 | sprintf(buf, "IPL %X CLEAR LOADPARM '%s'", | ||
| 677 | reipl_block_ccw->ipl_info.ccw.devno, loadparm); | ||
| 678 | __cpcmd(buf, NULL, 0, NULL); | 933 | __cpcmd(buf, NULL, 0, NULL); |
| 679 | break; | 934 | break; |
| 680 | case REIPL_METHOD_CCW_DIAG: | 935 | case REIPL_METHOD_CCW_DIAG: |
| @@ -691,8 +946,12 @@ static void reipl_run(struct shutdown_trigger *trigger) | |||
| 691 | case REIPL_METHOD_FCP_RO_VM: | 946 | case REIPL_METHOD_FCP_RO_VM: |
| 692 | __cpcmd("IPL", NULL, 0, NULL); | 947 | __cpcmd("IPL", NULL, 0, NULL); |
| 693 | break; | 948 | break; |
| 949 | case REIPL_METHOD_NSS_DIAG: | ||
| 950 | diag308(DIAG308_SET, reipl_block_nss); | ||
| 951 | diag308(DIAG308_IPL, NULL); | ||
| 952 | break; | ||
| 694 | case REIPL_METHOD_NSS: | 953 | case REIPL_METHOD_NSS: |
| 695 | sprintf(buf, "IPL %s", reipl_nss_name); | 954 | get_ipl_string(buf, reipl_block_nss, REIPL_METHOD_NSS); |
| 696 | __cpcmd(buf, NULL, 0, NULL); | 955 | __cpcmd(buf, NULL, 0, NULL); |
| 697 | break; | 956 | break; |
| 698 | case REIPL_METHOD_DEFAULT: | 957 | case REIPL_METHOD_DEFAULT: |
| @@ -707,16 +966,36 @@ static void reipl_run(struct shutdown_trigger *trigger) | |||
| 707 | disabled_wait((unsigned long) __builtin_return_address(0)); | 966 | disabled_wait((unsigned long) __builtin_return_address(0)); |
| 708 | } | 967 | } |
| 709 | 968 | ||
| 710 | static void __init reipl_probe(void) | 969 | static void reipl_block_ccw_init(struct ipl_parameter_block *ipb) |
| 711 | { | 970 | { |
| 712 | void *buffer; | 971 | ipb->hdr.len = IPL_PARM_BLK_CCW_LEN; |
| 972 | ipb->hdr.version = IPL_PARM_BLOCK_VERSION; | ||
| 973 | ipb->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN; | ||
| 974 | ipb->hdr.pbt = DIAG308_IPL_TYPE_CCW; | ||
| 975 | } | ||
| 713 | 976 | ||
| 714 | buffer = (void *) get_zeroed_page(GFP_KERNEL); | 977 | static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb) |
| 715 | if (!buffer) | 978 | { |
| 716 | return; | 979 | /* LOADPARM */ |
| 717 | if (diag308(DIAG308_STORE, buffer) == DIAG308_RC_OK) | 980 | /* check if read scp info worked and set loadparm */ |
| 718 | diag308_set_works = 1; | 981 | if (sclp_ipl_info.is_valid) |
| 719 | free_page((unsigned long)buffer); | 982 | memcpy(ipb->ipl_info.ccw.load_parm, |
| 983 | &sclp_ipl_info.loadparm, LOADPARM_LEN); | ||
| 984 | else | ||
| 985 | /* read scp info failed: set empty loadparm (EBCDIC blanks) */ | ||
| 986 | memset(ipb->ipl_info.ccw.load_parm, 0x40, LOADPARM_LEN); | ||
| 987 | ipb->hdr.flags = DIAG308_FLAGS_LP_VALID; | ||
| 988 | |||
| 989 | /* VM PARM */ | ||
| 990 | if (MACHINE_IS_VM && diag308_set_works && | ||
| 991 | (ipl_block.ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID)) { | ||
| 992 | |||
| 993 | ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID; | ||
| 994 | ipb->ipl_info.ccw.vm_parm_len = | ||
| 995 | ipl_block.ipl_info.ccw.vm_parm_len; | ||
| 996 | memcpy(ipb->ipl_info.ccw.vm_parm, | ||
| 997 | ipl_block.ipl_info.ccw.vm_parm, DIAG308_VMPARM_SIZE); | ||
| 998 | } | ||
| 720 | } | 999 | } |
| 721 | 1000 | ||
| 722 | static int __init reipl_nss_init(void) | 1001 | static int __init reipl_nss_init(void) |
| @@ -725,10 +1004,31 @@ static int __init reipl_nss_init(void) | |||
| 725 | 1004 | ||
| 726 | if (!MACHINE_IS_VM) | 1005 | if (!MACHINE_IS_VM) |
| 727 | return 0; | 1006 | return 0; |
| 1007 | |||
| 1008 | reipl_block_nss = (void *) get_zeroed_page(GFP_KERNEL); | ||
| 1009 | if (!reipl_block_nss) | ||
| 1010 | return -ENOMEM; | ||
| 1011 | |||
| 1012 | if (!diag308_set_works) | ||
| 1013 | sys_reipl_nss_vmparm_attr.attr.mode = S_IRUGO; | ||
| 1014 | |||
| 728 | rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group); | 1015 | rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group); |
| 729 | if (rc) | 1016 | if (rc) |
| 730 | return rc; | 1017 | return rc; |
| 731 | strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1); | 1018 | |
| 1019 | reipl_block_ccw_init(reipl_block_nss); | ||
| 1020 | if (ipl_info.type == IPL_TYPE_NSS) { | ||
| 1021 | memset(reipl_block_nss->ipl_info.ccw.nss_name, | ||
| 1022 | ' ', NSS_NAME_SIZE); | ||
| 1023 | memcpy(reipl_block_nss->ipl_info.ccw.nss_name, | ||
| 1024 | kernel_nss_name, strlen(kernel_nss_name)); | ||
| 1025 | ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, NSS_NAME_SIZE); | ||
| 1026 | reipl_block_nss->ipl_info.ccw.vm_flags |= | ||
| 1027 | DIAG308_VM_FLAGS_NSS_VALID; | ||
| 1028 | |||
| 1029 | reipl_block_ccw_fill_parms(reipl_block_nss); | ||
| 1030 | } | ||
| 1031 | |||
| 732 | reipl_capabilities |= IPL_TYPE_NSS; | 1032 | reipl_capabilities |= IPL_TYPE_NSS; |
| 733 | return 0; | 1033 | return 0; |
| 734 | } | 1034 | } |
| @@ -740,28 +1040,27 @@ static int __init reipl_ccw_init(void) | |||
| 740 | reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL); | 1040 | reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL); |
| 741 | if (!reipl_block_ccw) | 1041 | if (!reipl_block_ccw) |
| 742 | return -ENOMEM; | 1042 | return -ENOMEM; |
| 743 | rc = sysfs_create_group(&reipl_kset->kobj, &reipl_ccw_attr_group); | 1043 | |
| 744 | if (rc) { | 1044 | if (MACHINE_IS_VM) { |
| 745 | free_page((unsigned long)reipl_block_ccw); | 1045 | if (!diag308_set_works) |
| 746 | return rc; | 1046 | sys_reipl_ccw_vmparm_attr.attr.mode = S_IRUGO; |
| 1047 | rc = sysfs_create_group(&reipl_kset->kobj, | ||
| 1048 | &reipl_ccw_attr_group_vm); | ||
| 1049 | } else { | ||
| 1050 | if(!diag308_set_works) | ||
| 1051 | sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO; | ||
| 1052 | rc = sysfs_create_group(&reipl_kset->kobj, | ||
| 1053 | &reipl_ccw_attr_group_lpar); | ||
| 747 | } | 1054 | } |
| 748 | reipl_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN; | 1055 | if (rc) |
| 749 | reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; | 1056 | return rc; |
| 750 | reipl_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN; | 1057 | |
| 751 | reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; | 1058 | reipl_block_ccw_init(reipl_block_ccw); |
| 752 | reipl_block_ccw->hdr.flags = DIAG308_FLAGS_LP_VALID; | 1059 | if (ipl_info.type == IPL_TYPE_CCW) { |
| 753 | /* check if read scp info worked and set loadparm */ | ||
| 754 | if (sclp_ipl_info.is_valid) | ||
| 755 | memcpy(reipl_block_ccw->ipl_info.ccw.load_param, | ||
| 756 | &sclp_ipl_info.loadparm, LOADPARM_LEN); | ||
| 757 | else | ||
| 758 | /* read scp info failed: set empty loadparm (EBCDIC blanks) */ | ||
| 759 | memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40, | ||
| 760 | LOADPARM_LEN); | ||
| 761 | if (!MACHINE_IS_VM && !diag308_set_works) | ||
| 762 | sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO; | ||
| 763 | if (ipl_info.type == IPL_TYPE_CCW) | ||
| 764 | reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; | 1060 | reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; |
| 1061 | reipl_block_ccw_fill_parms(reipl_block_ccw); | ||
| 1062 | } | ||
| 1063 | |||
| 765 | reipl_capabilities |= IPL_TYPE_CCW; | 1064 | reipl_capabilities |= IPL_TYPE_CCW; |
| 766 | return 0; | 1065 | return 0; |
| 767 | } | 1066 | } |
| @@ -1298,7 +1597,6 @@ static void __init shutdown_actions_init(void) | |||
| 1298 | 1597 | ||
| 1299 | static int __init s390_ipl_init(void) | 1598 | static int __init s390_ipl_init(void) |
| 1300 | { | 1599 | { |
| 1301 | reipl_probe(); | ||
| 1302 | sclp_get_ipl_info(&sclp_ipl_info); | 1600 | sclp_get_ipl_info(&sclp_ipl_info); |
| 1303 | shutdown_actions_init(); | 1601 | shutdown_actions_init(); |
| 1304 | shutdown_triggers_init(); | 1602 | shutdown_triggers_init(); |
| @@ -1405,6 +1703,12 @@ void __init setup_ipl(void) | |||
| 1405 | atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb); | 1703 | atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb); |
| 1406 | } | 1704 | } |
| 1407 | 1705 | ||
| 1706 | void __init ipl_update_parameters(void) | ||
| 1707 | { | ||
| 1708 | if (diag308(DIAG308_STORE, &ipl_block) == DIAG308_RC_OK) | ||
| 1709 | diag308_set_works = 1; | ||
| 1710 | } | ||
| 1711 | |||
| 1408 | void __init ipl_save_parameters(void) | 1712 | void __init ipl_save_parameters(void) |
| 1409 | { | 1713 | { |
| 1410 | struct cio_iplinfo iplinfo; | 1714 | struct cio_iplinfo iplinfo; |
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index ed04d1372d5d..288ad490a6dd 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
| @@ -41,10 +41,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) | |||
| 41 | if (is_prohibited_opcode((kprobe_opcode_t *) p->addr)) | 41 | if (is_prohibited_opcode((kprobe_opcode_t *) p->addr)) |
| 42 | return -EINVAL; | 42 | return -EINVAL; |
| 43 | 43 | ||
| 44 | if ((unsigned long)p->addr & 0x01) { | 44 | if ((unsigned long)p->addr & 0x01) |
| 45 | printk("Attempt to register kprobe at an unaligned address\n"); | ||
| 46 | return -EINVAL; | 45 | return -EINVAL; |
| 47 | } | ||
| 48 | 46 | ||
| 49 | /* Use the get_insn_slot() facility for correctness */ | 47 | /* Use the get_insn_slot() facility for correctness */ |
| 50 | if (!(p->ainsn.insn = get_insn_slot())) | 48 | if (!(p->ainsn.insn = get_insn_slot())) |
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 3c77dd36994c..131d7ee8b416 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c | |||
| @@ -52,7 +52,6 @@ void machine_kexec_cleanup(struct kimage *image) | |||
| 52 | 52 | ||
| 53 | void machine_shutdown(void) | 53 | void machine_shutdown(void) |
| 54 | { | 54 | { |
| 55 | printk(KERN_INFO "kexec: machine_shutdown called\n"); | ||
| 56 | } | 55 | } |
| 57 | 56 | ||
| 58 | void machine_kexec(struct kimage *image) | 57 | void machine_kexec(struct kimage *image) |
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c new file mode 100644 index 000000000000..18ed7abe16c5 --- /dev/null +++ b/arch/s390/kernel/mem_detect.c | |||
| @@ -0,0 +1,100 @@ | |||
| 1 | /* | ||
| 2 | * Copyright IBM Corp. 2008 | ||
| 3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include <linux/kernel.h> | ||
| 7 | #include <linux/module.h> | ||
| 8 | #include <asm/ipl.h> | ||
| 9 | #include <asm/sclp.h> | ||
| 10 | #include <asm/setup.h> | ||
| 11 | |||
| 12 | static int memory_fast_detect(struct mem_chunk *chunk) | ||
| 13 | { | ||
| 14 | unsigned long val0 = 0; | ||
| 15 | unsigned long val1 = 0xc; | ||
| 16 | int rc = -EOPNOTSUPP; | ||
| 17 | |||
| 18 | if (ipl_flags & IPL_NSS_VALID) | ||
| 19 | return -EOPNOTSUPP; | ||
| 20 | asm volatile( | ||
| 21 | " diag %1,%2,0x260\n" | ||
| 22 | "0: lhi %0,0\n" | ||
| 23 | "1:\n" | ||
| 24 | EX_TABLE(0b,1b) | ||
| 25 | : "+d" (rc), "+d" (val0), "+d" (val1) : : "cc"); | ||
| 26 | |||
| 27 | if (rc || val0 != val1) | ||
| 28 | return -EOPNOTSUPP; | ||
| 29 | chunk->size = val0 + 1; | ||
| 30 | return 0; | ||
| 31 | } | ||
| 32 | |||
| 33 | static inline int tprot(unsigned long addr) | ||
| 34 | { | ||
| 35 | int rc = -EFAULT; | ||
| 36 | |||
| 37 | asm volatile( | ||
| 38 | " tprot 0(%1),0\n" | ||
| 39 | "0: ipm %0\n" | ||
| 40 | " srl %0,28\n" | ||
| 41 | "1:\n" | ||
| 42 | EX_TABLE(0b,1b) | ||
| 43 | : "+d" (rc) : "a" (addr) : "cc"); | ||
| 44 | return rc; | ||
| 45 | } | ||
| 46 | |||
| 47 | #define ADDR2G (1ULL << 31) | ||
| 48 | |||
| 49 | static void find_memory_chunks(struct mem_chunk chunk[]) | ||
| 50 | { | ||
| 51 | unsigned long long memsize, rnmax, rzm; | ||
| 52 | unsigned long addr = 0, size; | ||
| 53 | int i = 0, type; | ||
| 54 | |||
| 55 | rzm = sclp_get_rzm(); | ||
| 56 | rnmax = sclp_get_rnmax(); | ||
| 57 | memsize = rzm * rnmax; | ||
| 58 | if (!rzm) | ||
| 59 | rzm = 1ULL << 17; | ||
| 60 | if (sizeof(long) == 4) { | ||
| 61 | rzm = min(ADDR2G, rzm); | ||
| 62 | memsize = memsize ? min(ADDR2G, memsize) : ADDR2G; | ||
| 63 | } | ||
| 64 | do { | ||
| 65 | size = 0; | ||
| 66 | type = tprot(addr); | ||
| 67 | do { | ||
| 68 | size += rzm; | ||
| 69 | if (memsize && addr + size >= memsize) | ||
| 70 | break; | ||
| 71 | } while (type == tprot(addr + size)); | ||
| 72 | if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) { | ||
| 73 | chunk[i].addr = addr; | ||
| 74 | chunk[i].size = size; | ||
| 75 | chunk[i].type = type; | ||
| 76 | i++; | ||
| 77 | } | ||
| 78 | addr += size; | ||
| 79 | } while (addr < memsize && i < MEMORY_CHUNKS); | ||
| 80 | } | ||
| 81 | |||
| 82 | void detect_memory_layout(struct mem_chunk chunk[]) | ||
| 83 | { | ||
| 84 | unsigned long flags, cr0; | ||
| 85 | |||
| 86 | memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk)); | ||
| 87 | if (memory_fast_detect(&chunk[0]) == 0) | ||
| 88 | return; | ||
| 89 | /* Disable IRQs, DAT and low address protection so tprot does the | ||
| 90 | * right thing and we don't get scheduled away with low address | ||
| 91 | * protection disabled. | ||
| 92 | */ | ||
| 93 | flags = __raw_local_irq_stnsm(0xf8); | ||
| 94 | __ctl_store(cr0, 0, 0); | ||
| 95 | __ctl_clear_bit(0, 28); | ||
| 96 | find_memory_chunks(chunk); | ||
| 97 | __ctl_load(cr0, 0, 0); | ||
| 98 | __raw_local_irq_ssm(flags); | ||
| 99 | } | ||
| 100 | EXPORT_SYMBOL(detect_memory_layout); | ||
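find_memory_chunks() walks storage in rzm-sized increments, classifies each increment with tprot, and merges neighbouring increments of equal type into mem_chunk entries. Below is a self-contained userspace model of that merging loop; the memory map, increment size and type codes are fabricated stand-ins for what tprot and the SCLP data would provide.

    #include <stdio.h>

    #define CHUNK_READ_WRITE 0
    #define CHUNK_READ_ONLY  1
    #define MAP_ENTRIES      8

    /* fabricated access types, one per "increment" of the fake memory map */
    static const int fake_map[MAP_ENTRIES] = {
        CHUNK_READ_WRITE, CHUNK_READ_WRITE, CHUNK_READ_ONLY,
        CHUNK_READ_ONLY, CHUNK_READ_ONLY, CHUNK_READ_WRITE,
        CHUNK_READ_WRITE, CHUNK_READ_WRITE,
    };

    static int fake_tprot(unsigned long addr, unsigned long rzm)
    {
        return fake_map[addr / rzm];
    }

    int main(void)
    {
        unsigned long rzm = 1UL << 20;              /* increment size */
        unsigned long memsize = MAP_ENTRIES * rzm;  /* end of storage */
        unsigned long addr = 0, size;
        int type;

        do {
            size = 0;
            type = fake_tprot(addr, rzm);
            /* grow the chunk while the access type stays the same */
            do {
                size += rzm;
                if (addr + size >= memsize)
                    break;
            } while (type == fake_tprot(addr + size, rzm));
            printf("chunk at 0x%08lx, size 0x%08lx, type %d\n",
                   addr, size, type);
            addr += size;
        } while (addr < memsize);
        return 0;
    }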
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 7920861109d2..85defd01d293 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
| @@ -75,46 +75,19 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
| 75 | return sf->gprs[8]; | 75 | return sf->gprs[8]; |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | /* | ||
| 79 | * Need to know about CPUs going idle? | ||
| 80 | */ | ||
| 81 | static ATOMIC_NOTIFIER_HEAD(idle_chain); | ||
| 82 | DEFINE_PER_CPU(struct s390_idle_data, s390_idle); | 78 | DEFINE_PER_CPU(struct s390_idle_data, s390_idle); |
| 83 | 79 | ||
| 84 | int register_idle_notifier(struct notifier_block *nb) | ||
| 85 | { | ||
| 86 | return atomic_notifier_chain_register(&idle_chain, nb); | ||
| 87 | } | ||
| 88 | EXPORT_SYMBOL(register_idle_notifier); | ||
| 89 | |||
| 90 | int unregister_idle_notifier(struct notifier_block *nb) | ||
| 91 | { | ||
| 92 | return atomic_notifier_chain_unregister(&idle_chain, nb); | ||
| 93 | } | ||
| 94 | EXPORT_SYMBOL(unregister_idle_notifier); | ||
| 95 | |||
| 96 | static int s390_idle_enter(void) | 80 | static int s390_idle_enter(void) |
| 97 | { | 81 | { |
| 98 | struct s390_idle_data *idle; | 82 | struct s390_idle_data *idle; |
| 99 | int nr_calls = 0; | ||
| 100 | void *hcpu; | ||
| 101 | int rc; | ||
| 102 | 83 | ||
| 103 | hcpu = (void *)(long)smp_processor_id(); | ||
| 104 | rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1, | ||
| 105 | &nr_calls); | ||
| 106 | if (rc == NOTIFY_BAD) { | ||
| 107 | nr_calls--; | ||
| 108 | __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | ||
| 109 | hcpu, nr_calls, NULL); | ||
| 110 | return rc; | ||
| 111 | } | ||
| 112 | idle = &__get_cpu_var(s390_idle); | 84 | idle = &__get_cpu_var(s390_idle); |
| 113 | spin_lock(&idle->lock); | 85 | spin_lock(&idle->lock); |
| 114 | idle->idle_count++; | 86 | idle->idle_count++; |
| 115 | idle->in_idle = 1; | 87 | idle->in_idle = 1; |
| 116 | idle->idle_enter = get_clock(); | 88 | idle->idle_enter = get_clock(); |
| 117 | spin_unlock(&idle->lock); | 89 | spin_unlock(&idle->lock); |
| 90 | vtime_stop_cpu_timer(); | ||
| 118 | return NOTIFY_OK; | 91 | return NOTIFY_OK; |
| 119 | } | 92 | } |
| 120 | 93 | ||
| @@ -122,13 +95,12 @@ void s390_idle_leave(void) | |||
| 122 | { | 95 | { |
| 123 | struct s390_idle_data *idle; | 96 | struct s390_idle_data *idle; |
| 124 | 97 | ||
| 98 | vtime_start_cpu_timer(); | ||
| 125 | idle = &__get_cpu_var(s390_idle); | 99 | idle = &__get_cpu_var(s390_idle); |
| 126 | spin_lock(&idle->lock); | 100 | spin_lock(&idle->lock); |
| 127 | idle->idle_time += get_clock() - idle->idle_enter; | 101 | idle->idle_time += get_clock() - idle->idle_enter; |
| 128 | idle->in_idle = 0; | 102 | idle->in_idle = 0; |
| 129 | spin_unlock(&idle->lock); | 103 | spin_unlock(&idle->lock); |
| 130 | atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | ||
| 131 | (void *)(long) smp_processor_id()); | ||
| 132 | } | 104 | } |
| 133 | 105 | ||
| 134 | extern void s390_handle_mcck(void); | 106 | extern void s390_handle_mcck(void); |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 35827b9bd4d1..2815bfe348a6 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
| @@ -33,6 +33,8 @@ | |||
| 33 | #include <linux/security.h> | 33 | #include <linux/security.h> |
| 34 | #include <linux/audit.h> | 34 | #include <linux/audit.h> |
| 35 | #include <linux/signal.h> | 35 | #include <linux/signal.h> |
| 36 | #include <linux/elf.h> | ||
| 37 | #include <linux/regset.h> | ||
| 36 | 38 | ||
| 37 | #include <asm/segment.h> | 39 | #include <asm/segment.h> |
| 38 | #include <asm/page.h> | 40 | #include <asm/page.h> |
| @@ -47,6 +49,11 @@ | |||
| 47 | #include "compat_ptrace.h" | 49 | #include "compat_ptrace.h" |
| 48 | #endif | 50 | #endif |
| 49 | 51 | ||
| 52 | enum s390_regset { | ||
| 53 | REGSET_GENERAL, | ||
| 54 | REGSET_FP, | ||
| 55 | }; | ||
| 56 | |||
| 50 | static void | 57 | static void |
| 51 | FixPerRegisters(struct task_struct *task) | 58 | FixPerRegisters(struct task_struct *task) |
| 52 | { | 59 | { |
| @@ -126,24 +133,10 @@ ptrace_disable(struct task_struct *child) | |||
| 126 | * struct user contain pad bytes that should be read as zeroes. | 133 | * struct user contain pad bytes that should be read as zeroes. |
| 127 | * Lovely... | 134 | * Lovely... |
| 128 | */ | 135 | */ |
| 129 | static int | 136 | static unsigned long __peek_user(struct task_struct *child, addr_t addr) |
| 130 | peek_user(struct task_struct *child, addr_t addr, addr_t data) | ||
| 131 | { | 137 | { |
| 132 | struct user *dummy = NULL; | 138 | struct user *dummy = NULL; |
| 133 | addr_t offset, tmp, mask; | 139 | addr_t offset, tmp; |
| 134 | |||
| 135 | /* | ||
| 136 | * Stupid gdb peeks/pokes the access registers in 64 bit with | ||
| 137 | * an alignment of 4. Programmers from hell... | ||
| 138 | */ | ||
| 139 | mask = __ADDR_MASK; | ||
| 140 | #ifdef CONFIG_64BIT | ||
| 141 | if (addr >= (addr_t) &dummy->regs.acrs && | ||
| 142 | addr < (addr_t) &dummy->regs.orig_gpr2) | ||
| 143 | mask = 3; | ||
| 144 | #endif | ||
| 145 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) | ||
| 146 | return -EIO; | ||
| 147 | 140 | ||
| 148 | if (addr < (addr_t) &dummy->regs.acrs) { | 141 | if (addr < (addr_t) &dummy->regs.acrs) { |
| 149 | /* | 142 | /* |
| @@ -197,24 +190,18 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data) | |||
| 197 | } else | 190 | } else |
| 198 | tmp = 0; | 191 | tmp = 0; |
| 199 | 192 | ||
| 200 | return put_user(tmp, (addr_t __user *) data); | 193 | return tmp; |
| 201 | } | 194 | } |
| 202 | 195 | ||
| 203 | /* | ||
| 204 | * Write a word to the user area of a process at location addr. This | ||
| 205 | * operation does have an additional problem compared to peek_user. | ||
| 206 | * Stores to the program status word and on the floating point | ||
| 207 | * control register needs to get checked for validity. | ||
| 208 | */ | ||
| 209 | static int | 196 | static int |
| 210 | poke_user(struct task_struct *child, addr_t addr, addr_t data) | 197 | peek_user(struct task_struct *child, addr_t addr, addr_t data) |
| 211 | { | 198 | { |
| 212 | struct user *dummy = NULL; | 199 | struct user *dummy = NULL; |
| 213 | addr_t offset, mask; | 200 | addr_t tmp, mask; |
| 214 | 201 | ||
| 215 | /* | 202 | /* |
| 216 | * Stupid gdb peeks/pokes the access registers in 64 bit with | 203 | * Stupid gdb peeks/pokes the access registers in 64 bit with |
| 217 | * an alignment of 4. Programmers from hell indeed... | 204 | * an alignment of 4. Programmers from hell... |
| 218 | */ | 205 | */ |
| 219 | mask = __ADDR_MASK; | 206 | mask = __ADDR_MASK; |
| 220 | #ifdef CONFIG_64BIT | 207 | #ifdef CONFIG_64BIT |
| @@ -225,6 +212,21 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
| 225 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) | 212 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) |
| 226 | return -EIO; | 213 | return -EIO; |
| 227 | 214 | ||
| 215 | tmp = __peek_user(child, addr); | ||
| 216 | return put_user(tmp, (addr_t __user *) data); | ||
| 217 | } | ||
| 218 | |||
| 219 | /* | ||
| 220 | * Write a word to the user area of a process at location addr. This | ||
| 221 | * operation does have an additional problem compared to peek_user. | ||
| 222 | * Stores to the program status word and to the floating point | ||
| 223 | * control register need to be checked for validity. | ||
| 224 | */ | ||
| 225 | static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | ||
| 226 | { | ||
| 227 | struct user *dummy = NULL; | ||
| 228 | addr_t offset; | ||
| 229 | |||
| 228 | if (addr < (addr_t) &dummy->regs.acrs) { | 230 | if (addr < (addr_t) &dummy->regs.acrs) { |
| 229 | /* | 231 | /* |
| 230 | * psw and gprs are stored on the stack | 232 | * psw and gprs are stored on the stack |
| @@ -292,6 +294,28 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
| 292 | return 0; | 294 | return 0; |
| 293 | } | 295 | } |
| 294 | 296 | ||
| 297 | static int | ||
| 298 | poke_user(struct task_struct *child, addr_t addr, addr_t data) | ||
| 299 | { | ||
| 300 | struct user *dummy = NULL; | ||
| 301 | addr_t mask; | ||
| 302 | |||
| 303 | /* | ||
| 304 | * Stupid gdb peeks/pokes the access registers in 64 bit with | ||
| 305 | * an alignment of 4. Programmers from hell indeed... | ||
| 306 | */ | ||
| 307 | mask = __ADDR_MASK; | ||
| 308 | #ifdef CONFIG_64BIT | ||
| 309 | if (addr >= (addr_t) &dummy->regs.acrs && | ||
| 310 | addr < (addr_t) &dummy->regs.orig_gpr2) | ||
| 311 | mask = 3; | ||
| 312 | #endif | ||
| 313 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) | ||
| 314 | return -EIO; | ||
| 315 | |||
| 316 | return __poke_user(child, addr, data); | ||
| 317 | } | ||
| 318 | |||
| 295 | long arch_ptrace(struct task_struct *child, long request, long addr, long data) | 319 | long arch_ptrace(struct task_struct *child, long request, long addr, long data) |
| 296 | { | 320 | { |
| 297 | ptrace_area parea; | 321 | ptrace_area parea; |
| @@ -367,18 +391,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
| 367 | /* | 391 | /* |
| 368 | * Same as peek_user but for a 31 bit program. | 392 | * Same as peek_user but for a 31 bit program. |
| 369 | */ | 393 | */ |
| 370 | static int | 394 | static u32 __peek_user_compat(struct task_struct *child, addr_t addr) |
| 371 | peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data) | ||
| 372 | { | 395 | { |
| 373 | struct user32 *dummy32 = NULL; | 396 | struct user32 *dummy32 = NULL; |
| 374 | per_struct32 *dummy_per32 = NULL; | 397 | per_struct32 *dummy_per32 = NULL; |
| 375 | addr_t offset; | 398 | addr_t offset; |
| 376 | __u32 tmp; | 399 | __u32 tmp; |
| 377 | 400 | ||
| 378 | if (!test_thread_flag(TIF_31BIT) || | ||
| 379 | (addr & 3) || addr > sizeof(struct user) - 3) | ||
| 380 | return -EIO; | ||
| 381 | |||
| 382 | if (addr < (addr_t) &dummy32->regs.acrs) { | 401 | if (addr < (addr_t) &dummy32->regs.acrs) { |
| 383 | /* | 402 | /* |
| 384 | * psw and gprs are stored on the stack | 403 | * psw and gprs are stored on the stack |
| @@ -435,25 +454,32 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data) | |||
| 435 | } else | 454 | } else |
| 436 | tmp = 0; | 455 | tmp = 0; |
| 437 | 456 | ||
| 457 | return tmp; | ||
| 458 | } | ||
| 459 | |||
| 460 | static int peek_user_compat(struct task_struct *child, | ||
| 461 | addr_t addr, addr_t data) | ||
| 462 | { | ||
| 463 | __u32 tmp; | ||
| 464 | |||
| 465 | if (!test_thread_flag(TIF_31BIT) || | ||
| 466 | (addr & 3) || addr > sizeof(struct user) - 3) | ||
| 467 | return -EIO; | ||
| 468 | |||
| 469 | tmp = __peek_user_compat(child, addr); | ||
| 438 | return put_user(tmp, (__u32 __user *) data); | 470 | return put_user(tmp, (__u32 __user *) data); |
| 439 | } | 471 | } |
| 440 | 472 | ||
| 441 | /* | 473 | /* |
| 442 | * Same as poke_user but for a 31 bit program. | 474 | * Same as poke_user but for a 31 bit program. |
| 443 | */ | 475 | */ |
| 444 | static int | 476 | static int __poke_user_compat(struct task_struct *child, |
| 445 | poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data) | 477 | addr_t addr, addr_t data) |
| 446 | { | 478 | { |
| 447 | struct user32 *dummy32 = NULL; | 479 | struct user32 *dummy32 = NULL; |
| 448 | per_struct32 *dummy_per32 = NULL; | 480 | per_struct32 *dummy_per32 = NULL; |
| 481 | __u32 tmp = (__u32) data; | ||
| 449 | addr_t offset; | 482 | addr_t offset; |
| 450 | __u32 tmp; | ||
| 451 | |||
| 452 | if (!test_thread_flag(TIF_31BIT) || | ||
| 453 | (addr & 3) || addr > sizeof(struct user32) - 3) | ||
| 454 | return -EIO; | ||
| 455 | |||
| 456 | tmp = (__u32) data; | ||
| 457 | 483 | ||
| 458 | if (addr < (addr_t) &dummy32->regs.acrs) { | 484 | if (addr < (addr_t) &dummy32->regs.acrs) { |
| 459 | /* | 485 | /* |
| @@ -528,6 +554,16 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data) | |||
| 528 | return 0; | 554 | return 0; |
| 529 | } | 555 | } |
| 530 | 556 | ||
| 557 | static int poke_user_compat(struct task_struct *child, | ||
| 558 | addr_t addr, addr_t data) | ||
| 559 | { | ||
| 560 | if (!test_thread_flag(TIF_31BIT) || | ||
| 561 | (addr & 3) || addr > sizeof(struct user32) - 3) | ||
| 562 | return -EIO; | ||
| 563 | |||
| 564 | return __poke_user_compat(child, addr, data); | ||
| 565 | } | ||
| 566 | |||
| 531 | long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | 567 | long compat_arch_ptrace(struct task_struct *child, compat_long_t request, |
| 532 | compat_ulong_t caddr, compat_ulong_t cdata) | 568 | compat_ulong_t caddr, compat_ulong_t cdata) |
| 533 | { | 569 | { |
| @@ -539,11 +575,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
| 539 | switch (request) { | 575 | switch (request) { |
| 540 | case PTRACE_PEEKUSR: | 576 | case PTRACE_PEEKUSR: |
| 541 | /* read the word at location addr in the USER area. */ | 577 | /* read the word at location addr in the USER area. */ |
| 542 | return peek_user_emu31(child, addr, data); | 578 | return peek_user_compat(child, addr, data); |
| 543 | 579 | ||
| 544 | case PTRACE_POKEUSR: | 580 | case PTRACE_POKEUSR: |
| 545 | /* write the word at location addr in the USER area */ | 581 | /* write the word at location addr in the USER area */ |
| 546 | return poke_user_emu31(child, addr, data); | 582 | return poke_user_compat(child, addr, data); |
| 547 | 583 | ||
| 548 | case PTRACE_PEEKUSR_AREA: | 584 | case PTRACE_PEEKUSR_AREA: |
| 549 | case PTRACE_POKEUSR_AREA: | 585 | case PTRACE_POKEUSR_AREA: |
| @@ -555,13 +591,13 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
| 555 | copied = 0; | 591 | copied = 0; |
| 556 | while (copied < parea.len) { | 592 | while (copied < parea.len) { |
| 557 | if (request == PTRACE_PEEKUSR_AREA) | 593 | if (request == PTRACE_PEEKUSR_AREA) |
| 558 | ret = peek_user_emu31(child, addr, data); | 594 | ret = peek_user_compat(child, addr, data); |
| 559 | else { | 595 | else { |
| 560 | __u32 utmp; | 596 | __u32 utmp; |
| 561 | if (get_user(utmp, | 597 | if (get_user(utmp, |
| 562 | (__u32 __force __user *) data)) | 598 | (__u32 __force __user *) data)) |
| 563 | return -EFAULT; | 599 | return -EFAULT; |
| 564 | ret = poke_user_emu31(child, addr, utmp); | 600 | ret = poke_user_compat(child, addr, utmp); |
| 565 | } | 601 | } |
| 566 | if (ret) | 602 | if (ret) |
| 567 | return ret; | 603 | return ret; |
| @@ -610,3 +646,240 @@ syscall_trace(struct pt_regs *regs, int entryexit) | |||
| 610 | regs->gprs[2], regs->orig_gpr2, regs->gprs[3], | 646 | regs->gprs[2], regs->orig_gpr2, regs->gprs[3], |
| 611 | regs->gprs[4], regs->gprs[5]); | 647 | regs->gprs[4], regs->gprs[5]); |
| 612 | } | 648 | } |
| 649 | |||
| 650 | /* | ||
| 651 | * user_regset definitions. | ||
| 652 | */ | ||
| 653 | |||
| 654 | static int s390_regs_get(struct task_struct *target, | ||
| 655 | const struct user_regset *regset, | ||
| 656 | unsigned int pos, unsigned int count, | ||
| 657 | void *kbuf, void __user *ubuf) | ||
| 658 | { | ||
| 659 | if (target == current) | ||
| 660 | save_access_regs(target->thread.acrs); | ||
| 661 | |||
| 662 | if (kbuf) { | ||
| 663 | unsigned long *k = kbuf; | ||
| 664 | while (count > 0) { | ||
| 665 | *k++ = __peek_user(target, pos); | ||
| 666 | count -= sizeof(*k); | ||
| 667 | pos += sizeof(*k); | ||
| 668 | } | ||
| 669 | } else { | ||
| 670 | unsigned long __user *u = ubuf; | ||
| 671 | while (count > 0) { | ||
| 672 | if (__put_user(__peek_user(target, pos), u++)) | ||
| 673 | return -EFAULT; | ||
| 674 | count -= sizeof(*u); | ||
| 675 | pos += sizeof(*u); | ||
| 676 | } | ||
| 677 | } | ||
| 678 | return 0; | ||
| 679 | } | ||
| 680 | |||
| 681 | static int s390_regs_set(struct task_struct *target, | ||
| 682 | const struct user_regset *regset, | ||
| 683 | unsigned int pos, unsigned int count, | ||
| 684 | const void *kbuf, const void __user *ubuf) | ||
| 685 | { | ||
| 686 | int rc = 0; | ||
| 687 | |||
| 688 | if (target == current) | ||
| 689 | save_access_regs(target->thread.acrs); | ||
| 690 | |||
| 691 | if (kbuf) { | ||
| 692 | const unsigned long *k = kbuf; | ||
| 693 | while (count > 0 && !rc) { | ||
| 694 | rc = __poke_user(target, pos, *k++); | ||
| 695 | count -= sizeof(*k); | ||
| 696 | pos += sizeof(*k); | ||
| 697 | } | ||
| 698 | } else { | ||
| 699 | const unsigned long __user *u = ubuf; | ||
| 700 | while (count > 0 && !rc) { | ||
| 701 | unsigned long word; | ||
| 702 | rc = __get_user(word, u++); | ||
| 703 | if (rc) | ||
| 704 | break; | ||
| 705 | rc = __poke_user(target, pos, word); | ||
| 706 | count -= sizeof(*u); | ||
| 707 | pos += sizeof(*u); | ||
| 708 | } | ||
| 709 | } | ||
| 710 | |||
| 711 | if (rc == 0 && target == current) | ||
| 712 | restore_access_regs(target->thread.acrs); | ||
| 713 | |||
| 714 | return rc; | ||
| 715 | } | ||
| 716 | |||
| 717 | static int s390_fpregs_get(struct task_struct *target, | ||
| 718 | const struct user_regset *regset, unsigned int pos, | ||
| 719 | unsigned int count, void *kbuf, void __user *ubuf) | ||
| 720 | { | ||
| 721 | if (target == current) | ||
| 722 | save_fp_regs(&target->thread.fp_regs); | ||
| 723 | |||
| 724 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
| 725 | &target->thread.fp_regs, 0, -1); | ||
| 726 | } | ||
| 727 | |||
| 728 | static int s390_fpregs_set(struct task_struct *target, | ||
| 729 | const struct user_regset *regset, unsigned int pos, | ||
| 730 | unsigned int count, const void *kbuf, | ||
| 731 | const void __user *ubuf) | ||
| 732 | { | ||
| 733 | int rc = 0; | ||
| 734 | |||
| 735 | if (target == current) | ||
| 736 | save_fp_regs(&target->thread.fp_regs); | ||
| 737 | |||
| 738 | /* If setting FPC, must validate it first. */ | ||
| 739 | if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) { | ||
| 740 | u32 fpc[2] = { target->thread.fp_regs.fpc, 0 }; | ||
| 741 | rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc, | ||
| 742 | 0, offsetof(s390_fp_regs, fprs)); | ||
| 743 | if (rc) | ||
| 744 | return rc; | ||
| 745 | if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0) | ||
| 746 | return -EINVAL; | ||
| 747 | target->thread.fp_regs.fpc = fpc[0]; | ||
| 748 | } | ||
| 749 | |||
| 750 | if (rc == 0 && count > 0) | ||
| 751 | rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
| 752 | target->thread.fp_regs.fprs, | ||
| 753 | offsetof(s390_fp_regs, fprs), -1); | ||
| 754 | |||
| 755 | if (rc == 0 && target == current) | ||
| 756 | restore_fp_regs(&target->thread.fp_regs); | ||
| 757 | |||
| 758 | return rc; | ||
| 759 | } | ||
| 760 | |||
| 761 | static const struct user_regset s390_regsets[] = { | ||
| 762 | [REGSET_GENERAL] = { | ||
| 763 | .core_note_type = NT_PRSTATUS, | ||
| 764 | .n = sizeof(s390_regs) / sizeof(long), | ||
| 765 | .size = sizeof(long), | ||
| 766 | .align = sizeof(long), | ||
| 767 | .get = s390_regs_get, | ||
| 768 | .set = s390_regs_set, | ||
| 769 | }, | ||
| 770 | [REGSET_FP] = { | ||
| 771 | .core_note_type = NT_PRFPREG, | ||
| 772 | .n = sizeof(s390_fp_regs) / sizeof(long), | ||
| 773 | .size = sizeof(long), | ||
| 774 | .align = sizeof(long), | ||
| 775 | .get = s390_fpregs_get, | ||
| 776 | .set = s390_fpregs_set, | ||
| 777 | }, | ||
| 778 | }; | ||
| 779 | |||
| 780 | static const struct user_regset_view user_s390_view = { | ||
| 781 | .name = UTS_MACHINE, | ||
| 782 | .e_machine = EM_S390, | ||
| 783 | .regsets = s390_regsets, | ||
| 784 | .n = ARRAY_SIZE(s390_regsets) | ||
| 785 | }; | ||
| 786 | |||
| 787 | #ifdef CONFIG_COMPAT | ||
| 788 | static int s390_compat_regs_get(struct task_struct *target, | ||
| 789 | const struct user_regset *regset, | ||
| 790 | unsigned int pos, unsigned int count, | ||
| 791 | void *kbuf, void __user *ubuf) | ||
| 792 | { | ||
| 793 | if (target == current) | ||
| 794 | save_access_regs(target->thread.acrs); | ||
| 795 | |||
| 796 | if (kbuf) { | ||
| 797 | compat_ulong_t *k = kbuf; | ||
| 798 | while (count > 0) { | ||
| 799 | *k++ = __peek_user_compat(target, pos); | ||
| 800 | count -= sizeof(*k); | ||
| 801 | pos += sizeof(*k); | ||
| 802 | } | ||
| 803 | } else { | ||
| 804 | compat_ulong_t __user *u = ubuf; | ||
| 805 | while (count > 0) { | ||
| 806 | if (__put_user(__peek_user_compat(target, pos), u++)) | ||
| 807 | return -EFAULT; | ||
| 808 | count -= sizeof(*u); | ||
| 809 | pos += sizeof(*u); | ||
| 810 | } | ||
| 811 | } | ||
| 812 | return 0; | ||
| 813 | } | ||
| 814 | |||
| 815 | static int s390_compat_regs_set(struct task_struct *target, | ||
| 816 | const struct user_regset *regset, | ||
| 817 | unsigned int pos, unsigned int count, | ||
| 818 | const void *kbuf, const void __user *ubuf) | ||
| 819 | { | ||
| 820 | int rc = 0; | ||
| 821 | |||
| 822 | if (target == current) | ||
| 823 | save_access_regs(target->thread.acrs); | ||
| 824 | |||
| 825 | if (kbuf) { | ||
| 826 | const compat_ulong_t *k = kbuf; | ||
| 827 | while (count > 0 && !rc) { | ||
| 828 | rc = __poke_user_compat(target, pos, *k++); | ||
| 829 | count -= sizeof(*k); | ||
| 830 | pos += sizeof(*k); | ||
| 831 | } | ||
| 832 | } else { | ||
| 833 | const compat_ulong_t __user *u = ubuf; | ||
| 834 | while (count > 0 && !rc) { | ||
| 835 | compat_ulong_t word; | ||
| 836 | rc = __get_user(word, u++); | ||
| 837 | if (rc) | ||
| 838 | break; | ||
| 839 | rc = __poke_user_compat(target, pos, word); | ||
| 840 | count -= sizeof(*u); | ||
| 841 | pos += sizeof(*u); | ||
| 842 | } | ||
| 843 | } | ||
| 844 | |||
| 845 | if (rc == 0 && target == current) | ||
| 846 | restore_access_regs(target->thread.acrs); | ||
| 847 | |||
| 848 | return rc; | ||
| 849 | } | ||
| 850 | |||
| 851 | static const struct user_regset s390_compat_regsets[] = { | ||
| 852 | [REGSET_GENERAL] = { | ||
| 853 | .core_note_type = NT_PRSTATUS, | ||
| 854 | .n = sizeof(s390_compat_regs) / sizeof(compat_long_t), | ||
| 855 | .size = sizeof(compat_long_t), | ||
| 856 | .align = sizeof(compat_long_t), | ||
| 857 | .get = s390_compat_regs_get, | ||
| 858 | .set = s390_compat_regs_set, | ||
| 859 | }, | ||
| 860 | [REGSET_FP] = { | ||
| 861 | .core_note_type = NT_PRFPREG, | ||
| 862 | .n = sizeof(s390_fp_regs) / sizeof(compat_long_t), | ||
| 863 | .size = sizeof(compat_long_t), | ||
| 864 | .align = sizeof(compat_long_t), | ||
| 865 | .get = s390_fpregs_get, | ||
| 866 | .set = s390_fpregs_set, | ||
| 867 | }, | ||
| 868 | }; | ||
| 869 | |||
| 870 | static const struct user_regset_view user_s390_compat_view = { | ||
| 871 | .name = "s390", | ||
| 872 | .e_machine = EM_S390, | ||
| 873 | .regsets = s390_compat_regsets, | ||
| 874 | .n = ARRAY_SIZE(s390_compat_regsets) | ||
| 875 | }; | ||
| 876 | #endif | ||
| 877 | |||
| 878 | const struct user_regset_view *task_user_regset_view(struct task_struct *task) | ||
| 879 | { | ||
| 880 | #ifdef CONFIG_COMPAT | ||
| 881 | if (test_tsk_thread_flag(task, TIF_31BIT)) | ||
| 882 | return &user_s390_compat_view; | ||
| 883 | #endif | ||
| 884 | return &user_s390_view; | ||
| 885 | } | ||
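
The regset get callbacks above share one dual-buffer walk: the caller passes either a kernel buffer (kbuf) or a user buffer (ubuf), and the loop copies the register area word by word while advancing pos and shrinking count. A minimal userspace sketch of that walk, assuming an illustrative peek_word() in place of __peek_user(); none of these names are kernel API, and the real user-buffer branch goes through __put_user():

#include <stdio.h>

/* Illustrative stand-in for __peek_user(): return the register word at offset pos. */
static unsigned long peek_word(const unsigned long *regs, unsigned int pos)
{
	return regs[pos / sizeof(unsigned long)];
}

/* Copy count bytes of register content, starting at pos, into the kernel buffer
 * if one was given; the real code falls back to a __put_user() loop for ubuf. */
static int regs_get(const unsigned long *regs, unsigned int pos, unsigned int count,
		    unsigned long *kbuf, unsigned long *ubuf)
{
	unsigned long *k = kbuf ? kbuf : ubuf;

	while (count > 0) {
		*k++ = peek_word(regs, pos);
		count -= sizeof(*k);
		pos += sizeof(*k);
	}
	return 0;
}

int main(void)
{
	unsigned long regs[4] = { 1, 2, 3, 4 };
	unsigned long out[4];

	regs_get(regs, 0, sizeof(out), out, NULL);
	printf("%lu %lu %lu %lu\n", out[0], out[1], out[2], out[3]);
	return 0;
}

The set side mirrors this loop with __poke_user()/__get_user() and stops at the first word that fails.
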
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 2bc70b6e876a..b358e18273b0 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
| @@ -77,7 +77,7 @@ unsigned long machine_flags; | |||
| 77 | unsigned long elf_hwcap = 0; | 77 | unsigned long elf_hwcap = 0; |
| 78 | char elf_platform[ELF_PLATFORM_SIZE]; | 78 | char elf_platform[ELF_PLATFORM_SIZE]; |
| 79 | 79 | ||
| 80 | struct mem_chunk __meminitdata memory_chunk[MEMORY_CHUNKS]; | 80 | struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; |
| 81 | volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ | 81 | volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ |
| 82 | static unsigned long __initdata memory_end; | 82 | static unsigned long __initdata memory_end; |
| 83 | 83 | ||
| @@ -205,12 +205,6 @@ static void __init conmode_default(void) | |||
| 205 | SET_CONSOLE_SCLP; | 205 | SET_CONSOLE_SCLP; |
| 206 | #endif | 206 | #endif |
| 207 | } | 207 | } |
| 208 | } else if (MACHINE_IS_P390) { | ||
| 209 | #if defined(CONFIG_TN3215_CONSOLE) | ||
| 210 | SET_CONSOLE_3215; | ||
| 211 | #elif defined(CONFIG_TN3270_CONSOLE) | ||
| 212 | SET_CONSOLE_3270; | ||
| 213 | #endif | ||
| 214 | } else { | 208 | } else { |
| 215 | #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) | 209 | #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) |
| 216 | SET_CONSOLE_SCLP; | 210 | SET_CONSOLE_SCLP; |
| @@ -221,18 +215,17 @@ static void __init conmode_default(void) | |||
| 221 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) | 215 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) |
| 222 | static void __init setup_zfcpdump(unsigned int console_devno) | 216 | static void __init setup_zfcpdump(unsigned int console_devno) |
| 223 | { | 217 | { |
| 224 | static char str[64]; | 218 | static char str[41]; |
| 225 | 219 | ||
| 226 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | 220 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) |
| 227 | return; | 221 | return; |
| 228 | if (console_devno != -1) | 222 | if (console_devno != -1) |
| 229 | sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x", | 223 | sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x", |
| 230 | ipl_info.data.fcp.dev_id.devno, console_devno); | 224 | ipl_info.data.fcp.dev_id.devno, console_devno); |
| 231 | else | 225 | else |
| 232 | sprintf(str, "cio_ignore=all,!0.0.%04x", | 226 | sprintf(str, " cio_ignore=all,!0.0.%04x", |
| 233 | ipl_info.data.fcp.dev_id.devno); | 227 | ipl_info.data.fcp.dev_id.devno); |
| 234 | strcat(COMMAND_LINE, " "); | 228 | strcat(boot_command_line, str); |
| 235 | strcat(COMMAND_LINE, str); | ||
| 236 | console_loglevel = 2; | 229 | console_loglevel = 2; |
| 237 | } | 230 | } |
| 238 | #else | 231 | #else |
| @@ -289,32 +282,6 @@ static int __init early_parse_mem(char *p) | |||
| 289 | } | 282 | } |
| 290 | early_param("mem", early_parse_mem); | 283 | early_param("mem", early_parse_mem); |
| 291 | 284 | ||
| 292 | /* | ||
| 293 | * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes | ||
| 294 | */ | ||
| 295 | static int __init early_parse_ipldelay(char *p) | ||
| 296 | { | ||
| 297 | unsigned long delay = 0; | ||
| 298 | |||
| 299 | delay = simple_strtoul(p, &p, 0); | ||
| 300 | |||
| 301 | switch (*p) { | ||
| 302 | case 's': | ||
| 303 | case 'S': | ||
| 304 | delay *= 1000000; | ||
| 305 | break; | ||
| 306 | case 'm': | ||
| 307 | case 'M': | ||
| 308 | delay *= 60 * 1000000; | ||
| 309 | } | ||
| 310 | |||
| 311 | /* now wait for the requested amount of time */ | ||
| 312 | udelay(delay); | ||
| 313 | |||
| 314 | return 0; | ||
| 315 | } | ||
| 316 | early_param("ipldelay", early_parse_ipldelay); | ||
| 317 | |||
| 318 | #ifdef CONFIG_S390_SWITCH_AMODE | 285 | #ifdef CONFIG_S390_SWITCH_AMODE |
| 319 | #ifdef CONFIG_PGSTE | 286 | #ifdef CONFIG_PGSTE |
| 320 | unsigned int switch_amode = 1; | 287 | unsigned int switch_amode = 1; |
| @@ -804,11 +771,9 @@ setup_arch(char **cmdline_p) | |||
| 804 | printk("We are running native (64 bit mode)\n"); | 771 | printk("We are running native (64 bit mode)\n"); |
| 805 | #endif /* CONFIG_64BIT */ | 772 | #endif /* CONFIG_64BIT */ |
| 806 | 773 | ||
| 807 | /* Save unparsed command line copy for /proc/cmdline */ | 774 | /* Have one command line that is parsed and saved in /proc/cmdline */ |
| 808 | strlcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE); | 775 | /* boot_command_line has been already set up in early.c */ |
| 809 | 776 | *cmdline_p = boot_command_line; | |
| 810 | *cmdline_p = COMMAND_LINE; | ||
| 811 | *(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0'; | ||
| 812 | 777 | ||
| 813 | ROOT_DEV = Root_RAM0; | 778 | ROOT_DEV = Root_RAM0; |
| 814 | 779 | ||
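
The setup.c hunk above now builds the cio_ignore option with a leading blank and appends it directly to boot_command_line instead of COMMAND_LINE. A hedged userspace sketch of that build-then-append step; the buffer names and the 256-byte limit are illustrative, not the kernel's COMMAND_LINE_SIZE:

#include <stdio.h>
#include <string.h>

#define CMDLINE_SIZE 256	/* stand-in for COMMAND_LINE_SIZE */

int main(void)
{
	char cmdline[CMDLINE_SIZE] = "root=/dev/ram0 ro";
	char str[41];
	unsigned int dump_devno = 0x5000, console_devno = 0x0009;

	/* The leading blank keeps the appended option separate from what is already there. */
	snprintf(str, sizeof(str), " cio_ignore=all,!0.0.%04x,!0.0.%04x",
		 dump_devno, console_devno);
	strncat(cmdline, str, sizeof(cmdline) - strlen(cmdline) - 1);
	printf("%s\n", cmdline);
	return 0;
}
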
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 7aec676fefd5..7418bebb547f 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | * Time of day based timer functions. | 3 | * Time of day based timer functions. |
| 4 | * | 4 | * |
| 5 | * S390 version | 5 | * S390 version |
| 6 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 6 | * Copyright IBM Corp. 1999, 2008 |
| 7 | * Author(s): Hartmut Penner (hp@de.ibm.com), | 7 | * Author(s): Hartmut Penner (hp@de.ibm.com), |
| 8 | * Martin Schwidefsky (schwidefsky@de.ibm.com), | 8 | * Martin Schwidefsky (schwidefsky@de.ibm.com), |
| 9 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) | 9 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) |
| @@ -31,6 +31,7 @@ | |||
| 31 | #include <linux/notifier.h> | 31 | #include <linux/notifier.h> |
| 32 | #include <linux/clocksource.h> | 32 | #include <linux/clocksource.h> |
| 33 | #include <linux/clockchips.h> | 33 | #include <linux/clockchips.h> |
| 34 | #include <linux/bootmem.h> | ||
| 34 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
| 35 | #include <asm/delay.h> | 36 | #include <asm/delay.h> |
| 36 | #include <asm/s390_ext.h> | 37 | #include <asm/s390_ext.h> |
| @@ -162,7 +163,7 @@ void init_cpu_timer(void) | |||
| 162 | /* Enable clock comparator timer interrupt. */ | 163 | /* Enable clock comparator timer interrupt. */ |
| 163 | __ctl_set_bit(0,11); | 164 | __ctl_set_bit(0,11); |
| 164 | 165 | ||
| 165 | /* Always allow ETR external interrupts, even without an ETR. */ | 166 | /* Always allow the timing alert external interrupt. */ |
| 166 | __ctl_set_bit(0, 4); | 167 | __ctl_set_bit(0, 4); |
| 167 | } | 168 | } |
| 168 | 169 | ||
| @@ -170,8 +171,21 @@ static void clock_comparator_interrupt(__u16 code) | |||
| 170 | { | 171 | { |
| 171 | } | 172 | } |
| 172 | 173 | ||
| 174 | static void etr_timing_alert(struct etr_irq_parm *); | ||
| 175 | static void stp_timing_alert(struct stp_irq_parm *); | ||
| 176 | |||
| 177 | static void timing_alert_interrupt(__u16 code) | ||
| 178 | { | ||
| 179 | if (S390_lowcore.ext_params & 0x00c40000) | ||
| 180 | etr_timing_alert((struct etr_irq_parm *) | ||
| 181 | &S390_lowcore.ext_params); | ||
| 182 | if (S390_lowcore.ext_params & 0x00038000) | ||
| 183 | stp_timing_alert((struct stp_irq_parm *) | ||
| 184 | &S390_lowcore.ext_params); | ||
| 185 | } | ||
| 186 | |||
| 173 | static void etr_reset(void); | 187 | static void etr_reset(void); |
| 174 | static void etr_ext_handler(__u16); | 188 | static void stp_reset(void); |
| 175 | 189 | ||
| 176 | /* | 190 | /* |
| 177 | * Get the TOD clock running. | 191 | * Get the TOD clock running. |
| @@ -181,6 +195,7 @@ static u64 __init reset_tod_clock(void) | |||
| 181 | u64 time; | 195 | u64 time; |
| 182 | 196 | ||
| 183 | etr_reset(); | 197 | etr_reset(); |
| 198 | stp_reset(); | ||
| 184 | if (store_clock(&time) == 0) | 199 | if (store_clock(&time) == 0) |
| 185 | return time; | 200 | return time; |
| 186 | /* TOD clock not running. Set the clock to Unix Epoch. */ | 201 | /* TOD clock not running. Set the clock to Unix Epoch. */ |
| @@ -231,8 +246,9 @@ void __init time_init(void) | |||
| 231 | if (clocksource_register(&clocksource_tod) != 0) | 246 | if (clocksource_register(&clocksource_tod) != 0) |
| 232 | panic("Could not register TOD clock source"); | 247 | panic("Could not register TOD clock source"); |
| 233 | 248 | ||
| 234 | /* request the etr external interrupt */ | 249 | /* request the timing alert external interrupt */ |
| 235 | if (register_early_external_interrupt(0x1406, etr_ext_handler, | 250 | if (register_early_external_interrupt(0x1406, |
| 251 | timing_alert_interrupt, | ||
| 236 | &ext_int_etr_cc) != 0) | 252 | &ext_int_etr_cc) != 0) |
| 237 | panic("Couldn't request external interrupt 0x1406"); | 253 | panic("Couldn't request external interrupt 0x1406"); |
| 238 | 254 | ||
| @@ -245,10 +261,112 @@ void __init time_init(void) | |||
| 245 | } | 261 | } |
| 246 | 262 | ||
| 247 | /* | 263 | /* |
| 264 | * The time is "clock"; "old" is what we thought the time was. | ||
| 265 | * Adjust the value by a multiple of jiffies and add the delta to ntp. | ||
| 266 | * "delay" is an approximation how long the synchronization took. If | ||
| 267 | * the time correction is positive, then "delay" is subtracted from | ||
| 268 | * the time difference and only the remaining part is passed to ntp. | ||
| 269 | */ | ||
| 270 | static unsigned long long adjust_time(unsigned long long old, | ||
| 271 | unsigned long long clock, | ||
| 272 | unsigned long long delay) | ||
| 273 | { | ||
| 274 | unsigned long long delta, ticks; | ||
| 275 | struct timex adjust; | ||
| 276 | |||
| 277 | if (clock > old) { | ||
| 278 | /* It is later than we thought. */ | ||
| 279 | delta = ticks = clock - old; | ||
| 280 | delta = ticks = (delta < delay) ? 0 : delta - delay; | ||
| 281 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | ||
| 282 | adjust.offset = ticks * (1000000 / HZ); | ||
| 283 | } else { | ||
| 284 | /* It is earlier than we thought. */ | ||
| 285 | delta = ticks = old - clock; | ||
| 286 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | ||
| 287 | delta = -delta; | ||
| 288 | adjust.offset = -ticks * (1000000 / HZ); | ||
| 289 | } | ||
| 290 | jiffies_timer_cc += delta; | ||
| 291 | if (adjust.offset != 0) { | ||
| 292 | printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n", | ||
| 293 | adjust.offset); | ||
| 294 | adjust.modes = ADJ_OFFSET_SINGLESHOT; | ||
| 295 | do_adjtimex(&adjust); | ||
| 296 | } | ||
| 297 | return delta; | ||
| 298 | } | ||
| 299 | |||
| 300 | static DEFINE_PER_CPU(atomic_t, clock_sync_word); | ||
| 301 | static unsigned long clock_sync_flags; | ||
| 302 | |||
| 303 | #define CLOCK_SYNC_HAS_ETR 0 | ||
| 304 | #define CLOCK_SYNC_HAS_STP 1 | ||
| 305 | #define CLOCK_SYNC_ETR 2 | ||
| 306 | #define CLOCK_SYNC_STP 3 | ||
| 307 | |||
| 308 | /* | ||
| 309 | * The synchronous get_clock function. It will write the current clock | ||
| 310 | * value to the clock pointer and return 0 if the clock is in sync with | ||
| 311 | * the external time source. If the clock is in local mode it returns | ||
| 312 | * -ENOSYS, and -EAGAIN if the clock is not in sync with the external | ||
| 313 | * reference. | ||
| 314 | */ | ||
| 315 | int get_sync_clock(unsigned long long *clock) | ||
| 316 | { | ||
| 317 | atomic_t *sw_ptr; | ||
| 318 | unsigned int sw0, sw1; | ||
| 319 | |||
| 320 | sw_ptr = &get_cpu_var(clock_sync_word); | ||
| 321 | sw0 = atomic_read(sw_ptr); | ||
| 322 | *clock = get_clock(); | ||
| 323 | sw1 = atomic_read(sw_ptr); | ||
| 324 | put_cpu_var(clock_sync_word); | ||
| 325 | if (sw0 == sw1 && (sw0 & 0x80000000U)) | ||
| 326 | /* Success: time is in sync. */ | ||
| 327 | return 0; | ||
| 328 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) && | ||
| 329 | !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) | ||
| 330 | return -ENOSYS; | ||
| 331 | if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) && | ||
| 332 | !test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) | ||
| 333 | return -EACCES; | ||
| 334 | return -EAGAIN; | ||
| 335 | } | ||
| 336 | EXPORT_SYMBOL(get_sync_clock); | ||
| 337 | |||
| 338 | /* | ||
| 339 | * Make get_sync_clock return -EAGAIN. | ||
| 340 | */ | ||
| 341 | static void disable_sync_clock(void *dummy) | ||
| 342 | { | ||
| 343 | atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word); | ||
| 344 | /* | ||
| 345 | * Clear the in-sync bit 2^31. All get_sync_clock calls will | ||
| 346 | * fail until the sync bit is turned back on. In addition | ||
| 347 | * increase the "sequence" counter to avoid the race of an | ||
| 348 | * etr event and the complete recovery against get_sync_clock. | ||
| 349 | */ | ||
| 350 | atomic_clear_mask(0x80000000, sw_ptr); | ||
| 351 | atomic_inc(sw_ptr); | ||
| 352 | } | ||
| 353 | |||
| 354 | /* | ||
| 355 | * Make get_sync_clock return 0 again. | ||
| 356 | * Needs to be called from a context disabled for preemption. | ||
| 357 | */ | ||
| 358 | static void enable_sync_clock(void) | ||
| 359 | { | ||
| 360 | atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word); | ||
| 361 | atomic_set_mask(0x80000000, sw_ptr); | ||
| 362 | } | ||
| 363 | |||
| 364 | /* | ||
| 248 | * External Time Reference (ETR) code. | 365 | * External Time Reference (ETR) code. |
| 249 | */ | 366 | */ |
| 250 | static int etr_port0_online; | 367 | static int etr_port0_online; |
| 251 | static int etr_port1_online; | 368 | static int etr_port1_online; |
| 369 | static int etr_steai_available; | ||
| 252 | 370 | ||
| 253 | static int __init early_parse_etr(char *p) | 371 | static int __init early_parse_etr(char *p) |
| 254 | { | 372 | { |
| @@ -273,12 +391,6 @@ enum etr_event { | |||
| 273 | ETR_EVENT_UPDATE, | 391 | ETR_EVENT_UPDATE, |
| 274 | }; | 392 | }; |
| 275 | 393 | ||
| 276 | enum etr_flags { | ||
| 277 | ETR_FLAG_ENOSYS, | ||
| 278 | ETR_FLAG_EACCES, | ||
| 279 | ETR_FLAG_STEAI, | ||
| 280 | }; | ||
| 281 | |||
| 282 | /* | 394 | /* |
| 283 | * Valid bit combinations of the eacr register are (x = don't care): | 395 | * Valid bit combinations of the eacr register are (x = don't care): |
| 284 | * e0 e1 dp p0 p1 ea es sl | 396 | * e0 e1 dp p0 p1 ea es sl |
| @@ -305,74 +417,18 @@ enum etr_flags { | |||
| 305 | */ | 417 | */ |
| 306 | static struct etr_eacr etr_eacr; | 418 | static struct etr_eacr etr_eacr; |
| 307 | static u64 etr_tolec; /* time of last eacr update */ | 419 | static u64 etr_tolec; /* time of last eacr update */ |
| 308 | static unsigned long etr_flags; | ||
| 309 | static struct etr_aib etr_port0; | 420 | static struct etr_aib etr_port0; |
| 310 | static int etr_port0_uptodate; | 421 | static int etr_port0_uptodate; |
| 311 | static struct etr_aib etr_port1; | 422 | static struct etr_aib etr_port1; |
| 312 | static int etr_port1_uptodate; | 423 | static int etr_port1_uptodate; |
| 313 | static unsigned long etr_events; | 424 | static unsigned long etr_events; |
| 314 | static struct timer_list etr_timer; | 425 | static struct timer_list etr_timer; |
| 315 | static DEFINE_PER_CPU(atomic_t, etr_sync_word); | ||
| 316 | 426 | ||
| 317 | static void etr_timeout(unsigned long dummy); | 427 | static void etr_timeout(unsigned long dummy); |
| 318 | static void etr_work_fn(struct work_struct *work); | 428 | static void etr_work_fn(struct work_struct *work); |
| 319 | static DECLARE_WORK(etr_work, etr_work_fn); | 429 | static DECLARE_WORK(etr_work, etr_work_fn); |
| 320 | 430 | ||
| 321 | /* | 431 | /* |
| 322 | * The etr get_clock function. It will write the current clock value | ||
| 323 | * to the clock pointer and return 0 if the clock is in sync with the | ||
| 324 | * external time source. If the clock mode is local it will return | ||
| 325 | * -ENOSYS and -EAGAIN if the clock is not in sync with the external | ||
| 326 | * reference. This function is what ETR is all about.. | ||
| 327 | */ | ||
| 328 | int get_sync_clock(unsigned long long *clock) | ||
| 329 | { | ||
| 330 | atomic_t *sw_ptr; | ||
| 331 | unsigned int sw0, sw1; | ||
| 332 | |||
| 333 | sw_ptr = &get_cpu_var(etr_sync_word); | ||
| 334 | sw0 = atomic_read(sw_ptr); | ||
| 335 | *clock = get_clock(); | ||
| 336 | sw1 = atomic_read(sw_ptr); | ||
| 337 | put_cpu_var(etr_sync_sync); | ||
| 338 | if (sw0 == sw1 && (sw0 & 0x80000000U)) | ||
| 339 | /* Success: time is in sync. */ | ||
| 340 | return 0; | ||
| 341 | if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) | ||
| 342 | return -ENOSYS; | ||
| 343 | if (test_bit(ETR_FLAG_EACCES, &etr_flags)) | ||
| 344 | return -EACCES; | ||
| 345 | return -EAGAIN; | ||
| 346 | } | ||
| 347 | EXPORT_SYMBOL(get_sync_clock); | ||
| 348 | |||
| 349 | /* | ||
| 350 | * Make get_sync_clock return -EAGAIN. | ||
| 351 | */ | ||
| 352 | static void etr_disable_sync_clock(void *dummy) | ||
| 353 | { | ||
| 354 | atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word); | ||
| 355 | /* | ||
| 356 | * Clear the in-sync bit 2^31. All get_sync_clock calls will | ||
| 357 | * fail until the sync bit is turned back on. In addition | ||
| 358 | * increase the "sequence" counter to avoid the race of an | ||
| 359 | * etr event and the complete recovery against get_sync_clock. | ||
| 360 | */ | ||
| 361 | atomic_clear_mask(0x80000000, sw_ptr); | ||
| 362 | atomic_inc(sw_ptr); | ||
| 363 | } | ||
| 364 | |||
| 365 | /* | ||
| 366 | * Make get_sync_clock return 0 again. | ||
| 367 | * Needs to be called from a context disabled for preemption. | ||
| 368 | */ | ||
| 369 | static void etr_enable_sync_clock(void) | ||
| 370 | { | ||
| 371 | atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word); | ||
| 372 | atomic_set_mask(0x80000000, sw_ptr); | ||
| 373 | } | ||
| 374 | |||
| 375 | /* | ||
| 376 | * Reset ETR attachment. | 432 | * Reset ETR attachment. |
| 377 | */ | 433 | */ |
| 378 | static void etr_reset(void) | 434 | static void etr_reset(void) |
| @@ -381,15 +437,13 @@ static void etr_reset(void) | |||
| 381 | .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0, | 437 | .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0, |
| 382 | .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, | 438 | .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, |
| 383 | .es = 0, .sl = 0 }; | 439 | .es = 0, .sl = 0 }; |
| 384 | if (etr_setr(&etr_eacr) == 0) | 440 | if (etr_setr(&etr_eacr) == 0) { |
| 385 | etr_tolec = get_clock(); | 441 | etr_tolec = get_clock(); |
| 386 | else { | 442 | set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); |
| 387 | set_bit(ETR_FLAG_ENOSYS, &etr_flags); | 443 | } else if (etr_port0_online || etr_port1_online) { |
| 388 | if (etr_port0_online || etr_port1_online) { | 444 | printk(KERN_WARNING "Running on non ETR capable " |
| 389 | printk(KERN_WARNING "Running on non ETR capable " | 445 | "machine, only local mode available.\n"); |
| 390 | "machine, only local mode available.\n"); | 446 | etr_port0_online = etr_port1_online = 0; |
| 391 | etr_port0_online = etr_port1_online = 0; | ||
| 392 | } | ||
| 393 | } | 447 | } |
| 394 | } | 448 | } |
| 395 | 449 | ||
| @@ -397,14 +451,12 @@ static int __init etr_init(void) | |||
| 397 | { | 451 | { |
| 398 | struct etr_aib aib; | 452 | struct etr_aib aib; |
| 399 | 453 | ||
| 400 | if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) | 454 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) |
| 401 | return 0; | 455 | return 0; |
| 402 | /* Check if this machine has the steai instruction. */ | 456 | /* Check if this machine has the steai instruction. */ |
| 403 | if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) | 457 | if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) |
| 404 | set_bit(ETR_FLAG_STEAI, &etr_flags); | 458 | etr_steai_available = 1; |
| 405 | setup_timer(&etr_timer, etr_timeout, 0UL); | 459 | setup_timer(&etr_timer, etr_timeout, 0UL); |
| 406 | if (!etr_port0_online && !etr_port1_online) | ||
| 407 | set_bit(ETR_FLAG_EACCES, &etr_flags); | ||
| 408 | if (etr_port0_online) { | 460 | if (etr_port0_online) { |
| 409 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | 461 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); |
| 410 | schedule_work(&etr_work); | 462 | schedule_work(&etr_work); |
| @@ -435,7 +487,8 @@ void etr_switch_to_local(void) | |||
| 435 | { | 487 | { |
| 436 | if (!etr_eacr.sl) | 488 | if (!etr_eacr.sl) |
| 437 | return; | 489 | return; |
| 438 | etr_disable_sync_clock(NULL); | 490 | if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) |
| 491 | disable_sync_clock(NULL); | ||
| 439 | set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); | 492 | set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); |
| 440 | schedule_work(&etr_work); | 493 | schedule_work(&etr_work); |
| 441 | } | 494 | } |
| @@ -450,23 +503,21 @@ void etr_sync_check(void) | |||
| 450 | { | 503 | { |
| 451 | if (!etr_eacr.es) | 504 | if (!etr_eacr.es) |
| 452 | return; | 505 | return; |
| 453 | etr_disable_sync_clock(NULL); | 506 | if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) |
| 507 | disable_sync_clock(NULL); | ||
| 454 | set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); | 508 | set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); |
| 455 | schedule_work(&etr_work); | 509 | schedule_work(&etr_work); |
| 456 | } | 510 | } |
| 457 | 511 | ||
| 458 | /* | 512 | /* |
| 459 | * ETR external interrupt. There are two causes: | 513 | * ETR timing alert. There are two causes: |
| 460 | * 1) port state change, check the usability of the port | 514 | * 1) port state change, check the usability of the port |
| 461 | * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the | 515 | * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the |
| 462 | * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3) | 516 | * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3) |
| 463 | * or ETR-data word 4 (edf4) has changed. | 517 | * or ETR-data word 4 (edf4) has changed. |
| 464 | */ | 518 | */ |
| 465 | static void etr_ext_handler(__u16 code) | 519 | static void etr_timing_alert(struct etr_irq_parm *intparm) |
| 466 | { | 520 | { |
| 467 | struct etr_interruption_parameter *intparm = | ||
| 468 | (struct etr_interruption_parameter *) &S390_lowcore.ext_params; | ||
| 469 | |||
| 470 | if (intparm->pc0) | 521 | if (intparm->pc0) |
| 471 | /* ETR port 0 state change. */ | 522 | /* ETR port 0 state change. */ |
| 472 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | 523 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); |
| @@ -591,58 +642,23 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p) | |||
| 591 | return 1; | 642 | return 1; |
| 592 | } | 643 | } |
| 593 | 644 | ||
| 594 | /* | 645 | struct clock_sync_data { |
| 595 | * The time is "clock". old is what we think the time is. | ||
| 596 | * Adjust the value by a multiple of jiffies and add the delta to ntp. | ||
| 597 | * "delay" is an approximation how long the synchronization took. If | ||
| 598 | * the time correction is positive, then "delay" is subtracted from | ||
| 599 | * the time difference and only the remaining part is passed to ntp. | ||
| 600 | */ | ||
| 601 | static unsigned long long etr_adjust_time(unsigned long long old, | ||
| 602 | unsigned long long clock, | ||
| 603 | unsigned long long delay) | ||
| 604 | { | ||
| 605 | unsigned long long delta, ticks; | ||
| 606 | struct timex adjust; | ||
| 607 | |||
| 608 | if (clock > old) { | ||
| 609 | /* It is later than we thought. */ | ||
| 610 | delta = ticks = clock - old; | ||
| 611 | delta = ticks = (delta < delay) ? 0 : delta - delay; | ||
| 612 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | ||
| 613 | adjust.offset = ticks * (1000000 / HZ); | ||
| 614 | } else { | ||
| 615 | /* It is earlier than we thought. */ | ||
| 616 | delta = ticks = old - clock; | ||
| 617 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | ||
| 618 | delta = -delta; | ||
| 619 | adjust.offset = -ticks * (1000000 / HZ); | ||
| 620 | } | ||
| 621 | jiffies_timer_cc += delta; | ||
| 622 | if (adjust.offset != 0) { | ||
| 623 | printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n", | ||
| 624 | adjust.offset); | ||
| 625 | adjust.modes = ADJ_OFFSET_SINGLESHOT; | ||
| 626 | do_adjtimex(&adjust); | ||
| 627 | } | ||
| 628 | return delta; | ||
| 629 | } | ||
| 630 | |||
| 631 | static struct { | ||
| 632 | int in_sync; | 646 | int in_sync; |
| 633 | unsigned long long fixup_cc; | 647 | unsigned long long fixup_cc; |
| 634 | } etr_sync; | 648 | }; |
| 635 | 649 | ||
| 636 | static void etr_sync_cpu_start(void *dummy) | 650 | static void clock_sync_cpu_start(void *dummy) |
| 637 | { | 651 | { |
| 638 | etr_enable_sync_clock(); | 652 | struct clock_sync_data *sync = dummy; |
| 653 | |||
| 654 | enable_sync_clock(); | ||
| 639 | /* | 655 | /* |
| 640 | * This looks like a busy wait loop but it isn't. etr_sync_cpus | 656 | * This looks like a busy wait loop but it isn't. etr_sync_cpus |
| 641 | * is called on all other cpus while the TOD clocks is stopped. | 657 | * is called on all other cpus while the TOD clocks is stopped. |
| 642 | * __udelay will stop the cpu on an enabled wait psw until the | 658 | * __udelay will stop the cpu on an enabled wait psw until the |
| 643 | * TOD is running again. | 659 | * TOD is running again. |
| 644 | */ | 660 | */ |
| 645 | while (etr_sync.in_sync == 0) { | 661 | while (sync->in_sync == 0) { |
| 646 | __udelay(1); | 662 | __udelay(1); |
| 647 | /* | 663 | /* |
| 648 | * A different cpu changes *in_sync. Therefore use | 664 | * A different cpu changes *in_sync. Therefore use |
| @@ -650,17 +666,17 @@ static void etr_sync_cpu_start(void *dummy) | |||
| 650 | */ | 666 | */ |
| 651 | barrier(); | 667 | barrier(); |
| 652 | } | 668 | } |
| 653 | if (etr_sync.in_sync != 1) | 669 | if (sync->in_sync != 1) |
| 654 | /* Didn't work. Clear per-cpu in sync bit again. */ | 670 | /* Didn't work. Clear per-cpu in sync bit again. */ |
| 655 | etr_disable_sync_clock(NULL); | 671 | disable_sync_clock(NULL); |
| 656 | /* | 672 | /* |
| 657 | * This round of TOD syncing is done. Set the clock comparator | 673 | * This round of TOD syncing is done. Set the clock comparator |
| 658 | * to the next tick and let the processor continue. | 674 | * to the next tick and let the processor continue. |
| 659 | */ | 675 | */ |
| 660 | fixup_clock_comparator(etr_sync.fixup_cc); | 676 | fixup_clock_comparator(sync->fixup_cc); |
| 661 | } | 677 | } |
| 662 | 678 | ||
| 663 | static void etr_sync_cpu_end(void *dummy) | 679 | static void clock_sync_cpu_end(void *dummy) |
| 664 | { | 680 | { |
| 665 | } | 681 | } |
| 666 | 682 | ||
| @@ -672,6 +688,7 @@ static void etr_sync_cpu_end(void *dummy) | |||
| 672 | static int etr_sync_clock(struct etr_aib *aib, int port) | 688 | static int etr_sync_clock(struct etr_aib *aib, int port) |
| 673 | { | 689 | { |
| 674 | struct etr_aib *sync_port; | 690 | struct etr_aib *sync_port; |
| 691 | struct clock_sync_data etr_sync; | ||
| 675 | unsigned long long clock, old_clock, delay, delta; | 692 | unsigned long long clock, old_clock, delay, delta; |
| 676 | int follows; | 693 | int follows; |
| 677 | int rc; | 694 | int rc; |
| @@ -690,9 +707,9 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
| 690 | */ | 707 | */ |
| 691 | memset(&etr_sync, 0, sizeof(etr_sync)); | 708 | memset(&etr_sync, 0, sizeof(etr_sync)); |
| 692 | preempt_disable(); | 709 | preempt_disable(); |
| 693 | smp_call_function(etr_sync_cpu_start, NULL, 0, 0); | 710 | smp_call_function(clock_sync_cpu_start, &etr_sync, 0, 0); |
| 694 | local_irq_disable(); | 711 | local_irq_disable(); |
| 695 | etr_enable_sync_clock(); | 712 | enable_sync_clock(); |
| 696 | 713 | ||
| 697 | /* Set clock to next OTE. */ | 714 | /* Set clock to next OTE. */ |
| 698 | __ctl_set_bit(14, 21); | 715 | __ctl_set_bit(14, 21); |
| @@ -707,13 +724,13 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
| 707 | /* Adjust Linux timing variables. */ | 724 | /* Adjust Linux timing variables. */ |
| 708 | delay = (unsigned long long) | 725 | delay = (unsigned long long) |
| 709 | (aib->edf2.etv - sync_port->edf2.etv) << 32; | 726 | (aib->edf2.etv - sync_port->edf2.etv) << 32; |
| 710 | delta = etr_adjust_time(old_clock, clock, delay); | 727 | delta = adjust_time(old_clock, clock, delay); |
| 711 | etr_sync.fixup_cc = delta; | 728 | etr_sync.fixup_cc = delta; |
| 712 | fixup_clock_comparator(delta); | 729 | fixup_clock_comparator(delta); |
| 713 | /* Verify that the clock is properly set. */ | 730 | /* Verify that the clock is properly set. */ |
| 714 | if (!etr_aib_follows(sync_port, aib, port)) { | 731 | if (!etr_aib_follows(sync_port, aib, port)) { |
| 715 | /* Didn't work. */ | 732 | /* Didn't work. */ |
| 716 | etr_disable_sync_clock(NULL); | 733 | disable_sync_clock(NULL); |
| 717 | etr_sync.in_sync = -EAGAIN; | 734 | etr_sync.in_sync = -EAGAIN; |
| 718 | rc = -EAGAIN; | 735 | rc = -EAGAIN; |
| 719 | } else { | 736 | } else { |
| @@ -724,12 +741,12 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
| 724 | /* Could not set the clock ?!? */ | 741 | /* Could not set the clock ?!? */ |
| 725 | __ctl_clear_bit(0, 29); | 742 | __ctl_clear_bit(0, 29); |
| 726 | __ctl_clear_bit(14, 21); | 743 | __ctl_clear_bit(14, 21); |
| 727 | etr_disable_sync_clock(NULL); | 744 | disable_sync_clock(NULL); |
| 728 | etr_sync.in_sync = -EAGAIN; | 745 | etr_sync.in_sync = -EAGAIN; |
| 729 | rc = -EAGAIN; | 746 | rc = -EAGAIN; |
| 730 | } | 747 | } |
| 731 | local_irq_enable(); | 748 | local_irq_enable(); |
| 732 | smp_call_function(etr_sync_cpu_end,NULL,0,0); | 749 | smp_call_function(clock_sync_cpu_end, NULL, 0, 0); |
| 733 | preempt_enable(); | 750 | preempt_enable(); |
| 734 | return rc; | 751 | return rc; |
| 735 | } | 752 | } |
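
etr_sync_clock() above hands the measured clock difference to adjust_time(), which splits it into a whole number of jiffies (reported to ntp in microseconds) and folds the whole-jiffy part into jiffies_timer_cc. A worked example of that split, assuming the usual s390 TOD scaling of 4096 clock units per microsecond and HZ=100:

#include <stdio.h>

#define HZ 100
/* Assumes the usual s390 TOD scaling: 4096 clock units per microsecond. */
#define CLK_TICKS_PER_JIFFY (4096ULL * 1000000 / HZ)

int main(void)
{
	unsigned long long old = 0, clock = 3 * CLK_TICKS_PER_JIFFY + 12345, delay = 0;
	unsigned long long delta, ticks, rem;
	long offset_us;

	/* Same split as adjust_time() for the "later than we thought" case. */
	delta = ticks = clock - old;
	delta = ticks = (delta < delay) ? 0 : delta - delay;
	rem = ticks % CLK_TICKS_PER_JIFFY;	/* do_div() leaves the quotient and returns this */
	ticks /= CLK_TICKS_PER_JIFFY;		/* whole jiffies of correction */
	delta -= rem;				/* whole-jiffy part, in clock units */
	offset_us = (long) (ticks * (1000000 / HZ));

	printf("jiffies_timer_cc += %llu clock units, ntp offset = %ld us\n",
	       delta, offset_us);
	return 0;
}
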
| @@ -832,7 +849,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib, | |||
| 832 | * Do not try to get the alternate port aib if the clock | 849 | * Do not try to get the alternate port aib if the clock |
| 833 | * is not in sync yet. | 850 | * is not in sync yet. |
| 834 | */ | 851 | */ |
| 835 | if (!eacr.es) | 852 | if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags) && !eacr.es) |
| 836 | return eacr; | 853 | return eacr; |
| 837 | 854 | ||
| 838 | /* | 855 | /* |
| @@ -840,7 +857,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib, | |||
| 840 | * the other port immediately. If only stetr is available the | 857 | * the other port immediately. If only stetr is available the |
| 841 | * data-port bit toggle has to be used. | 858 | * data-port bit toggle has to be used. |
| 842 | */ | 859 | */ |
| 843 | if (test_bit(ETR_FLAG_STEAI, &etr_flags)) { | 860 | if (etr_steai_available) { |
| 844 | if (eacr.p0 && !etr_port0_uptodate) { | 861 | if (eacr.p0 && !etr_port0_uptodate) { |
| 845 | etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0); | 862 | etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0); |
| 846 | etr_port0_uptodate = 1; | 863 | etr_port0_uptodate = 1; |
| @@ -909,10 +926,10 @@ static void etr_work_fn(struct work_struct *work) | |||
| 909 | if (!eacr.ea) { | 926 | if (!eacr.ea) { |
| 910 | /* Both ports offline. Reset everything. */ | 927 | /* Both ports offline. Reset everything. */ |
| 911 | eacr.dp = eacr.es = eacr.sl = 0; | 928 | eacr.dp = eacr.es = eacr.sl = 0; |
| 912 | on_each_cpu(etr_disable_sync_clock, NULL, 0, 1); | 929 | on_each_cpu(disable_sync_clock, NULL, 0, 1); |
| 913 | del_timer_sync(&etr_timer); | 930 | del_timer_sync(&etr_timer); |
| 914 | etr_update_eacr(eacr); | 931 | etr_update_eacr(eacr); |
| 915 | set_bit(ETR_FLAG_EACCES, &etr_flags); | 932 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
| 916 | return; | 933 | return; |
| 917 | } | 934 | } |
| 918 | 935 | ||
| @@ -953,7 +970,6 @@ static void etr_work_fn(struct work_struct *work) | |||
| 953 | eacr.e1 = 1; | 970 | eacr.e1 = 1; |
| 954 | sync_port = (etr_port0_uptodate && | 971 | sync_port = (etr_port0_uptodate && |
| 955 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; | 972 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; |
| 956 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
| 957 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) { | 973 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) { |
| 958 | eacr.sl = 0; | 974 | eacr.sl = 0; |
| 959 | eacr.e0 = 0; | 975 | eacr.e0 = 0; |
| @@ -962,7 +978,6 @@ static void etr_work_fn(struct work_struct *work) | |||
| 962 | eacr.es = 0; | 978 | eacr.es = 0; |
| 963 | sync_port = (etr_port1_uptodate && | 979 | sync_port = (etr_port1_uptodate && |
| 964 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; | 980 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; |
| 965 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
| 966 | } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) { | 981 | } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) { |
| 967 | eacr.sl = 1; | 982 | eacr.sl = 1; |
| 968 | eacr.e0 = 1; | 983 | eacr.e0 = 1; |
| @@ -976,7 +991,6 @@ static void etr_work_fn(struct work_struct *work) | |||
| 976 | eacr.e1 = 1; | 991 | eacr.e1 = 1; |
| 977 | sync_port = (etr_port0_uptodate && | 992 | sync_port = (etr_port0_uptodate && |
| 978 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; | 993 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; |
| 979 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
| 980 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) { | 994 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) { |
| 981 | eacr.sl = 1; | 995 | eacr.sl = 1; |
| 982 | eacr.e0 = 0; | 996 | eacr.e0 = 0; |
| @@ -985,19 +999,22 @@ static void etr_work_fn(struct work_struct *work) | |||
| 985 | eacr.es = 0; | 999 | eacr.es = 0; |
| 986 | sync_port = (etr_port1_uptodate && | 1000 | sync_port = (etr_port1_uptodate && |
| 987 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; | 1001 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; |
| 988 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
| 989 | } else { | 1002 | } else { |
| 990 | /* Both ports not usable. */ | 1003 | /* Both ports not usable. */ |
| 991 | eacr.es = eacr.sl = 0; | 1004 | eacr.es = eacr.sl = 0; |
| 992 | sync_port = -1; | 1005 | sync_port = -1; |
| 993 | set_bit(ETR_FLAG_EACCES, &etr_flags); | 1006 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
| 994 | } | 1007 | } |
| 995 | 1008 | ||
| 1009 | if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) | ||
| 1010 | eacr.es = 0; | ||
| 1011 | |||
| 996 | /* | 1012 | /* |
| 997 | * If the clock is in sync just update the eacr and return. | 1013 | * If the clock is in sync just update the eacr and return. |
| 998 | * If there is no valid sync port wait for a port update. | 1014 | * If there is no valid sync port wait for a port update. |
| 999 | */ | 1015 | */ |
| 1000 | if (eacr.es || sync_port < 0) { | 1016 | if (test_bit(CLOCK_SYNC_STP, &clock_sync_flags) || |
| 1017 | eacr.es || sync_port < 0) { | ||
| 1001 | etr_update_eacr(eacr); | 1018 | etr_update_eacr(eacr); |
| 1002 | etr_set_tolec_timeout(now); | 1019 | etr_set_tolec_timeout(now); |
| 1003 | return; | 1020 | return; |
| @@ -1018,11 +1035,13 @@ static void etr_work_fn(struct work_struct *work) | |||
| 1018 | * and set up a timer to try again after 0.5 seconds | 1035 | * and set up a timer to try again after 0.5 seconds |
| 1019 | */ | 1036 | */ |
| 1020 | etr_update_eacr(eacr); | 1037 | etr_update_eacr(eacr); |
| 1038 | set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | ||
| 1021 | if (now < etr_tolec + (1600000 << 12) || | 1039 | if (now < etr_tolec + (1600000 << 12) || |
| 1022 | etr_sync_clock(&aib, sync_port) != 0) { | 1040 | etr_sync_clock(&aib, sync_port) != 0) { |
| 1023 | /* Sync failed. Try again in 1/2 second. */ | 1041 | /* Sync failed. Try again in 1/2 second. */ |
| 1024 | eacr.es = 0; | 1042 | eacr.es = 0; |
| 1025 | etr_update_eacr(eacr); | 1043 | etr_update_eacr(eacr); |
| 1044 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | ||
| 1026 | etr_set_sync_timeout(); | 1045 | etr_set_sync_timeout(); |
| 1027 | } else | 1046 | } else |
| 1028 | etr_set_tolec_timeout(now); | 1047 | etr_set_tolec_timeout(now); |
| @@ -1097,8 +1116,8 @@ static ssize_t etr_online_store(struct sys_device *dev, | |||
| 1097 | value = simple_strtoul(buf, NULL, 0); | 1116 | value = simple_strtoul(buf, NULL, 0); |
| 1098 | if (value != 0 && value != 1) | 1117 | if (value != 0 && value != 1) |
| 1099 | return -EINVAL; | 1118 | return -EINVAL; |
| 1100 | if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) | 1119 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) |
| 1101 | return -ENOSYS; | 1120 | return -EOPNOTSUPP; |
| 1102 | if (dev == &etr_port0_dev) { | 1121 | if (dev == &etr_port0_dev) { |
| 1103 | if (etr_port0_online == value) | 1122 | if (etr_port0_online == value) |
| 1104 | return count; /* Nothing to do. */ | 1123 | return count; /* Nothing to do. */ |
| @@ -1292,3 +1311,318 @@ out: | |||
| 1292 | } | 1311 | } |
| 1293 | 1312 | ||
| 1294 | device_initcall(etr_init_sysfs); | 1313 | device_initcall(etr_init_sysfs); |
| 1314 | |||
| 1315 | /* | ||
| 1316 | * Server Time Protocol (STP) code. | ||
| 1317 | */ | ||
| 1318 | static int stp_online; | ||
| 1319 | static struct stp_sstpi stp_info; | ||
| 1320 | static void *stp_page; | ||
| 1321 | |||
| 1322 | static void stp_work_fn(struct work_struct *work); | ||
| 1323 | static DECLARE_WORK(stp_work, stp_work_fn); | ||
| 1324 | |||
| 1325 | static int __init early_parse_stp(char *p) | ||
| 1326 | { | ||
| 1327 | if (strncmp(p, "off", 3) == 0) | ||
| 1328 | stp_online = 0; | ||
| 1329 | else if (strncmp(p, "on", 2) == 0) | ||
| 1330 | stp_online = 1; | ||
| 1331 | return 0; | ||
| 1332 | } | ||
| 1333 | early_param("stp", early_parse_stp); | ||
| 1334 | |||
| 1335 | /* | ||
| 1336 | * Reset STP attachment. | ||
| 1337 | */ | ||
| 1338 | static void stp_reset(void) | ||
| 1339 | { | ||
| 1340 | int rc; | ||
| 1341 | |||
| 1342 | stp_page = alloc_bootmem_pages(PAGE_SIZE); | ||
| 1343 | rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); | ||
| 1344 | if (rc == 1) | ||
| 1345 | set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags); | ||
| 1346 | else if (stp_online) { | ||
| 1347 | printk(KERN_WARNING "Running on non STP capable machine.\n"); | ||
| 1348 | free_bootmem((unsigned long) stp_page, PAGE_SIZE); | ||
| 1349 | stp_page = NULL; | ||
| 1350 | stp_online = 0; | ||
| 1351 | } | ||
| 1352 | } | ||
| 1353 | |||
| 1354 | static int __init stp_init(void) | ||
| 1355 | { | ||
| 1356 | if (test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online) | ||
| 1357 | schedule_work(&stp_work); | ||
| 1358 | return 0; | ||
| 1359 | } | ||
| 1360 | |||
| 1361 | arch_initcall(stp_init); | ||
| 1362 | |||
| 1363 | /* | ||
| 1364 | * STP timing alert. There are three causes: | ||
| 1365 | * 1) timing status change | ||
| 1366 | * 2) link availability change | ||
| 1367 | * 3) time control parameter change | ||
| 1368 | * In all three cases we are only interested in the clock source state. | ||
| 1370 | * If an STP clock source is now available, use it. | ||
| 1370 | */ | ||
| 1371 | static void stp_timing_alert(struct stp_irq_parm *intparm) | ||
| 1372 | { | ||
| 1373 | if (intparm->tsc || intparm->lac || intparm->tcpc) | ||
| 1374 | schedule_work(&stp_work); | ||
| 1375 | } | ||
| 1376 | |||
| 1377 | /* | ||
| 1378 | * STP sync check machine check. This is called when the timing state | ||
| 1379 | * changes from the synchronized state to the unsynchronized state. | ||
| 1380 | * After a STP sync check the clock is not in sync. The machine check | ||
| 1381 | * is broadcast to all cpus at the same time. | ||
| 1382 | */ | ||
| 1383 | void stp_sync_check(void) | ||
| 1384 | { | ||
| 1385 | if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) | ||
| 1386 | return; | ||
| 1387 | disable_sync_clock(NULL); | ||
| 1388 | schedule_work(&stp_work); | ||
| 1389 | } | ||
| 1390 | |||
| 1391 | /* | ||
| 1392 | * STP island condition machine check. This is called when an attached | ||
| 1393 | * server attempts to communicate over an STP link and the servers | ||
| 1394 | * have matching CTN ids and have a valid stratum-1 configuration | ||
| 1395 | * but the configurations do not match. | ||
| 1396 | */ | ||
| 1397 | void stp_island_check(void) | ||
| 1398 | { | ||
| 1399 | if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) | ||
| 1400 | return; | ||
| 1401 | disable_sync_clock(NULL); | ||
| 1402 | schedule_work(&stp_work); | ||
| 1403 | } | ||
| 1404 | |||
| 1405 | /* | ||
| 1406 | * STP work function. Check the STP state and take over the clock | ||
| 1407 | * synchronization if the STP clock source is usable. | ||
| 1408 | */ | ||
| 1409 | static void stp_work_fn(struct work_struct *work) | ||
| 1410 | { | ||
| 1411 | struct clock_sync_data stp_sync; | ||
| 1412 | unsigned long long old_clock, delta; | ||
| 1413 | int rc; | ||
| 1414 | |||
| 1415 | if (!stp_online) { | ||
| 1416 | chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); | ||
| 1417 | return; | ||
| 1418 | } | ||
| 1419 | |||
| 1420 | rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0); | ||
| 1421 | if (rc) | ||
| 1422 | return; | ||
| 1423 | |||
| 1424 | rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); | ||
| 1425 | if (rc || stp_info.c == 0) | ||
| 1426 | return; | ||
| 1427 | |||
| 1428 | /* | ||
| 1429 | * Catch all other cpus and make them wait until we have | ||
| 1430 | * successfully synced the clock. smp_call_function will | ||
| 1431 | * return after all other cpus are in clock_sync_cpu_start. | ||
| 1432 | */ | ||
| 1433 | memset(&stp_sync, 0, sizeof(stp_sync)); | ||
| 1434 | preempt_disable(); | ||
| 1435 | smp_call_function(clock_sync_cpu_start, &stp_sync, 0, 0); | ||
| 1436 | local_irq_disable(); | ||
| 1437 | enable_sync_clock(); | ||
| 1438 | |||
| 1439 | set_bit(CLOCK_SYNC_STP, &clock_sync_flags); | ||
| 1440 | if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) | ||
| 1441 | schedule_work(&etr_work); | ||
| 1442 | |||
| 1443 | rc = 0; | ||
| 1444 | if (stp_info.todoff[0] || stp_info.todoff[1] || | ||
| 1445 | stp_info.todoff[2] || stp_info.todoff[3] || | ||
| 1446 | stp_info.tmd != 2) { | ||
| 1447 | old_clock = get_clock(); | ||
| 1448 | rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0); | ||
| 1449 | if (rc == 0) { | ||
| 1450 | delta = adjust_time(old_clock, get_clock(), 0); | ||
| 1451 | fixup_clock_comparator(delta); | ||
| 1452 | rc = chsc_sstpi(stp_page, &stp_info, | ||
| 1453 | sizeof(struct stp_sstpi)); | ||
| 1454 | if (rc == 0 && stp_info.tmd != 2) | ||
| 1455 | rc = -EAGAIN; | ||
| 1456 | } | ||
| 1457 | } | ||
| 1458 | if (rc) { | ||
| 1459 | disable_sync_clock(NULL); | ||
| 1460 | stp_sync.in_sync = -EAGAIN; | ||
| 1461 | clear_bit(CLOCK_SYNC_STP, &clock_sync_flags); | ||
| 1462 | if (etr_port0_online || etr_port1_online) | ||
| 1463 | schedule_work(&etr_work); | ||
| 1464 | } else | ||
| 1465 | stp_sync.in_sync = 1; | ||
| 1466 | |||
| 1467 | local_irq_enable(); | ||
| 1468 | smp_call_function(clock_sync_cpu_end, NULL, 0, 0); | ||
| 1469 | preempt_enable(); | ||
| 1470 | } | ||
| 1471 | |||
| 1472 | /* | ||
| 1473 | * STP class sysfs interface functions | ||
| 1474 | */ | ||
| 1475 | static struct sysdev_class stp_sysclass = { | ||
| 1476 | .name = "stp", | ||
| 1477 | }; | ||
| 1478 | |||
| 1479 | static ssize_t stp_ctn_id_show(struct sysdev_class *class, char *buf) | ||
| 1480 | { | ||
| 1481 | if (!stp_online) | ||
| 1482 | return -ENODATA; | ||
| 1483 | return sprintf(buf, "%016llx\n", | ||
| 1484 | *(unsigned long long *) stp_info.ctnid); | ||
| 1485 | } | ||
| 1486 | |||
| 1487 | static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL); | ||
| 1488 | |||
| 1489 | static ssize_t stp_ctn_type_show(struct sysdev_class *class, char *buf) | ||
| 1490 | { | ||
| 1491 | if (!stp_online) | ||
| 1492 | return -ENODATA; | ||
| 1493 | return sprintf(buf, "%i\n", stp_info.ctn); | ||
| 1494 | } | ||
| 1495 | |||
| 1496 | static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL); | ||
| 1497 | |||
| 1498 | static ssize_t stp_dst_offset_show(struct sysdev_class *class, char *buf) | ||
| 1499 | { | ||
| 1500 | if (!stp_online || !(stp_info.vbits & 0x2000)) | ||
| 1501 | return -ENODATA; | ||
| 1502 | return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto); | ||
| 1503 | } | ||
| 1504 | |||
| 1505 | static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL); | ||
| 1506 | |||
| 1507 | static ssize_t stp_leap_seconds_show(struct sysdev_class *class, char *buf) | ||
| 1508 | { | ||
| 1509 | if (!stp_online || !(stp_info.vbits & 0x8000)) | ||
| 1510 | return -ENODATA; | ||
| 1511 | return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps); | ||
| 1512 | } | ||
| 1513 | |||
| 1514 | static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL); | ||
| 1515 | |||
| 1516 | static ssize_t stp_stratum_show(struct sysdev_class *class, char *buf) | ||
| 1517 | { | ||
| 1518 | if (!stp_online) | ||
| 1519 | return -ENODATA; | ||
| 1520 | return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum); | ||
| 1521 | } | ||
| 1522 | |||
| 1523 | static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL); | ||
| 1524 | |||
| 1525 | static ssize_t stp_time_offset_show(struct sysdev_class *class, char *buf) | ||
| 1526 | { | ||
| 1527 | if (!stp_online || !(stp_info.vbits & 0x0800)) | ||
| 1528 | return -ENODATA; | ||
| 1529 | return sprintf(buf, "%i\n", (int) stp_info.tto); | ||
| 1530 | } | ||
| 1531 | |||
| 1532 | static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL); | ||
| 1533 | |||
| 1534 | static ssize_t stp_time_zone_offset_show(struct sysdev_class *class, char *buf) | ||
| 1535 | { | ||
| 1536 | if (!stp_online || !(stp_info.vbits & 0x4000)) | ||
| 1537 | return -ENODATA; | ||
| 1538 | return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo); | ||
| 1539 | } | ||
| 1540 | |||
| 1541 | static SYSDEV_CLASS_ATTR(time_zone_offset, 0400, | ||
| 1542 | stp_time_zone_offset_show, NULL); | ||
| 1543 | |||
| 1544 | static ssize_t stp_timing_mode_show(struct sysdev_class *class, char *buf) | ||
| 1545 | { | ||
| 1546 | if (!stp_online) | ||
| 1547 | return -ENODATA; | ||
| 1548 | return sprintf(buf, "%i\n", stp_info.tmd); | ||
| 1549 | } | ||
| 1550 | |||
| 1551 | static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL); | ||
| 1552 | |||
| 1553 | static ssize_t stp_timing_state_show(struct sysdev_class *class, char *buf) | ||
| 1554 | { | ||
| 1555 | if (!stp_online) | ||
| 1556 | return -ENODATA; | ||
| 1557 | return sprintf(buf, "%i\n", stp_info.tst); | ||
| 1558 | } | ||
| 1559 | |||
| 1560 | static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL); | ||
| 1561 | |||
| 1562 | static ssize_t stp_online_show(struct sysdev_class *class, char *buf) | ||
| 1563 | { | ||
| 1564 | return sprintf(buf, "%i\n", stp_online); | ||
| 1565 | } | ||
| 1566 | |||
| 1567 | static ssize_t stp_online_store(struct sysdev_class *class, | ||
| 1568 | const char *buf, size_t count) | ||
| 1569 | { | ||
| 1570 | unsigned int value; | ||
| 1571 | |||
| 1572 | value = simple_strtoul(buf, NULL, 0); | ||
| 1573 | if (value != 0 && value != 1) | ||
| 1574 | return -EINVAL; | ||
| 1575 | if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) | ||
| 1576 | return -EOPNOTSUPP; | ||
| 1577 | stp_online = value; | ||
| 1578 | schedule_work(&stp_work); | ||
| 1579 | return count; | ||
| 1580 | } | ||
| 1581 | |||
| 1582 | /* | ||
| 1583 | * Can't use SYSDEV_CLASS_ATTR because the attribute should be named | ||
| 1584 | * stp/online but attr_online already exists in this file. | ||
| 1585 | */ | ||
| 1586 | static struct sysdev_class_attribute attr_stp_online = { | ||
| 1587 | .attr = { .name = "online", .mode = 0600 }, | ||
| 1588 | .show = stp_online_show, | ||
| 1589 | .store = stp_online_store, | ||
| 1590 | }; | ||
| 1591 | |||
| 1592 | static struct sysdev_class_attribute *stp_attributes[] = { | ||
| 1593 | &attr_ctn_id, | ||
| 1594 | &attr_ctn_type, | ||
| 1595 | &attr_dst_offset, | ||
| 1596 | &attr_leap_seconds, | ||
| 1597 | &attr_stp_online, | ||
| 1598 | &attr_stratum, | ||
| 1599 | &attr_time_offset, | ||
| 1600 | &attr_time_zone_offset, | ||
| 1601 | &attr_timing_mode, | ||
| 1602 | &attr_timing_state, | ||
| 1603 | NULL | ||
| 1604 | }; | ||
| 1605 | |||
| 1606 | static int __init stp_init_sysfs(void) | ||
| 1607 | { | ||
| 1608 | struct sysdev_class_attribute **attr; | ||
| 1609 | int rc; | ||
| 1610 | |||
| 1611 | rc = sysdev_class_register(&stp_sysclass); | ||
| 1612 | if (rc) | ||
| 1613 | goto out; | ||
| 1614 | for (attr = stp_attributes; *attr; attr++) { | ||
| 1615 | rc = sysdev_class_create_file(&stp_sysclass, *attr); | ||
| 1616 | if (rc) | ||
| 1617 | goto out_unreg; | ||
| 1618 | } | ||
| 1619 | return 0; | ||
| 1620 | out_unreg: | ||
| 1621 | for (; attr >= stp_attributes; attr--) | ||
| 1622 | sysdev_class_remove_file(&stp_sysclass, *attr); | ||
| 1623 | sysdev_class_unregister(&stp_sysclass); | ||
| 1624 | out: | ||
| 1625 | return rc; | ||
| 1626 | } | ||
| 1627 | |||
| 1628 | device_initcall(stp_init_sysfs); | ||
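
The new sysdev class registers a handful of read-only STP attributes plus a writable online switch. Assuming the class shows up under /sys/devices/system/stp/ (the usual location for a sysdev class; the path is not spelled out in the patch), a short C sketch that reads one of them:

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/devices/system/stp/timing_mode", "r");	/* path assumed */

	if (!f) {
		perror("stp timing_mode");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("STP timing mode: %s", buf);
	fclose(f);
	return 0;
}
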
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 661a07217057..212d618b0095 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
| @@ -313,8 +313,6 @@ void __init s390_init_cpu_topology(void) | |||
| 313 | machine_has_topology_irq = 1; | 313 | machine_has_topology_irq = 1; |
| 314 | 314 | ||
| 315 | tl_info = alloc_bootmem_pages(PAGE_SIZE); | 315 | tl_info = alloc_bootmem_pages(PAGE_SIZE); |
| 316 | if (!tl_info) | ||
| 317 | goto error; | ||
| 318 | info = tl_info; | 316 | info = tl_info; |
| 319 | stsi(info, 15, 1, 2); | 317 | stsi(info, 15, 1, 2); |
| 320 | 318 | ||
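
The dropped NULL check is dead code because alloc_bootmem_pages() panics rather than returning NULL when boot memory runs out, so the error path could never be taken. A userspace analogue of that pattern, with an illustrative aborting allocator:

#include <stdio.h>
#include <stdlib.h>

/* Aborting allocator: callers never see NULL, so a NULL check after the call is dead code. */
static void *xalloc(size_t size)
{
	void *p = malloc(size);

	if (!p) {
		fprintf(stderr, "out of memory\n");
		abort();
	}
	return p;
}

int main(void)
{
	char *page = xalloc(4096);	/* no "if (!page)" needed here, as in the hunk above */

	page[0] = 0;
	free(page);
	return 0;
}
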
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index ca90ee3f930e..0fa5dc5d68e1 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
| @@ -136,7 +136,7 @@ static inline void set_vtimer(__u64 expires) | |||
| 136 | } | 136 | } |
| 137 | #endif | 137 | #endif |
| 138 | 138 | ||
| 139 | static void start_cpu_timer(void) | 139 | void vtime_start_cpu_timer(void) |
| 140 | { | 140 | { |
| 141 | struct vtimer_queue *vt_list; | 141 | struct vtimer_queue *vt_list; |
| 142 | 142 | ||
| @@ -150,7 +150,7 @@ static void start_cpu_timer(void) | |||
| 150 | set_vtimer(vt_list->idle); | 150 | set_vtimer(vt_list->idle); |
| 151 | } | 151 | } |
| 152 | 152 | ||
| 153 | static void stop_cpu_timer(void) | 153 | void vtime_stop_cpu_timer(void) |
| 154 | { | 154 | { |
| 155 | struct vtimer_queue *vt_list; | 155 | struct vtimer_queue *vt_list; |
| 156 | 156 | ||
| @@ -318,8 +318,7 @@ static void internal_add_vtimer(struct vtimer_list *timer) | |||
| 318 | vt_list = &per_cpu(virt_cpu_timer, timer->cpu); | 318 | vt_list = &per_cpu(virt_cpu_timer, timer->cpu); |
| 319 | spin_lock_irqsave(&vt_list->lock, flags); | 319 | spin_lock_irqsave(&vt_list->lock, flags); |
| 320 | 320 | ||
| 321 | if (timer->cpu != smp_processor_id()) | 321 | BUG_ON(timer->cpu != smp_processor_id()); |
| 322 | printk("internal_add_vtimer: BUG, running on wrong CPU"); | ||
| 323 | 322 | ||
| 324 | /* if list is empty we only have to set the timer */ | 323 | /* if list is empty we only have to set the timer */ |
| 325 | if (list_empty(&vt_list->list)) { | 324 | if (list_empty(&vt_list->list)) { |
| @@ -353,25 +352,12 @@ static void internal_add_vtimer(struct vtimer_list *timer) | |||
| 353 | put_cpu(); | 352 | put_cpu(); |
| 354 | } | 353 | } |
| 355 | 354 | ||
| 356 | static inline int prepare_vtimer(struct vtimer_list *timer) | 355 | static inline void prepare_vtimer(struct vtimer_list *timer) |
| 357 | { | 356 | { |
| 358 | if (!timer->function) { | 357 | BUG_ON(!timer->function); |
| 359 | printk("add_virt_timer: uninitialized timer\n"); | 358 | BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE); |
| 360 | return -EINVAL; | 359 | BUG_ON(vtimer_pending(timer)); |
| 361 | } | ||
| 362 | |||
| 363 | if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) { | ||
| 364 | printk("add_virt_timer: invalid timer expire value!\n"); | ||
| 365 | return -EINVAL; | ||
| 366 | } | ||
| 367 | |||
| 368 | if (vtimer_pending(timer)) { | ||
| 369 | printk("add_virt_timer: timer pending\n"); | ||
| 370 | return -EBUSY; | ||
| 371 | } | ||
| 372 | |||
| 373 | timer->cpu = get_cpu(); | 360 | timer->cpu = get_cpu(); |
| 374 | return 0; | ||
| 375 | } | 361 | } |
| 376 | 362 | ||
| 377 | /* | 363 | /* |
| @@ -382,10 +368,7 @@ void add_virt_timer(void *new) | |||
| 382 | struct vtimer_list *timer; | 368 | struct vtimer_list *timer; |
| 383 | 369 | ||
| 384 | timer = (struct vtimer_list *)new; | 370 | timer = (struct vtimer_list *)new; |
| 385 | 371 | prepare_vtimer(timer); | |
| 386 | if (prepare_vtimer(timer) < 0) | ||
| 387 | return; | ||
| 388 | |||
| 389 | timer->interval = 0; | 372 | timer->interval = 0; |
| 390 | internal_add_vtimer(timer); | 373 | internal_add_vtimer(timer); |
| 391 | } | 374 | } |
| @@ -399,10 +382,7 @@ void add_virt_timer_periodic(void *new) | |||
| 399 | struct vtimer_list *timer; | 382 | struct vtimer_list *timer; |
| 400 | 383 | ||
| 401 | timer = (struct vtimer_list *)new; | 384 | timer = (struct vtimer_list *)new; |
| 402 | 385 | prepare_vtimer(timer); | |
| 403 | if (prepare_vtimer(timer) < 0) | ||
| 404 | return; | ||
| 405 | |||
| 406 | timer->interval = timer->expires; | 386 | timer->interval = timer->expires; |
| 407 | internal_add_vtimer(timer); | 387 | internal_add_vtimer(timer); |
| 408 | } | 388 | } |
| @@ -423,15 +403,8 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires) | |||
| 423 | unsigned long flags; | 403 | unsigned long flags; |
| 424 | int cpu; | 404 | int cpu; |
| 425 | 405 | ||
| 426 | if (!timer->function) { | 406 | BUG_ON(!timer->function); |
| 427 | printk("mod_virt_timer: uninitialized timer\n"); | 407 | BUG_ON(!expires || expires > VTIMER_MAX_SLICE); |
| 428 | return -EINVAL; | ||
| 429 | } | ||
| 430 | |||
| 431 | if (!expires || expires > VTIMER_MAX_SLICE) { | ||
| 432 | printk("mod_virt_timer: invalid expire range\n"); | ||
| 433 | return -EINVAL; | ||
| 434 | } | ||
| 435 | 408 | ||
| 436 | /* | 409 | /* |
| 437 | * This is a common optimization triggered by the | 410 | * This is a common optimization triggered by the |
| @@ -444,6 +417,9 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires) | |||
| 444 | cpu = get_cpu(); | 417 | cpu = get_cpu(); |
| 445 | vt_list = &per_cpu(virt_cpu_timer, cpu); | 418 | vt_list = &per_cpu(virt_cpu_timer, cpu); |
| 446 | 419 | ||
| 420 | /* check if we run on the right CPU */ | ||
| 421 | BUG_ON(timer->cpu != cpu); | ||
| 422 | |||
| 447 | /* disable interrupts before test if timer is pending */ | 423 | /* disable interrupts before test if timer is pending */ |
| 448 | spin_lock_irqsave(&vt_list->lock, flags); | 424 | spin_lock_irqsave(&vt_list->lock, flags); |
| 449 | 425 | ||
| @@ -458,14 +434,6 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires) | |||
| 458 | return 0; | 434 | return 0; |
| 459 | } | 435 | } |
| 460 | 436 | ||
| 461 | /* check if we run on the right CPU */ | ||
| 462 | if (timer->cpu != cpu) { | ||
| 463 | printk("mod_virt_timer: running on wrong CPU, check your code\n"); | ||
| 464 | spin_unlock_irqrestore(&vt_list->lock, flags); | ||
| 465 | put_cpu(); | ||
| 466 | return -EINVAL; | ||
| 467 | } | ||
| 468 | |||
| 469 | list_del_init(&timer->entry); | 437 | list_del_init(&timer->entry); |
| 470 | timer->expires = expires; | 438 | timer->expires = expires; |
| 471 | 439 | ||
| @@ -536,24 +504,6 @@ void init_cpu_vtimer(void) | |||
| 536 | 504 | ||
| 537 | } | 505 | } |
| 538 | 506 | ||
| 539 | static int vtimer_idle_notify(struct notifier_block *self, | ||
| 540 | unsigned long action, void *hcpu) | ||
| 541 | { | ||
| 542 | switch (action) { | ||
| 543 | case S390_CPU_IDLE: | ||
| 544 | stop_cpu_timer(); | ||
| 545 | break; | ||
| 546 | case S390_CPU_NOT_IDLE: | ||
| 547 | start_cpu_timer(); | ||
| 548 | break; | ||
| 549 | } | ||
| 550 | return NOTIFY_OK; | ||
| 551 | } | ||
| 552 | |||
| 553 | static struct notifier_block vtimer_idle_nb = { | ||
| 554 | .notifier_call = vtimer_idle_notify, | ||
| 555 | }; | ||
| 556 | |||
| 557 | void __init vtime_init(void) | 507 | void __init vtime_init(void) |
| 558 | { | 508 | { |
| 559 | /* request the cpu timer external interrupt */ | 509 | /* request the cpu timer external interrupt */ |
| @@ -561,9 +511,6 @@ void __init vtime_init(void) | |||
| 561 | &ext_int_info_timer) != 0) | 511 | &ext_int_info_timer) != 0) |
| 562 | panic("Couldn't request external interrupt 0x1005"); | 512 | panic("Couldn't request external interrupt 0x1005"); |
| 563 | 513 | ||
| 564 | if (register_idle_notifier(&vtimer_idle_nb)) | ||
| 565 | panic("Couldn't register idle notifier"); | ||
| 566 | |||
| 567 | /* Enable cpu timer interrupts on the boot cpu. */ | 514 | /* Enable cpu timer interrupts on the boot cpu. */ |
| 568 | init_cpu_vtimer(); | 515 | init_cpu_vtimer(); |
| 569 | } | 516 | } |
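The vtime.c hunks above drop the idle-notifier indirection: start_cpu_timer()/stop_cpu_timer() lose their static linkage and become vtime_start_cpu_timer()/vtime_stop_cpu_timer(), and the recoverable printk error paths turn into BUG_ON() assertions, since those conditions indicate caller bugs. A minimal sketch of how the exported helpers would be used from an idle loop; the real call sites live in the s390 idle code outside this hunk, so the surrounding function (example_cpu_idle) is illustrative only.

/* Illustrative sketch -- the real callers are in the s390 idle path, not shown here. */
extern void vtime_start_cpu_timer(void);
extern void vtime_stop_cpu_timer(void);

static void example_cpu_idle(void)
{
	vtime_stop_cpu_timer();		/* was: idle notifier -> stop_cpu_timer()  */
	/* ... CPU waits for an interrupt ... */
	vtime_start_cpu_timer();	/* was: idle notifier -> start_cpu_timer() */
}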
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 05598649b326..388cc7420055 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
| @@ -202,3 +202,22 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
| 202 | } | 202 | } |
| 203 | } | 203 | } |
| 204 | #endif | 204 | #endif |
| 205 | |||
| 206 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
| 207 | int arch_add_memory(int nid, u64 start, u64 size) | ||
| 208 | { | ||
| 209 | struct pglist_data *pgdat; | ||
| 210 | struct zone *zone; | ||
| 211 | int rc; | ||
| 212 | |||
| 213 | pgdat = NODE_DATA(nid); | ||
| 214 | zone = pgdat->node_zones + ZONE_NORMAL; | ||
| 215 | rc = vmem_add_mapping(start, size); | ||
| 216 | if (rc) | ||
| 217 | return rc; | ||
| 218 | rc = __add_pages(zone, PFN_DOWN(start), PFN_DOWN(size)); | ||
| 219 | if (rc) | ||
| 220 | vmem_remove_mapping(start, size); | ||
| 221 | return rc; | ||
| 222 | } | ||
| 223 | #endif /* CONFIG_MEMORY_HOTPLUG */ | ||
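arch_add_memory() above first establishes the kernel mapping with vmem_add_mapping() and only then hands the range to the generic hotplug core via __add_pages(), undoing the mapping if that fails. A rough sketch of how a detected standby range could reach this code; add_memory() and the sysfs online path belong to the generic memory-hotplug layer of this kernel generation (not part of the hunk), and example_add_standby_range is a hypothetical caller.

#include <linux/types.h>
#include <linux/memory_hotplug.h>

/* Illustrative only: hand a standby range to the generic core, which ends
 * up calling the arch_add_memory() defined above. */
static int example_add_standby_range(u64 start, u64 size)
{
	int rc;

	rc = add_memory(0, start, size);	/* node 0; s390 has a single node here */
	if (rc)
		return rc;
	/* The new sections still have to be onlined, e.g. via
	 * /sys/devices/system/memory/memoryX/state. */
	return 0;
}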
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 1a4025683362..1b6c52ef7339 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
| @@ -995,14 +995,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
| 995 | now = get_clock(); | 995 | now = get_clock(); |
| 996 | 996 | ||
| 997 | DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x", | 997 | DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x", |
| 998 | cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat), | 998 | cdev->dev.bus_id, ((irb->scsw.cmd.cstat << 8) | |
| 999 | (unsigned int) intparm); | 999 | irb->scsw.cmd.dstat), (unsigned int) intparm); |
| 1000 | 1000 | ||
| 1001 | /* check for unsolicited interrupts */ | 1001 | /* check for unsolicited interrupts */ |
| 1002 | cqr = (struct dasd_ccw_req *) intparm; | 1002 | cqr = (struct dasd_ccw_req *) intparm; |
| 1003 | if (!cqr || ((irb->scsw.cc == 1) && | 1003 | if (!cqr || ((irb->scsw.cmd.cc == 1) && |
| 1004 | (irb->scsw.fctl & SCSW_FCTL_START_FUNC) && | 1004 | (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && |
| 1005 | (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) { | 1005 | (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND))) { |
| 1006 | if (cqr && cqr->status == DASD_CQR_IN_IO) | 1006 | if (cqr && cqr->status == DASD_CQR_IN_IO) |
| 1007 | cqr->status = DASD_CQR_QUEUED; | 1007 | cqr->status = DASD_CQR_QUEUED; |
| 1008 | device = dasd_device_from_cdev_locked(cdev); | 1008 | device = dasd_device_from_cdev_locked(cdev); |
| @@ -1025,7 +1025,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
| 1025 | 1025 | ||
| 1026 | /* Check for clear pending */ | 1026 | /* Check for clear pending */ |
| 1027 | if (cqr->status == DASD_CQR_CLEAR_PENDING && | 1027 | if (cqr->status == DASD_CQR_CLEAR_PENDING && |
| 1028 | irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { | 1028 | irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) { |
| 1029 | cqr->status = DASD_CQR_CLEARED; | 1029 | cqr->status = DASD_CQR_CLEARED; |
| 1030 | dasd_device_clear_timer(device); | 1030 | dasd_device_clear_timer(device); |
| 1031 | wake_up(&dasd_flush_wq); | 1031 | wake_up(&dasd_flush_wq); |
| @@ -1041,11 +1041,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
| 1041 | return; | 1041 | return; |
| 1042 | } | 1042 | } |
| 1043 | DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p", | 1043 | DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p", |
| 1044 | ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr); | 1044 | ((irb->scsw.cmd.cstat << 8) | irb->scsw.cmd.dstat), cqr); |
| 1045 | next = NULL; | 1045 | next = NULL; |
| 1046 | expires = 0; | 1046 | expires = 0; |
| 1047 | if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && | 1047 | if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && |
| 1048 | irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) { | 1048 | irb->scsw.cmd.cstat == 0 && !irb->esw.esw0.erw.cons) { |
| 1049 | /* request was completed successfully */ | 1049 | /* request was completed successfully */ |
| 1050 | cqr->status = DASD_CQR_SUCCESS; | 1050 | cqr->status = DASD_CQR_SUCCESS; |
| 1051 | cqr->stopclk = now; | 1051 | cqr->stopclk = now; |
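From this point on the common-I/O users switch from irb->scsw.&lt;field&gt; to irb->scsw.cmd.&lt;field&gt;: the subchannel status word becomes a union of a command-mode and a transport-mode variant, and the classic fields move into the cmd member. A reduced sketch of the shape these accessors assume (union scsw_sketch is illustrative, the field list is abridged; the authoritative definition lives in the s390 cio headers):

/* Abridged sketch -- not the full kernel definition. */
union scsw_sketch {
	struct {			/* command-mode SCSW */
		unsigned int fctl  : 3;	/* function control           */
		unsigned int stctl : 5;	/* status control             */
		unsigned int cc    : 2;	/* deferred condition code    */
		unsigned int cpa;	/* channel program address    */
		unsigned int dstat : 8;	/* device status              */
		unsigned int cstat : 8;	/* subchannel status          */
		unsigned int count : 16;/* residual byte count        */
	} cmd;
	struct {			/* transport-mode SCSW (not used in these hunks) */
		unsigned int fctl  : 3;
	} tm;
};
/* Usage as in the hunks above and below:
 *	irb->scsw.cmd.dstat, irb->scsw.cmd.cstat, irb->scsw.cmd.cpa, ...
 */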
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index e6700df52df4..5c6e6f331cb0 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
| @@ -1572,7 +1572,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) | |||
| 1572 | 1572 | ||
| 1573 | /* determine the address of the CCW to be restarted */ | 1573 | /* determine the address of the CCW to be restarted */ |
| 1574 | /* Imprecise ending is not set -> addr from IRB-SCSW */ | 1574 | /* Imprecise ending is not set -> addr from IRB-SCSW */ |
| 1575 | cpa = default_erp->refers->irb.scsw.cpa; | 1575 | cpa = default_erp->refers->irb.scsw.cmd.cpa; |
| 1576 | 1576 | ||
| 1577 | if (cpa == 0) { | 1577 | if (cpa == 0) { |
| 1578 | 1578 | ||
| @@ -1725,7 +1725,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense) | |||
| 1725 | 1725 | ||
| 1726 | /* determine the address of the CCW to be restarted */ | 1726 | /* determine the address of the CCW to be restarted */ |
| 1727 | /* Imprecise ending is not set -> addr from IRB-SCSW */ | 1727 | /* Imprecise ending is not set -> addr from IRB-SCSW */ |
| 1728 | cpa = previous_erp->irb.scsw.cpa; | 1728 | cpa = previous_erp->irb.scsw.cmd.cpa; |
| 1729 | 1729 | ||
| 1730 | if (cpa == 0) { | 1730 | if (cpa == 0) { |
| 1731 | 1731 | ||
| @@ -2171,7 +2171,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp) | |||
| 2171 | { | 2171 | { |
| 2172 | struct dasd_device *device = erp->startdev; | 2172 | struct dasd_device *device = erp->startdev; |
| 2173 | 2173 | ||
| 2174 | if (erp->refers->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | 2174 | if (erp->refers->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
| 2175 | | SCHN_STAT_CHN_CTRL_CHK)) { | 2175 | | SCHN_STAT_CHN_CTRL_CHK)) { |
| 2176 | DEV_MESSAGE(KERN_DEBUG, device, "%s", | 2176 | DEV_MESSAGE(KERN_DEBUG, device, "%s", |
| 2177 | "channel or interface control check"); | 2177 | "channel or interface control check"); |
| @@ -2352,9 +2352,9 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2) | |||
| 2352 | 2352 | ||
| 2353 | if ((cqr1->irb.esw.esw0.erw.cons == 0) && | 2353 | if ((cqr1->irb.esw.esw0.erw.cons == 0) && |
| 2354 | (cqr2->irb.esw.esw0.erw.cons == 0)) { | 2354 | (cqr2->irb.esw.esw0.erw.cons == 0)) { |
| 2355 | if ((cqr1->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | | 2355 | if ((cqr1->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK | |
| 2356 | SCHN_STAT_CHN_CTRL_CHK)) == | 2356 | SCHN_STAT_CHN_CTRL_CHK)) == |
| 2357 | (cqr2->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK | | 2357 | (cqr2->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK | |
| 2358 | SCHN_STAT_CHN_CTRL_CHK))) | 2358 | SCHN_STAT_CHN_CTRL_CHK))) |
| 2359 | return 1; /* match with ifcc*/ | 2359 | return 1; /* match with ifcc*/ |
| 2360 | } | 2360 | } |
| @@ -2622,8 +2622,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) | |||
| 2622 | } | 2622 | } |
| 2623 | 2623 | ||
| 2624 | /* double-check if current erp/cqr was successful */ | 2624 | /* double-check if current erp/cqr was successful */ |
| 2625 | if ((cqr->irb.scsw.cstat == 0x00) && | 2625 | if ((cqr->irb.scsw.cmd.cstat == 0x00) && |
| 2626 | (cqr->irb.scsw.dstat == (DEV_STAT_CHN_END|DEV_STAT_DEV_END))) { | 2626 | (cqr->irb.scsw.cmd.dstat == |
| 2627 | (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { | ||
| 2627 | 2628 | ||
| 2628 | DEV_MESSAGE(KERN_DEBUG, device, | 2629 | DEV_MESSAGE(KERN_DEBUG, device, |
| 2629 | "ERP called for successful request %p" | 2630 | "ERP called for successful request %p" |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index a0edae091b5e..e0b77210d37a 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
| @@ -1404,13 +1404,14 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, | |||
| 1404 | 1404 | ||
| 1405 | /* first of all check for state change pending interrupt */ | 1405 | /* first of all check for state change pending interrupt */ |
| 1406 | mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; | 1406 | mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; |
| 1407 | if ((irb->scsw.dstat & mask) == mask) { | 1407 | if ((irb->scsw.cmd.dstat & mask) == mask) { |
| 1408 | dasd_generic_handle_state_change(device); | 1408 | dasd_generic_handle_state_change(device); |
| 1409 | return; | 1409 | return; |
| 1410 | } | 1410 | } |
| 1411 | 1411 | ||
| 1412 | /* summary unit check */ | 1412 | /* summary unit check */ |
| 1413 | if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && irb->ecw[7] == 0x0D) { | 1413 | if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && |
| 1414 | (irb->ecw[7] == 0x0D)) { | ||
| 1414 | dasd_alias_handle_summary_unit_check(device, irb); | 1415 | dasd_alias_handle_summary_unit_check(device, irb); |
| 1415 | return; | 1416 | return; |
| 1416 | } | 1417 | } |
| @@ -2068,11 +2069,11 @@ static void dasd_eckd_dump_sense(struct dasd_device *device, | |||
| 2068 | device->cdev->dev.bus_id); | 2069 | device->cdev->dev.bus_id); |
| 2069 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 2070 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
| 2070 | " in req: %p CS: 0x%02X DS: 0x%02X\n", req, | 2071 | " in req: %p CS: 0x%02X DS: 0x%02X\n", req, |
| 2071 | irb->scsw.cstat, irb->scsw.dstat); | 2072 | irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); |
| 2072 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 2073 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
| 2073 | " device %s: Failing CCW: %p\n", | 2074 | " device %s: Failing CCW: %p\n", |
| 2074 | device->cdev->dev.bus_id, | 2075 | device->cdev->dev.bus_id, |
| 2075 | (void *) (addr_t) irb->scsw.cpa); | 2076 | (void *) (addr_t) irb->scsw.cmd.cpa); |
| 2076 | if (irb->esw.esw0.erw.cons) { | 2077 | if (irb->esw.esw0.erw.cons) { |
| 2077 | for (sl = 0; sl < 4; sl++) { | 2078 | for (sl = 0; sl < 4; sl++) { |
| 2078 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 2079 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
| @@ -2122,7 +2123,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device, | |||
| 2122 | /* scsw->cda is either valid or zero */ | 2123 | /* scsw->cda is either valid or zero */ |
| 2123 | len = 0; | 2124 | len = 0; |
| 2124 | from = ++to; | 2125 | from = ++to; |
| 2125 | fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */ | 2126 | fail = (struct ccw1 *)(addr_t) |
| 2127 | irb->scsw.cmd.cpa; /* failing CCW */ | ||
| 2126 | if (from < fail - 2) { | 2128 | if (from < fail - 2) { |
| 2127 | from = fail - 2; /* there is a gap - print header */ | 2129 | from = fail - 2; /* there is a gap - print header */ |
| 2128 | len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); | 2130 | len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); |
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 116611583df8..aee4656127f7 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c | |||
| @@ -222,7 +222,7 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device, | |||
| 222 | 222 | ||
| 223 | /* first of all check for state change pending interrupt */ | 223 | /* first of all check for state change pending interrupt */ |
| 224 | mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; | 224 | mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; |
| 225 | if ((irb->scsw.dstat & mask) == mask) { | 225 | if ((irb->scsw.cmd.dstat & mask) == mask) { |
| 226 | dasd_generic_handle_state_change(device); | 226 | dasd_generic_handle_state_change(device); |
| 227 | return; | 227 | return; |
| 228 | } | 228 | } |
| @@ -449,11 +449,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, | |||
| 449 | device->cdev->dev.bus_id); | 449 | device->cdev->dev.bus_id); |
| 450 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 450 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
| 451 | " in req: %p CS: 0x%02X DS: 0x%02X\n", req, | 451 | " in req: %p CS: 0x%02X DS: 0x%02X\n", req, |
| 452 | irb->scsw.cstat, irb->scsw.dstat); | 452 | irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); |
| 453 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 453 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
| 454 | " device %s: Failing CCW: %p\n", | 454 | " device %s: Failing CCW: %p\n", |
| 455 | device->cdev->dev.bus_id, | 455 | device->cdev->dev.bus_id, |
| 456 | (void *) (addr_t) irb->scsw.cpa); | 456 | (void *) (addr_t) irb->scsw.cmd.cpa); |
| 457 | if (irb->esw.esw0.erw.cons) { | 457 | if (irb->esw.esw0.erw.cons) { |
| 458 | for (sl = 0; sl < 4; sl++) { | 458 | for (sl = 0; sl < 4; sl++) { |
| 459 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 459 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
| @@ -498,11 +498,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, | |||
| 498 | 498 | ||
| 499 | /* print failing CCW area */ | 499 | /* print failing CCW area */ |
| 500 | len = 0; | 500 | len = 0; |
| 501 | if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) { | 501 | if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) { |
| 502 | act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2; | 502 | act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2; |
| 503 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); | 503 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); |
| 504 | } | 504 | } |
| 505 | end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last); | 505 | end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last); |
| 506 | while (act <= end) { | 506 | while (act <= end) { |
| 507 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 507 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
| 508 | " CCW %p: %08X %08X DAT:", | 508 | " CCW %p: %08X %08X DAT:", |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index bb52d2fbac18..01fcdd91b846 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
| @@ -167,10 +167,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch | |||
| 167 | struct dcssblk_dev_info *dev_info; | 167 | struct dcssblk_dev_info *dev_info; |
| 168 | int rc; | 168 | int rc; |
| 169 | 169 | ||
| 170 | if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) { | 170 | if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) |
| 171 | PRINT_WARN("Invalid value, must be 0 or 1\n"); | ||
| 172 | return -EINVAL; | 171 | return -EINVAL; |
| 173 | } | ||
| 174 | down_write(&dcssblk_devices_sem); | 172 | down_write(&dcssblk_devices_sem); |
| 175 | dev_info = container_of(dev, struct dcssblk_dev_info, dev); | 173 | dev_info = container_of(dev, struct dcssblk_dev_info, dev); |
| 176 | if (atomic_read(&dev_info->use_count)) { | 174 | if (atomic_read(&dev_info->use_count)) { |
| @@ -215,7 +213,6 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch | |||
| 215 | set_disk_ro(dev_info->gd, 0); | 213 | set_disk_ro(dev_info->gd, 0); |
| 216 | } | 214 | } |
| 217 | } else { | 215 | } else { |
| 218 | PRINT_WARN("Invalid value, must be 0 or 1\n"); | ||
| 219 | rc = -EINVAL; | 216 | rc = -EINVAL; |
| 220 | goto out; | 217 | goto out; |
| 221 | } | 218 | } |
| @@ -258,10 +255,8 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char | |||
| 258 | { | 255 | { |
| 259 | struct dcssblk_dev_info *dev_info; | 256 | struct dcssblk_dev_info *dev_info; |
| 260 | 257 | ||
| 261 | if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) { | 258 | if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) |
| 262 | PRINT_WARN("Invalid value, must be 0 or 1\n"); | ||
| 263 | return -EINVAL; | 259 | return -EINVAL; |
| 264 | } | ||
| 265 | dev_info = container_of(dev, struct dcssblk_dev_info, dev); | 260 | dev_info = container_of(dev, struct dcssblk_dev_info, dev); |
| 266 | 261 | ||
| 267 | down_write(&dcssblk_devices_sem); | 262 | down_write(&dcssblk_devices_sem); |
| @@ -289,7 +284,6 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char | |||
| 289 | } | 284 | } |
| 290 | } else { | 285 | } else { |
| 291 | up_write(&dcssblk_devices_sem); | 286 | up_write(&dcssblk_devices_sem); |
| 292 | PRINT_WARN("Invalid value, must be 0 or 1\n"); | ||
| 293 | return -EINVAL; | 287 | return -EINVAL; |
| 294 | } | 288 | } |
| 295 | up_write(&dcssblk_devices_sem); | 289 | up_write(&dcssblk_devices_sem); |
| @@ -441,7 +435,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
| 441 | goto out; | 435 | goto out; |
| 442 | 436 | ||
| 443 | unregister_dev: | 437 | unregister_dev: |
| 444 | PRINT_ERR("device_create_file() failed!\n"); | ||
| 445 | list_del(&dev_info->lh); | 438 | list_del(&dev_info->lh); |
| 446 | blk_cleanup_queue(dev_info->dcssblk_queue); | 439 | blk_cleanup_queue(dev_info->dcssblk_queue); |
| 447 | dev_info->gd->queue = NULL; | 440 | dev_info->gd->queue = NULL; |
| @@ -702,10 +695,8 @@ dcssblk_check_params(void) | |||
| 702 | static void __exit | 695 | static void __exit |
| 703 | dcssblk_exit(void) | 696 | dcssblk_exit(void) |
| 704 | { | 697 | { |
| 705 | PRINT_DEBUG("DCSSBLOCK EXIT...\n"); | ||
| 706 | s390_root_dev_unregister(dcssblk_root_dev); | 698 | s390_root_dev_unregister(dcssblk_root_dev); |
| 707 | unregister_blkdev(dcssblk_major, DCSSBLK_NAME); | 699 | unregister_blkdev(dcssblk_major, DCSSBLK_NAME); |
| 708 | PRINT_DEBUG("...finished!\n"); | ||
| 709 | } | 700 | } |
| 710 | 701 | ||
| 711 | static int __init | 702 | static int __init |
| @@ -713,27 +704,21 @@ dcssblk_init(void) | |||
| 713 | { | 704 | { |
| 714 | int rc; | 705 | int rc; |
| 715 | 706 | ||
| 716 | PRINT_DEBUG("DCSSBLOCK INIT...\n"); | ||
| 717 | dcssblk_root_dev = s390_root_dev_register("dcssblk"); | 707 | dcssblk_root_dev = s390_root_dev_register("dcssblk"); |
| 718 | if (IS_ERR(dcssblk_root_dev)) { | 708 | if (IS_ERR(dcssblk_root_dev)) |
| 719 | PRINT_ERR("device_register() failed!\n"); | ||
| 720 | return PTR_ERR(dcssblk_root_dev); | 709 | return PTR_ERR(dcssblk_root_dev); |
| 721 | } | ||
| 722 | rc = device_create_file(dcssblk_root_dev, &dev_attr_add); | 710 | rc = device_create_file(dcssblk_root_dev, &dev_attr_add); |
| 723 | if (rc) { | 711 | if (rc) { |
| 724 | PRINT_ERR("device_create_file(add) failed!\n"); | ||
| 725 | s390_root_dev_unregister(dcssblk_root_dev); | 712 | s390_root_dev_unregister(dcssblk_root_dev); |
| 726 | return rc; | 713 | return rc; |
| 727 | } | 714 | } |
| 728 | rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); | 715 | rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); |
| 729 | if (rc) { | 716 | if (rc) { |
| 730 | PRINT_ERR("device_create_file(remove) failed!\n"); | ||
| 731 | s390_root_dev_unregister(dcssblk_root_dev); | 717 | s390_root_dev_unregister(dcssblk_root_dev); |
| 732 | return rc; | 718 | return rc; |
| 733 | } | 719 | } |
| 734 | rc = register_blkdev(0, DCSSBLK_NAME); | 720 | rc = register_blkdev(0, DCSSBLK_NAME); |
| 735 | if (rc < 0) { | 721 | if (rc < 0) { |
| 736 | PRINT_ERR("Can't get dynamic major!\n"); | ||
| 737 | s390_root_dev_unregister(dcssblk_root_dev); | 722 | s390_root_dev_unregister(dcssblk_root_dev); |
| 738 | return rc; | 723 | return rc; |
| 739 | } | 724 | } |
| @@ -742,7 +727,6 @@ dcssblk_init(void) | |||
| 742 | 727 | ||
| 743 | dcssblk_check_params(); | 728 | dcssblk_check_params(); |
| 744 | 729 | ||
| 745 | PRINT_DEBUG("...finished!\n"); | ||
| 746 | return 0; | 730 | return 0; |
| 747 | } | 731 | } |
| 748 | 732 | ||
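The dcssblk store routines above keep their input validation but drop the PRINT_* warnings, so an invalid value is now reported to user space only as -EINVAL. A minimal restatement of the accepted input ("0" or "1", optionally followed by a newline); example_parse_bool_attr is a hypothetical helper, not code from the driver.

#include <linux/types.h>
#include <linux/errno.h>

/* Illustrative parse helper mirroring the checks in the store functions above. */
static int example_parse_bool_attr(const char *inbuf, size_t count)
{
	if (count > 1 && inbuf[1] != '\n' && inbuf[1] != '\0')
		return -EINVAL;		/* more than one significant character */
	if (inbuf[0] == '1')
		return 1;
	if (inbuf[0] == '0')
		return 0;
	return -EINVAL;			/* rejected without a log message now */
}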
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index f231bc21b1ca..dd9b986389a2 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c | |||
| @@ -100,15 +100,10 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index) | |||
| 100 | : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc"); | 100 | : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc"); |
| 101 | if (cc == 3) | 101 | if (cc == 3) |
| 102 | return -ENXIO; | 102 | return -ENXIO; |
| 103 | if (cc == 2) { | 103 | if (cc == 2) |
| 104 | PRINT_ERR("expanded storage lost!\n"); | ||
| 105 | return -ENXIO; | 104 | return -ENXIO; |
| 106 | } | 105 | if (cc == 1) |
| 107 | if (cc == 1) { | ||
| 108 | PRINT_ERR("page in failed for page index %u.\n", | ||
| 109 | xpage_index); | ||
| 110 | return -EIO; | 106 | return -EIO; |
| 111 | } | ||
| 112 | return 0; | 107 | return 0; |
| 113 | } | 108 | } |
| 114 | 109 | ||
| @@ -135,15 +130,10 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index) | |||
| 135 | : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc"); | 130 | : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc"); |
| 136 | if (cc == 3) | 131 | if (cc == 3) |
| 137 | return -ENXIO; | 132 | return -ENXIO; |
| 138 | if (cc == 2) { | 133 | if (cc == 2) |
| 139 | PRINT_ERR("expanded storage lost!\n"); | ||
| 140 | return -ENXIO; | 134 | return -ENXIO; |
| 141 | } | 135 | if (cc == 1) |
| 142 | if (cc == 1) { | ||
| 143 | PRINT_ERR("page out failed for page index %u.\n", | ||
| 144 | xpage_index); | ||
| 145 | return -EIO; | 136 | return -EIO; |
| 146 | } | ||
| 147 | return 0; | 137 | return 0; |
| 148 | } | 138 | } |
| 149 | 139 | ||
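xpram now maps the condition code of the page-in/page-out operation straight to an errno without logging: cc 2 and 3 become -ENXIO, cc 1 becomes -EIO. The same mapping as a standalone helper, purely illustrative (example_xpram_cc_to_errno does not exist in the driver):

#include <linux/errno.h>

/* Illustrative: the cc-to-errno mapping used by xpram_page_in/xpram_page_out. */
static inline int example_xpram_cc_to_errno(int cc)
{
	switch (cc) {
	case 0:
		return 0;	/* transfer completed            */
	case 1:
		return -EIO;	/* page transfer failed          */
	case 2:			/* expanded storage lost         */
	case 3:			/* no/invalid expanded storage   */
		return -ENXIO;
	default:
		return -EIO;	/* cc is 0..3; kept for safety   */
	}
}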
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 3e5653c92f4b..d3ec9b55ab35 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c | |||
| @@ -93,9 +93,6 @@ struct raw3215_info { | |||
| 93 | struct raw3215_req *queued_write;/* pointer to queued write requests */ | 93 | struct raw3215_req *queued_write;/* pointer to queued write requests */ |
| 94 | wait_queue_head_t empty_wait; /* wait queue for flushing */ | 94 | wait_queue_head_t empty_wait; /* wait queue for flushing */ |
| 95 | struct timer_list timer; /* timer for delayed output */ | 95 | struct timer_list timer; /* timer for delayed output */ |
| 96 | char *message; /* pending message from raw3215_irq */ | ||
| 97 | int msg_dstat; /* dstat for pending message */ | ||
| 98 | int msg_cstat; /* cstat for pending message */ | ||
| 99 | int line_pos; /* position on the line (for tabs) */ | 96 | int line_pos; /* position on the line (for tabs) */ |
| 100 | char ubuffer[80]; /* copy_from_user buffer */ | 97 | char ubuffer[80]; /* copy_from_user buffer */ |
| 101 | }; | 98 | }; |
| @@ -359,11 +356,6 @@ raw3215_tasklet(void *data) | |||
| 359 | raw3215_mk_write_req(raw); | 356 | raw3215_mk_write_req(raw); |
| 360 | raw3215_try_io(raw); | 357 | raw3215_try_io(raw); |
| 361 | spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); | 358 | spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); |
| 362 | /* Check for pending message from raw3215_irq */ | ||
| 363 | if (raw->message != NULL) { | ||
| 364 | printk(raw->message, raw->msg_dstat, raw->msg_cstat); | ||
| 365 | raw->message = NULL; | ||
| 366 | } | ||
| 367 | tty = raw->tty; | 359 | tty = raw->tty; |
| 368 | if (tty != NULL && | 360 | if (tty != NULL && |
| 369 | RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) { | 361 | RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) { |
| @@ -381,20 +373,14 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 381 | struct raw3215_req *req; | 373 | struct raw3215_req *req; |
| 382 | struct tty_struct *tty; | 374 | struct tty_struct *tty; |
| 383 | int cstat, dstat; | 375 | int cstat, dstat; |
| 384 | int count, slen; | 376 | int count; |
| 385 | 377 | ||
| 386 | raw = cdev->dev.driver_data; | 378 | raw = cdev->dev.driver_data; |
| 387 | req = (struct raw3215_req *) intparm; | 379 | req = (struct raw3215_req *) intparm; |
| 388 | cstat = irb->scsw.cstat; | 380 | cstat = irb->scsw.cmd.cstat; |
| 389 | dstat = irb->scsw.dstat; | 381 | dstat = irb->scsw.cmd.dstat; |
| 390 | if (cstat != 0) { | 382 | if (cstat != 0) |
| 391 | raw->message = KERN_WARNING | ||
| 392 | "Got nonzero channel status in raw3215_irq " | ||
| 393 | "(dev sts 0x%2x, sch sts 0x%2x)"; | ||
| 394 | raw->msg_dstat = dstat; | ||
| 395 | raw->msg_cstat = cstat; | ||
| 396 | tasklet_schedule(&raw->tasklet); | 383 | tasklet_schedule(&raw->tasklet); |
| 397 | } | ||
| 398 | if (dstat & 0x01) { /* we got a unit exception */ | 384 | if (dstat & 0x01) { /* we got a unit exception */ |
| 399 | dstat &= ~0x01; /* we can ignore it */ | 385 | dstat &= ~0x01; /* we can ignore it */ |
| 400 | } | 386 | } |
| @@ -404,8 +390,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 404 | break; | 390 | break; |
| 405 | /* Attention interrupt, someone hit the enter key */ | 391 | /* Attention interrupt, someone hit the enter key */ |
| 406 | raw3215_mk_read_req(raw); | 392 | raw3215_mk_read_req(raw); |
| 407 | if (MACHINE_IS_P390) | ||
| 408 | memset(raw->inbuf, 0, RAW3215_INBUF_SIZE); | ||
| 409 | tasklet_schedule(&raw->tasklet); | 393 | tasklet_schedule(&raw->tasklet); |
| 410 | break; | 394 | break; |
| 411 | case 0x08: | 395 | case 0x08: |
| @@ -415,7 +399,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 415 | return; /* That shouldn't happen ... */ | 399 | return; /* That shouldn't happen ... */ |
| 416 | if (req->type == RAW3215_READ) { | 400 | if (req->type == RAW3215_READ) { |
| 417 | /* store residual count, then wait for device end */ | 401 | /* store residual count, then wait for device end */ |
| 418 | req->residual = irb->scsw.count; | 402 | req->residual = irb->scsw.cmd.count; |
| 419 | } | 403 | } |
| 420 | if (dstat == 0x08) | 404 | if (dstat == 0x08) |
| 421 | break; | 405 | break; |
| @@ -428,11 +412,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 428 | 412 | ||
| 429 | tty = raw->tty; | 413 | tty = raw->tty; |
| 430 | count = 160 - req->residual; | 414 | count = 160 - req->residual; |
| 431 | if (MACHINE_IS_P390) { | ||
| 432 | slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE); | ||
| 433 | if (count > slen) | ||
| 434 | count = slen; | ||
| 435 | } else | ||
| 436 | EBCASC(raw->inbuf, count); | 415 | EBCASC(raw->inbuf, count); |
| 437 | cchar = ctrlchar_handle(raw->inbuf, count, tty); | 416 | cchar = ctrlchar_handle(raw->inbuf, count, tty); |
| 438 | switch (cchar & CTRLCHAR_MASK) { | 417 | switch (cchar & CTRLCHAR_MASK) { |
| @@ -481,11 +460,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 481 | raw->flags &= ~RAW3215_WORKING; | 460 | raw->flags &= ~RAW3215_WORKING; |
| 482 | raw3215_free_req(req); | 461 | raw3215_free_req(req); |
| 483 | } | 462 | } |
| 484 | raw->message = KERN_WARNING | ||
| 485 | "Spurious interrupt in in raw3215_irq " | ||
| 486 | "(dev sts 0x%2x, sch sts 0x%2x)"; | ||
| 487 | raw->msg_dstat = dstat; | ||
| 488 | raw->msg_cstat = cstat; | ||
| 489 | tasklet_schedule(&raw->tasklet); | 463 | tasklet_schedule(&raw->tasklet); |
| 490 | } | 464 | } |
| 491 | return; | 465 | return; |
| @@ -883,7 +857,6 @@ con3215_init(void) | |||
| 883 | free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE); | 857 | free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE); |
| 884 | free_bootmem((unsigned long) raw, sizeof(struct raw3215_info)); | 858 | free_bootmem((unsigned long) raw, sizeof(struct raw3215_info)); |
| 885 | raw3215[0] = NULL; | 859 | raw3215[0] = NULL; |
| 886 | printk("Couldn't find a 3215 console device\n"); | ||
| 887 | return -ENODEV; | 860 | return -ENODEV; |
| 888 | } | 861 | } |
| 889 | register_console(&con3215); | 862 | register_console(&con3215); |
| @@ -1157,7 +1130,6 @@ tty3215_init(void) | |||
| 1157 | tty_set_operations(driver, &tty3215_ops); | 1130 | tty_set_operations(driver, &tty3215_ops); |
| 1158 | ret = tty_register_driver(driver); | 1131 | ret = tty_register_driver(driver); |
| 1159 | if (ret) { | 1132 | if (ret) { |
| 1160 | printk("Couldn't register tty3215 driver\n"); | ||
| 1161 | put_tty_driver(driver); | 1133 | put_tty_driver(driver); |
| 1162 | return ret; | 1134 | return ret; |
| 1163 | } | 1135 | } |
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 0b040557db02..3c07974886ed 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c | |||
| @@ -411,15 +411,15 @@ static int | |||
| 411 | con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb) | 411 | con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb) |
| 412 | { | 412 | { |
| 413 | /* Handle ATTN. Schedule tasklet to read aid. */ | 413 | /* Handle ATTN. Schedule tasklet to read aid. */ |
| 414 | if (irb->scsw.dstat & DEV_STAT_ATTENTION) | 414 | if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) |
| 415 | con3270_issue_read(cp); | 415 | con3270_issue_read(cp); |
| 416 | 416 | ||
| 417 | if (rq) { | 417 | if (rq) { |
| 418 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) | 418 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) |
| 419 | rq->rc = -EIO; | 419 | rq->rc = -EIO; |
| 420 | else | 420 | else |
| 421 | /* Normal end. Copy residual count. */ | 421 | /* Normal end. Copy residual count. */ |
| 422 | rq->rescnt = irb->scsw.count; | 422 | rq->rescnt = irb->scsw.cmd.count; |
| 423 | } | 423 | } |
| 424 | return RAW3270_IO_DONE; | 424 | return RAW3270_IO_DONE; |
| 425 | } | 425 | } |
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index ef36f2132aa4..e136d10a0de6 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c | |||
| @@ -216,17 +216,17 @@ static int | |||
| 216 | fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb) | 216 | fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb) |
| 217 | { | 217 | { |
| 218 | /* Handle ATTN. Set indication and wake waiters for attention. */ | 218 | /* Handle ATTN. Set indication and wake waiters for attention. */ |
| 219 | if (irb->scsw.dstat & DEV_STAT_ATTENTION) { | 219 | if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { |
| 220 | fp->attention = 1; | 220 | fp->attention = 1; |
| 221 | wake_up(&fp->wait); | 221 | wake_up(&fp->wait); |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | if (rq) { | 224 | if (rq) { |
| 225 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) | 225 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) |
| 226 | rq->rc = -EIO; | 226 | rq->rc = -EIO; |
| 227 | else | 227 | else |
| 228 | /* Normal end. Copy residual count. */ | 228 | /* Normal end. Copy residual count. */ |
| 229 | rq->rescnt = irb->scsw.count; | 229 | rq->rescnt = irb->scsw.cmd.count; |
| 230 | } | 230 | } |
| 231 | return RAW3270_IO_DONE; | 231 | return RAW3270_IO_DONE; |
| 232 | } | 232 | } |
| @@ -512,11 +512,8 @@ fs3270_init(void) | |||
| 512 | int rc; | 512 | int rc; |
| 513 | 513 | ||
| 514 | rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops); | 514 | rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops); |
| 515 | if (rc) { | 515 | if (rc) |
| 516 | printk(KERN_ERR "fs3270 can't get major number %d: errno %d\n", | ||
| 517 | IBM_FS3270_MAJOR, rc); | ||
| 518 | return rc; | 516 | return rc; |
| 519 | } | ||
| 520 | return 0; | 517 | return 0; |
| 521 | } | 518 | } |
| 522 | 519 | ||
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 1e1f50655bbf..f0e4c96afbf8 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c | |||
| @@ -3,9 +3,8 @@ | |||
| 3 | * | 3 | * |
| 4 | * Character device driver for reading z/VM *MONITOR service records. | 4 | * Character device driver for reading z/VM *MONITOR service records. |
| 5 | * | 5 | * |
| 6 | * Copyright 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH. | 6 | * Copyright IBM Corp. 2004, 2008 |
| 7 | * | 7 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> |
| 8 | * Author: Gerald Schaefer <geraldsc@de.ibm.com> | ||
| 9 | */ | 8 | */ |
| 10 | 9 | ||
| 11 | #include <linux/module.h> | 10 | #include <linux/module.h> |
| @@ -18,12 +17,11 @@ | |||
| 18 | #include <linux/ctype.h> | 17 | #include <linux/ctype.h> |
| 19 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
| 20 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
| 20 | #include <linux/poll.h> | ||
| 21 | #include <net/iucv/iucv.h> | ||
| 21 | #include <asm/uaccess.h> | 22 | #include <asm/uaccess.h> |
| 22 | #include <asm/ebcdic.h> | 23 | #include <asm/ebcdic.h> |
| 23 | #include <asm/extmem.h> | 24 | #include <asm/extmem.h> |
| 24 | #include <linux/poll.h> | ||
| 25 | #include <net/iucv/iucv.h> | ||
| 26 | |||
| 27 | 25 | ||
| 28 | //#define MON_DEBUG /* Debug messages on/off */ | 26 | //#define MON_DEBUG /* Debug messages on/off */ |
| 29 | 27 | ||
| @@ -152,10 +150,7 @@ static int mon_check_mca(struct mon_msg *monmsg) | |||
| 152 | (mon_mca_end(monmsg) > mon_dcss_end) || | 150 | (mon_mca_end(monmsg) > mon_dcss_end) || |
| 153 | (mon_mca_start(monmsg) < mon_dcss_start) || | 151 | (mon_mca_start(monmsg) < mon_dcss_start) || |
| 154 | ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0))) | 152 | ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0))) |
| 155 | { | ||
| 156 | P_DEBUG("READ, IGNORED INVALID MCA\n\n"); | ||
| 157 | return -EINVAL; | 153 | return -EINVAL; |
| 158 | } | ||
| 159 | return 0; | 154 | return 0; |
| 160 | } | 155 | } |
| 161 | 156 | ||
| @@ -164,10 +159,6 @@ static int mon_send_reply(struct mon_msg *monmsg, | |||
| 164 | { | 159 | { |
| 165 | int rc; | 160 | int rc; |
| 166 | 161 | ||
| 167 | P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = " | ||
| 168 | "0x%08X\n\n", | ||
| 169 | monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class); | ||
| 170 | |||
| 171 | rc = iucv_message_reply(monpriv->path, &monmsg->msg, | 162 | rc = iucv_message_reply(monpriv->path, &monmsg->msg, |
| 172 | IUCV_IPRMDATA, NULL, 0); | 163 | IUCV_IPRMDATA, NULL, 0); |
| 173 | atomic_dec(&monpriv->msglim_count); | 164 | atomic_dec(&monpriv->msglim_count); |
| @@ -202,15 +193,12 @@ static struct mon_private *mon_alloc_mem(void) | |||
| 202 | struct mon_private *monpriv; | 193 | struct mon_private *monpriv; |
| 203 | 194 | ||
| 204 | monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); | 195 | monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); |
| 205 | if (!monpriv) { | 196 | if (!monpriv) |
| 206 | P_ERROR("no memory for monpriv\n"); | ||
| 207 | return NULL; | 197 | return NULL; |
| 208 | } | ||
| 209 | for (i = 0; i < MON_MSGLIM; i++) { | 198 | for (i = 0; i < MON_MSGLIM; i++) { |
| 210 | monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg), | 199 | monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg), |
| 211 | GFP_KERNEL); | 200 | GFP_KERNEL); |
| 212 | if (!monpriv->msg_array[i]) { | 201 | if (!monpriv->msg_array[i]) { |
| 213 | P_ERROR("open, no memory for msg_array\n"); | ||
| 214 | mon_free_mem(monpriv); | 202 | mon_free_mem(monpriv); |
| 215 | return NULL; | 203 | return NULL; |
| 216 | } | 204 | } |
| @@ -218,41 +206,10 @@ static struct mon_private *mon_alloc_mem(void) | |||
| 218 | return monpriv; | 206 | return monpriv; |
| 219 | } | 207 | } |
| 220 | 208 | ||
| 221 | static inline void mon_read_debug(struct mon_msg *monmsg, | ||
| 222 | struct mon_private *monpriv) | ||
| 223 | { | ||
| 224 | #ifdef MON_DEBUG | ||
| 225 | u8 msg_type[2], mca_type; | ||
| 226 | unsigned long records_len; | ||
| 227 | |||
| 228 | records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1; | ||
| 229 | |||
| 230 | memcpy(msg_type, &monmsg->msg.class, 2); | ||
| 231 | EBCASC(msg_type, 2); | ||
| 232 | mca_type = mon_mca_type(monmsg, 0); | ||
| 233 | EBCASC(&mca_type, 1); | ||
| 234 | |||
| 235 | P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n", | ||
| 236 | monpriv->read_index, monpriv->write_index); | ||
| 237 | P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n", | ||
| 238 | monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class); | ||
| 239 | P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n", | ||
| 240 | msg_type[0], msg_type[1], mca_type ? mca_type : 'X', | ||
| 241 | mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2)); | ||
| 242 | P_DEBUG("read, MCA: start = 0x%lX, end = 0x%lX\n", | ||
| 243 | mon_mca_start(monmsg), mon_mca_end(monmsg)); | ||
| 244 | P_DEBUG("read, REC: start = 0x%X, end = 0x%X, len = %lu\n\n", | ||
| 245 | mon_rec_start(monmsg), mon_rec_end(monmsg), records_len); | ||
| 246 | if (mon_mca_size(monmsg) > 12) | ||
| 247 | P_DEBUG("READ, MORE THAN ONE MCA\n\n"); | ||
| 248 | #endif | ||
| 249 | } | ||
| 250 | |||
| 251 | static inline void mon_next_mca(struct mon_msg *monmsg) | 209 | static inline void mon_next_mca(struct mon_msg *monmsg) |
| 252 | { | 210 | { |
| 253 | if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12)) | 211 | if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12)) |
| 254 | return; | 212 | return; |
| 255 | P_DEBUG("READ, NEXT MCA\n\n"); | ||
| 256 | monmsg->mca_offset += 12; | 213 | monmsg->mca_offset += 12; |
| 257 | monmsg->pos = 0; | 214 | monmsg->pos = 0; |
| 258 | } | 215 | } |
| @@ -269,7 +226,6 @@ static struct mon_msg *mon_next_message(struct mon_private *monpriv) | |||
| 269 | monmsg->msglim_reached = 0; | 226 | monmsg->msglim_reached = 0; |
| 270 | monmsg->pos = 0; | 227 | monmsg->pos = 0; |
| 271 | monmsg->mca_offset = 0; | 228 | monmsg->mca_offset = 0; |
| 272 | P_WARNING("read, message limit reached\n"); | ||
| 273 | monpriv->read_index = (monpriv->read_index + 1) % | 229 | monpriv->read_index = (monpriv->read_index + 1) % |
| 274 | MON_MSGLIM; | 230 | MON_MSGLIM; |
| 275 | atomic_dec(&monpriv->read_ready); | 231 | atomic_dec(&monpriv->read_ready); |
| @@ -286,10 +242,6 @@ static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16]) | |||
| 286 | { | 242 | { |
| 287 | struct mon_private *monpriv = path->private; | 243 | struct mon_private *monpriv = path->private; |
| 288 | 244 | ||
| 289 | P_DEBUG("IUCV connection completed\n"); | ||
| 290 | P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = " | ||
| 291 | "0x%02X, Sample = 0x%02X\n", | ||
| 292 | ipuser[0], ipuser[1], ipuser[2]); | ||
| 293 | atomic_set(&monpriv->iucv_connected, 1); | 245 | atomic_set(&monpriv->iucv_connected, 1); |
| 294 | wake_up(&mon_conn_wait_queue); | 246 | wake_up(&mon_conn_wait_queue); |
| 295 | } | 247 | } |
| @@ -310,7 +262,6 @@ static void mon_iucv_message_pending(struct iucv_path *path, | |||
| 310 | { | 262 | { |
| 311 | struct mon_private *monpriv = path->private; | 263 | struct mon_private *monpriv = path->private; |
| 312 | 264 | ||
| 313 | P_DEBUG("IUCV message pending\n"); | ||
| 314 | memcpy(&monpriv->msg_array[monpriv->write_index]->msg, | 265 | memcpy(&monpriv->msg_array[monpriv->write_index]->msg, |
| 315 | msg, sizeof(*msg)); | 266 | msg, sizeof(*msg)); |
| 316 | if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { | 267 | if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { |
| @@ -375,7 +326,6 @@ static int mon_open(struct inode *inode, struct file *filp) | |||
| 375 | rc = -EIO; | 326 | rc = -EIO; |
| 376 | goto out_path; | 327 | goto out_path; |
| 377 | } | 328 | } |
| 378 | P_INFO("open, established connection to *MONITOR service\n\n"); | ||
| 379 | filp->private_data = monpriv; | 329 | filp->private_data = monpriv; |
| 380 | return nonseekable_open(inode, filp); | 330 | return nonseekable_open(inode, filp); |
| 381 | 331 | ||
| @@ -400,8 +350,6 @@ static int mon_close(struct inode *inode, struct file *filp) | |||
| 400 | rc = iucv_path_sever(monpriv->path, user_data_sever); | 350 | rc = iucv_path_sever(monpriv->path, user_data_sever); |
| 401 | if (rc) | 351 | if (rc) |
| 402 | P_ERROR("close, iucv_sever failed with rc = %i\n", rc); | 352 | P_ERROR("close, iucv_sever failed with rc = %i\n", rc); |
| 403 | else | ||
| 404 | P_INFO("close, terminated connection to *MONITOR service\n"); | ||
| 405 | 353 | ||
| 406 | atomic_set(&monpriv->iucv_severed, 0); | 354 | atomic_set(&monpriv->iucv_severed, 0); |
| 407 | atomic_set(&monpriv->iucv_connected, 0); | 355 | atomic_set(&monpriv->iucv_connected, 0); |
| @@ -442,10 +390,8 @@ static ssize_t mon_read(struct file *filp, char __user *data, | |||
| 442 | monmsg = monpriv->msg_array[monpriv->read_index]; | 390 | monmsg = monpriv->msg_array[monpriv->read_index]; |
| 443 | } | 391 | } |
| 444 | 392 | ||
| 445 | if (!monmsg->pos) { | 393 | if (!monmsg->pos) |
| 446 | monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset; | 394 | monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset; |
| 447 | mon_read_debug(monmsg, monpriv); | ||
| 448 | } | ||
| 449 | if (mon_check_mca(monmsg)) | 395 | if (mon_check_mca(monmsg)) |
| 450 | goto reply; | 396 | goto reply; |
| 451 | 397 | ||
| @@ -531,7 +477,6 @@ static int __init mon_init(void) | |||
| 531 | P_ERROR("failed to register with iucv driver\n"); | 477 | P_ERROR("failed to register with iucv driver\n"); |
| 532 | return rc; | 478 | return rc; |
| 533 | } | 479 | } |
| 534 | P_INFO("open, registered with IUCV\n"); | ||
| 535 | 480 | ||
| 536 | rc = segment_type(mon_dcss_name); | 481 | rc = segment_type(mon_dcss_name); |
| 537 | if (rc < 0) { | 482 | if (rc < 0) { |
| @@ -555,13 +500,8 @@ static int __init mon_init(void) | |||
| 555 | dcss_mkname(mon_dcss_name, &user_data_connect[8]); | 500 | dcss_mkname(mon_dcss_name, &user_data_connect[8]); |
| 556 | 501 | ||
| 557 | rc = misc_register(&mon_dev); | 502 | rc = misc_register(&mon_dev); |
| 558 | if (rc < 0 ) { | 503 | if (rc < 0 ) |
| 559 | P_ERROR("misc_register failed, rc = %i\n", rc); | ||
| 560 | goto out; | 504 | goto out; |
| 561 | } | ||
| 562 | P_INFO("Loaded segment %s from %p to %p, size = %lu Byte\n", | ||
| 563 | mon_dcss_name, (void *) mon_dcss_start, (void *) mon_dcss_end, | ||
| 564 | mon_dcss_end - mon_dcss_start + 1); | ||
| 565 | return 0; | 505 | return 0; |
| 566 | 506 | ||
| 567 | out: | 507 | out: |
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 848ef7e8523f..81a96e019080 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c | |||
| @@ -153,19 +153,10 @@ struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size) | |||
| 153 | struct raw3270_request *rq; | 153 | struct raw3270_request *rq; |
| 154 | 154 | ||
| 155 | rq = alloc_bootmem_low(sizeof(struct raw3270)); | 155 | rq = alloc_bootmem_low(sizeof(struct raw3270)); |
| 156 | if (!rq) | ||
| 157 | return ERR_PTR(-ENOMEM); | ||
| 158 | memset(rq, 0, sizeof(struct raw3270_request)); | ||
| 159 | 156 | ||
| 160 | /* alloc output buffer. */ | 157 | /* alloc output buffer. */ |
| 161 | if (size > 0) { | 158 | if (size > 0) |
| 162 | rq->buffer = alloc_bootmem_low(size); | 159 | rq->buffer = alloc_bootmem_low(size); |
| 163 | if (!rq->buffer) { | ||
| 164 | free_bootmem((unsigned long) rq, | ||
| 165 | sizeof(struct raw3270)); | ||
| 166 | return ERR_PTR(-ENOMEM); | ||
| 167 | } | ||
| 168 | } | ||
| 169 | rq->size = size; | 160 | rq->size = size; |
| 170 | INIT_LIST_HEAD(&rq->list); | 161 | INIT_LIST_HEAD(&rq->list); |
| 171 | 162 | ||
| @@ -372,17 +363,17 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 372 | 363 | ||
| 373 | if (IS_ERR(irb)) | 364 | if (IS_ERR(irb)) |
| 374 | rc = RAW3270_IO_RETRY; | 365 | rc = RAW3270_IO_RETRY; |
| 375 | else if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { | 366 | else if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { |
| 376 | rq->rc = -EIO; | 367 | rq->rc = -EIO; |
| 377 | rc = RAW3270_IO_DONE; | 368 | rc = RAW3270_IO_DONE; |
| 378 | } else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END | | 369 | } else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END | |
| 379 | DEV_STAT_UNIT_EXCEP)) { | 370 | DEV_STAT_UNIT_EXCEP)) { |
| 380 | /* Handle CE-DE-UE and subsequent UDE */ | 371 | /* Handle CE-DE-UE and subsequent UDE */ |
| 381 | set_bit(RAW3270_FLAGS_BUSY, &rp->flags); | 372 | set_bit(RAW3270_FLAGS_BUSY, &rp->flags); |
| 382 | rc = RAW3270_IO_BUSY; | 373 | rc = RAW3270_IO_BUSY; |
| 383 | } else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) { | 374 | } else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) { |
| 384 | /* Wait for UDE if busy flag is set. */ | 375 | /* Wait for UDE if busy flag is set. */ |
| 385 | if (irb->scsw.dstat & DEV_STAT_DEV_END) { | 376 | if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) { |
| 386 | clear_bit(RAW3270_FLAGS_BUSY, &rp->flags); | 377 | clear_bit(RAW3270_FLAGS_BUSY, &rp->flags); |
| 387 | /* Got it, now retry. */ | 378 | /* Got it, now retry. */ |
| 388 | rc = RAW3270_IO_RETRY; | 379 | rc = RAW3270_IO_RETRY; |
| @@ -497,7 +488,7 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq, | |||
| 497 | * Unit-Check Processing: | 488 | * Unit-Check Processing: |
| 498 | * Expect Command Reject or Intervention Required. | 489 | * Expect Command Reject or Intervention Required. |
| 499 | */ | 490 | */ |
| 500 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { | 491 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
| 501 | /* Request finished abnormally. */ | 492 | /* Request finished abnormally. */ |
| 502 | if (irb->ecw[0] & SNS0_INTERVENTION_REQ) { | 493 | if (irb->ecw[0] & SNS0_INTERVENTION_REQ) { |
| 503 | set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags); | 494 | set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags); |
| @@ -505,16 +496,16 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq, | |||
| 505 | } | 496 | } |
| 506 | } | 497 | } |
| 507 | if (rq) { | 498 | if (rq) { |
| 508 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { | 499 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
| 509 | if (irb->ecw[0] & SNS0_CMD_REJECT) | 500 | if (irb->ecw[0] & SNS0_CMD_REJECT) |
| 510 | rq->rc = -EOPNOTSUPP; | 501 | rq->rc = -EOPNOTSUPP; |
| 511 | else | 502 | else |
| 512 | rq->rc = -EIO; | 503 | rq->rc = -EIO; |
| 513 | } else | 504 | } else |
| 514 | /* Request finished normally. Copy residual count. */ | 505 | /* Request finished normally. Copy residual count. */ |
| 515 | rq->rescnt = irb->scsw.count; | 506 | rq->rescnt = irb->scsw.cmd.count; |
| 516 | } | 507 | } |
| 517 | if (irb->scsw.dstat & DEV_STAT_ATTENTION) { | 508 | if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { |
| 518 | set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags); | 509 | set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags); |
| 519 | wake_up(&raw3270_wait_queue); | 510 | wake_up(&raw3270_wait_queue); |
| 520 | } | 511 | } |
| @@ -619,7 +610,6 @@ __raw3270_size_device_vm(struct raw3270 *rp) | |||
| 619 | rp->cols = 132; | 610 | rp->cols = 132; |
| 620 | break; | 611 | break; |
| 621 | default: | 612 | default: |
| 622 | printk(KERN_WARNING "vrdccrmd is 0x%.8x\n", model); | ||
| 623 | rc = -EOPNOTSUPP; | 613 | rc = -EOPNOTSUPP; |
| 624 | break; | 614 | break; |
| 625 | } | 615 | } |
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 2c7a1ee6b041..3c8b25e6c345 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c | |||
| @@ -506,6 +506,8 @@ sclp_state_change_cb(struct evbuf_header *evbuf) | |||
| 506 | if (scbuf->validity_sclp_send_mask) | 506 | if (scbuf->validity_sclp_send_mask) |
| 507 | sclp_send_mask = scbuf->sclp_send_mask; | 507 | sclp_send_mask = scbuf->sclp_send_mask; |
| 508 | spin_unlock_irqrestore(&sclp_lock, flags); | 508 | spin_unlock_irqrestore(&sclp_lock, flags); |
| 509 | if (scbuf->validity_sclp_active_facility_mask) | ||
| 510 | sclp_facilities = scbuf->sclp_active_facility_mask; | ||
| 509 | sclp_dispatch_state_change(); | 511 | sclp_dispatch_state_change(); |
| 510 | } | 512 | } |
| 511 | 513 | ||
| @@ -782,11 +784,9 @@ sclp_check_handler(__u16 code) | |||
| 782 | /* Is this the interrupt we are waiting for? */ | 784 | /* Is this the interrupt we are waiting for? */ |
| 783 | if (finished_sccb == 0) | 785 | if (finished_sccb == 0) |
| 784 | return; | 786 | return; |
| 785 | if (finished_sccb != (u32) (addr_t) sclp_init_sccb) { | 787 | if (finished_sccb != (u32) (addr_t) sclp_init_sccb) |
| 786 | printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt " | 788 | panic("sclp: unsolicited interrupt for buffer at 0x%x\n", |
| 787 | "for buffer at 0x%x\n", finished_sccb); | 789 | finished_sccb); |
| 788 | return; | ||
| 789 | } | ||
| 790 | spin_lock(&sclp_lock); | 790 | spin_lock(&sclp_lock); |
| 791 | if (sclp_running_state == sclp_running_state_running) { | 791 | if (sclp_running_state == sclp_running_state_running) { |
| 792 | sclp_init_req.status = SCLP_REQ_DONE; | 792 | sclp_init_req.status = SCLP_REQ_DONE; |
| @@ -883,8 +883,6 @@ sclp_init(void) | |||
| 883 | unsigned long flags; | 883 | unsigned long flags; |
| 884 | int rc; | 884 | int rc; |
| 885 | 885 | ||
| 886 | if (!MACHINE_HAS_SCLP) | ||
| 887 | return -ENODEV; | ||
| 888 | spin_lock_irqsave(&sclp_lock, flags); | 886 | spin_lock_irqsave(&sclp_lock, flags); |
| 889 | /* Check for previous or running initialization */ | 887 | /* Check for previous or running initialization */ |
| 890 | if (sclp_init_state != sclp_init_state_uninitialized) { | 888 | if (sclp_init_state != sclp_init_state_uninitialized) { |
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index b5c23396f8fe..0c2b77493db4 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c | |||
| @@ -11,6 +11,9 @@ | |||
| 11 | #include <linux/errno.h> | 11 | #include <linux/errno.h> |
| 12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
| 13 | #include <linux/string.h> | 13 | #include <linux/string.h> |
| 14 | #include <linux/mm.h> | ||
| 15 | #include <linux/mmzone.h> | ||
| 16 | #include <linux/memory.h> | ||
| 14 | #include <asm/chpid.h> | 17 | #include <asm/chpid.h> |
| 15 | #include <asm/sclp.h> | 18 | #include <asm/sclp.h> |
| 16 | #include "sclp.h" | 19 | #include "sclp.h" |
| @@ -43,6 +46,8 @@ static int __initdata early_read_info_sccb_valid; | |||
| 43 | 46 | ||
| 44 | u64 sclp_facilities; | 47 | u64 sclp_facilities; |
| 45 | static u8 sclp_fac84; | 48 | static u8 sclp_fac84; |
| 49 | static unsigned long long rzm; | ||
| 50 | static unsigned long long rnmax; | ||
| 46 | 51 | ||
| 47 | static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) | 52 | static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) |
| 48 | { | 53 | { |
| @@ -62,7 +67,7 @@ out: | |||
| 62 | return rc; | 67 | return rc; |
| 63 | } | 68 | } |
| 64 | 69 | ||
| 65 | void __init sclp_read_info_early(void) | 70 | static void __init sclp_read_info_early(void) |
| 66 | { | 71 | { |
| 67 | int rc; | 72 | int rc; |
| 68 | int i; | 73 | int i; |
| @@ -92,34 +97,33 @@ void __init sclp_read_info_early(void) | |||
| 92 | 97 | ||
| 93 | void __init sclp_facilities_detect(void) | 98 | void __init sclp_facilities_detect(void) |
| 94 | { | 99 | { |
| 100 | struct read_info_sccb *sccb; | ||
| 101 | |||
| 102 | sclp_read_info_early(); | ||
| 95 | if (!early_read_info_sccb_valid) | 103 | if (!early_read_info_sccb_valid) |
| 96 | return; | 104 | return; |
| 97 | sclp_facilities = early_read_info_sccb.facilities; | 105 | |
| 98 | sclp_fac84 = early_read_info_sccb.fac84; | 106 | sccb = &early_read_info_sccb; |
| 107 | sclp_facilities = sccb->facilities; | ||
| 108 | sclp_fac84 = sccb->fac84; | ||
| 109 | rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; | ||
| 110 | rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; | ||
| 111 | rzm <<= 20; | ||
| 99 | } | 112 | } |
| 100 | 113 | ||
| 101 | unsigned long long __init sclp_memory_detect(void) | 114 | unsigned long long sclp_get_rnmax(void) |
| 102 | { | 115 | { |
| 103 | unsigned long long memsize; | 116 | return rnmax; |
| 104 | struct read_info_sccb *sccb; | 117 | } |
| 105 | 118 | ||
| 106 | if (!early_read_info_sccb_valid) | 119 | unsigned long long sclp_get_rzm(void) |
| 107 | return 0; | 120 | { |
| 108 | sccb = &early_read_info_sccb; | 121 | return rzm; |
| 109 | if (sccb->rnsize) | ||
| 110 | memsize = sccb->rnsize << 20; | ||
| 111 | else | ||
| 112 | memsize = sccb->rnsize2 << 20; | ||
| 113 | if (sccb->rnmax) | ||
| 114 | memsize *= sccb->rnmax; | ||
| 115 | else | ||
| 116 | memsize *= sccb->rnmax2; | ||
| 117 | return memsize; | ||
| 118 | } | 122 | } |
| 119 | 123 | ||
| 120 | /* | 124 | /* |
| 121 | * This function will be called after sclp_memory_detect(), which gets called | 125 | * This function will be called after sclp_facilities_detect(), which gets |
| 122 | * early from early.c code. Therefore the sccb should have valid contents. | 126 | * called from early.c code. Therefore the sccb should have valid contents. |
| 123 | */ | 127 | */ |
| 124 | void __init sclp_get_ipl_info(struct sclp_ipl_info *info) | 128 | void __init sclp_get_ipl_info(struct sclp_ipl_info *info) |
| 125 | { | 129 | { |
| @@ -278,6 +282,305 @@ int sclp_cpu_deconfigure(u8 cpu) | |||
| 278 | return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8); | 282 | return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8); |
| 279 | } | 283 | } |
| 280 | 284 | ||
| 285 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
| 286 | |||
| 287 | static DEFINE_MUTEX(sclp_mem_mutex); | ||
| 288 | static LIST_HEAD(sclp_mem_list); | ||
| 289 | static u8 sclp_max_storage_id; | ||
| 290 | static unsigned long sclp_storage_ids[256 / BITS_PER_LONG]; | ||
| 291 | |||
| 292 | struct memory_increment { | ||
| 293 | struct list_head list; | ||
| 294 | u16 rn; | ||
| 295 | int standby; | ||
| 296 | int usecount; | ||
| 297 | }; | ||
| 298 | |||
| 299 | struct assign_storage_sccb { | ||
| 300 | struct sccb_header header; | ||
| 301 | u16 rn; | ||
| 302 | } __packed; | ||
| 303 | |||
| 304 | static unsigned long long rn2addr(u16 rn) | ||
| 305 | { | ||
| 306 | return (unsigned long long) (rn - 1) * rzm; | ||
| 307 | } | ||
| 308 | |||
| 309 | static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) | ||
| 310 | { | ||
| 311 | struct assign_storage_sccb *sccb; | ||
| 312 | int rc; | ||
| 313 | |||
| 314 | sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
| 315 | if (!sccb) | ||
| 316 | return -ENOMEM; | ||
| 317 | sccb->header.length = PAGE_SIZE; | ||
| 318 | sccb->rn = rn; | ||
| 319 | rc = do_sync_request(cmd, sccb); | ||
| 320 | if (rc) | ||
| 321 | goto out; | ||
| 322 | switch (sccb->header.response_code) { | ||
| 323 | case 0x0020: | ||
| 324 | case 0x0120: | ||
| 325 | break; | ||
| 326 | default: | ||
| 327 | rc = -EIO; | ||
| 328 | break; | ||
| 329 | } | ||
| 330 | out: | ||
| 331 | free_page((unsigned long) sccb); | ||
| 332 | return rc; | ||
| 333 | } | ||
| 334 | |||
| 335 | static int sclp_assign_storage(u16 rn) | ||
| 336 | { | ||
| 337 | return do_assign_storage(0x000d0001, rn); | ||
| 338 | } | ||
| 339 | |||
| 340 | static int sclp_unassign_storage(u16 rn) | ||
| 341 | { | ||
| 342 | return do_assign_storage(0x000c0001, rn); | ||
| 343 | } | ||
| 344 | |||
| 345 | struct attach_storage_sccb { | ||
| 346 | struct sccb_header header; | ||
| 347 | u16 :16; | ||
| 348 | u16 assigned; | ||
| 349 | u32 :32; | ||
| 350 | u32 entries[0]; | ||
| 351 | } __packed; | ||
| 352 | |||
| 353 | static int sclp_attach_storage(u8 id) | ||
| 354 | { | ||
| 355 | struct attach_storage_sccb *sccb; | ||
| 356 | int rc; | ||
| 357 | int i; | ||
| 358 | |||
| 359 | sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
| 360 | if (!sccb) | ||
| 361 | return -ENOMEM; | ||
| 362 | sccb->header.length = PAGE_SIZE; | ||
| 363 | rc = do_sync_request(0x00080001 | id << 8, sccb); | ||
| 364 | if (rc) | ||
| 365 | goto out; | ||
| 366 | switch (sccb->header.response_code) { | ||
| 367 | case 0x0020: | ||
| 368 | set_bit(id, sclp_storage_ids); | ||
| 369 | for (i = 0; i < sccb->assigned; i++) | ||
| 370 | sclp_unassign_storage(sccb->entries[i] >> 16); | ||
| 371 | break; | ||
| 372 | default: | ||
| 373 | rc = -EIO; | ||
| 374 | break; | ||
| 375 | } | ||
| 376 | out: | ||
| 377 | free_page((unsigned long) sccb); | ||
| 378 | return rc; | ||
| 379 | } | ||
| 380 | |||
| 381 | static int sclp_mem_change_state(unsigned long start, unsigned long size, | ||
| 382 | int online) | ||
| 383 | { | ||
| 384 | struct memory_increment *incr; | ||
| 385 | unsigned long long istart; | ||
| 386 | int rc = 0; | ||
| 387 | |||
| 388 | list_for_each_entry(incr, &sclp_mem_list, list) { | ||
| 389 | istart = rn2addr(incr->rn); | ||
| 390 | if (start + size - 1 < istart) | ||
| 391 | break; | ||
| 392 | if (start > istart + rzm - 1) | ||
| 393 | continue; | ||
| 394 | if (online) { | ||
| 395 | if (incr->usecount++) | ||
| 396 | continue; | ||
| 397 | /* | ||
| 398 | * Don't break the loop if one assign fails. Loop may | ||
| 399 | * be walked again on CANCEL and we can't save | ||
| 400 | * information if state changed before or not. | ||
| 401 | * So continue and increase usecount for all increments. | ||
| 402 | */ | ||
| 403 | rc |= sclp_assign_storage(incr->rn); | ||
| 404 | } else { | ||
| 405 | if (--incr->usecount) | ||
| 406 | continue; | ||
| 407 | sclp_unassign_storage(incr->rn); | ||
| 408 | } | ||
| 409 | } | ||
| 410 | return rc ? -EIO : 0; | ||
| 411 | } | ||
| 412 | |||
| 413 | static int sclp_mem_notifier(struct notifier_block *nb, | ||
| 414 | unsigned long action, void *data) | ||
| 415 | { | ||
| 416 | unsigned long start, size; | ||
| 417 | struct memory_notify *arg; | ||
| 418 | unsigned char id; | ||
| 419 | int rc = 0; | ||
| 420 | |||
| 421 | arg = data; | ||
| 422 | start = arg->start_pfn << PAGE_SHIFT; | ||
| 423 | size = arg->nr_pages << PAGE_SHIFT; | ||
| 424 | mutex_lock(&sclp_mem_mutex); | ||
| 425 | for (id = 0; id <= sclp_max_storage_id; id++) | ||
| 426 | if (!test_bit(id, sclp_storage_ids)) | ||
| 427 | sclp_attach_storage(id); | ||
| 428 | switch (action) { | ||
| 429 | case MEM_ONLINE: | ||
| 430 | break; | ||
| 431 | case MEM_GOING_ONLINE: | ||
| 432 | rc = sclp_mem_change_state(start, size, 1); | ||
| 433 | break; | ||
| 434 | case MEM_CANCEL_ONLINE: | ||
| 435 | sclp_mem_change_state(start, size, 0); | ||
| 436 | break; | ||
| 437 | default: | ||
| 438 | rc = -EINVAL; | ||
| 439 | break; | ||
| 440 | } | ||
| 441 | mutex_unlock(&sclp_mem_mutex); | ||
| 442 | return rc ? NOTIFY_BAD : NOTIFY_OK; | ||
| 443 | } | ||
| 444 | |||
| 445 | static struct notifier_block sclp_mem_nb = { | ||
| 446 | .notifier_call = sclp_mem_notifier, | ||
| 447 | }; | ||
| 448 | |||
| 449 | static void __init add_memory_merged(u16 rn) | ||
| 450 | { | ||
| 451 | static u16 first_rn, num; | ||
| 452 | unsigned long long start, size; | ||
| 453 | |||
| 454 | if (rn && first_rn && (first_rn + num == rn)) { | ||
| 455 | num++; | ||
| 456 | return; | ||
| 457 | } | ||
| 458 | if (!first_rn) | ||
| 459 | goto skip_add; | ||
| 460 | start = rn2addr(first_rn); | ||
| 461 | size = (unsigned long long ) num * rzm; | ||
| 462 | if (start >= VMEM_MAX_PHYS) | ||
| 463 | goto skip_add; | ||
| 464 | if (start + size > VMEM_MAX_PHYS) | ||
| 465 | size = VMEM_MAX_PHYS - start; | ||
| 466 | add_memory(0, start, size); | ||
| 467 | skip_add: | ||
| 468 | first_rn = rn; | ||
| 469 | num = 1; | ||
| 470 | } | ||
| 471 | |||
| 472 | static void __init sclp_add_standby_memory(void) | ||
| 473 | { | ||
| 474 | struct memory_increment *incr; | ||
| 475 | |||
| 476 | list_for_each_entry(incr, &sclp_mem_list, list) | ||
| 477 | if (incr->standby) | ||
| 478 | add_memory_merged(incr->rn); | ||
| 479 | add_memory_merged(0); | ||
| 480 | } | ||
| 481 | |||
| 482 | static void __init insert_increment(u16 rn, int standby, int assigned) | ||
| 483 | { | ||
| 484 | struct memory_increment *incr, *new_incr; | ||
| 485 | struct list_head *prev; | ||
| 486 | u16 last_rn; | ||
| 487 | |||
| 488 | new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL); | ||
| 489 | if (!new_incr) | ||
| 490 | return; | ||
| 491 | new_incr->rn = rn; | ||
| 492 | new_incr->standby = standby; | ||
| 493 | last_rn = 0; | ||
| 494 | prev = &sclp_mem_list; | ||
| 495 | list_for_each_entry(incr, &sclp_mem_list, list) { | ||
| 496 | if (assigned && incr->rn > rn) | ||
| 497 | break; | ||
| 498 | if (!assigned && incr->rn - last_rn > 1) | ||
| 499 | break; | ||
| 500 | last_rn = incr->rn; | ||
| 501 | prev = &incr->list; | ||
| 502 | } | ||
| 503 | if (!assigned) | ||
| 504 | new_incr->rn = last_rn + 1; | ||
| 505 | if (new_incr->rn > rnmax) { | ||
| 506 | kfree(new_incr); | ||
| 507 | return; | ||
| 508 | } | ||
| 509 | list_add(&new_incr->list, prev); | ||
| 510 | } | ||
| 511 | |||
| 512 | struct read_storage_sccb { | ||
| 513 | struct sccb_header header; | ||
| 514 | u16 max_id; | ||
| 515 | u16 assigned; | ||
| 516 | u16 standby; | ||
| 517 | u16 :16; | ||
| 518 | u32 entries[0]; | ||
| 519 | } __packed; | ||
| 520 | |||
| 521 | static int __init sclp_detect_standby_memory(void) | ||
| 522 | { | ||
| 523 | struct read_storage_sccb *sccb; | ||
| 524 | int i, id, assigned, rc; | ||
| 525 | |||
| 526 | if (!early_read_info_sccb_valid) | ||
| 527 | return 0; | ||
| 528 | if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) | ||
| 529 | return 0; | ||
| 530 | rc = -ENOMEM; | ||
| 531 | sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA); | ||
| 532 | if (!sccb) | ||
| 533 | goto out; | ||
| 534 | assigned = 0; | ||
| 535 | for (id = 0; id <= sclp_max_storage_id; id++) { | ||
| 536 | memset(sccb, 0, PAGE_SIZE); | ||
| 537 | sccb->header.length = PAGE_SIZE; | ||
| 538 | rc = do_sync_request(0x00040001 | id << 8, sccb); | ||
| 539 | if (rc) | ||
| 540 | goto out; | ||
| 541 | switch (sccb->header.response_code) { | ||
| 542 | case 0x0010: | ||
| 543 | set_bit(id, sclp_storage_ids); | ||
| 544 | for (i = 0; i < sccb->assigned; i++) { | ||
| 545 | if (!sccb->entries[i]) | ||
| 546 | continue; | ||
| 547 | assigned++; | ||
| 548 | insert_increment(sccb->entries[i] >> 16, 0, 1); | ||
| 549 | } | ||
| 550 | break; | ||
| 551 | case 0x0310: | ||
| 552 | break; | ||
| 553 | case 0x0410: | ||
| 554 | for (i = 0; i < sccb->assigned; i++) { | ||
| 555 | if (!sccb->entries[i]) | ||
| 556 | continue; | ||
| 557 | assigned++; | ||
| 558 | insert_increment(sccb->entries[i] >> 16, 1, 1); | ||
| 559 | } | ||
| 560 | break; | ||
| 561 | default: | ||
| 562 | rc = -EIO; | ||
| 563 | break; | ||
| 564 | } | ||
| 565 | if (!rc) | ||
| 566 | sclp_max_storage_id = sccb->max_id; | ||
| 567 | } | ||
| 568 | if (rc || list_empty(&sclp_mem_list)) | ||
| 569 | goto out; | ||
| 570 | for (i = 1; i <= rnmax - assigned; i++) | ||
| 571 | insert_increment(0, 1, 0); | ||
| 572 | rc = register_memory_notifier(&sclp_mem_nb); | ||
| 573 | if (rc) | ||
| 574 | goto out; | ||
| 575 | sclp_add_standby_memory(); | ||
| 576 | out: | ||
| 577 | free_page((unsigned long) sccb); | ||
| 578 | return rc; | ||
| 579 | } | ||
| 580 | __initcall(sclp_detect_standby_memory); | ||
| 581 | |||
| 582 | #endif /* CONFIG_MEMORY_HOTPLUG */ | ||
| 583 | |||
| 281 | /* | 584 | /* |
| 282 | * Channel path configuration related functions. | 585 | * Channel path configuration related functions. |
| 283 | */ | 586 | */ |
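The memory hotplug support added above turns a storage increment number into a physical address with rn2addr() ((rn - 1) * rzm) and lets add_memory_merged() coalesce runs of consecutive standby increments, clamped at VMEM_MAX_PHYS, before calling add_memory(). A rough user-space sketch of that arithmetic follows; the increment size and addressing limit are invented stand-ins for rzm and VMEM_MAX_PHYS.

/*
 * Illustrative sketch only -- not part of the patch above. It mimics the
 * rn2addr() arithmetic and the way add_memory_merged() coalesces runs of
 * consecutive standby increments before handing them to add_memory().
 */
#include <stdio.h>

#define RZM            (256ULL << 20)   /* assumed 256 MB per increment */
#define VMEM_MAX_PHYS  (16ULL << 30)    /* assumed 16 GB addressing limit */

static unsigned long long rn2addr(unsigned short rn)
{
    /* increment numbers are 1-based, addresses are 0-based */
    return (unsigned long long)(rn - 1) * RZM;
}

static void add_merged(unsigned short first_rn, unsigned short num)
{
    unsigned long long start = rn2addr(first_rn);
    unsigned long long size = (unsigned long long)num * RZM;

    if (start >= VMEM_MAX_PHYS)
        return;                         /* beyond the addressable range */
    if (start + size > VMEM_MAX_PHYS)
        size = VMEM_MAX_PHYS - start;   /* clamp the tail */
    printf("add_memory(start=%#llx, size=%#llx)\n", start, size);
}

int main(void)
{
    /* standby increments as they might come back from the read storage call */
    unsigned short standby[] = { 5, 6, 7, 12, 13 };
    unsigned short first = 0, num = 0;
    size_t i;

    for (i = 0; i < sizeof(standby) / sizeof(standby[0]); i++) {
        if (first && standby[i] == first + num) {
            num++;              /* extend the current contiguous run */
            continue;
        }
        if (first)
            add_merged(first, num);
        first = standby[i];
        num = 1;
    }
    if (first)
        add_merged(first, num); /* flush the last run */
    return 0;
}

For the sample increments {5, 6, 7, 12, 13} this prints two merged regions, mirroring how the driver issues one add_memory() call per contiguous run of standby increments.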
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index ead1043d788e..7e619c534bf4 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c | |||
| @@ -14,14 +14,13 @@ | |||
| 14 | #include <linux/timer.h> | 14 | #include <linux/timer.h> |
| 15 | #include <linux/jiffies.h> | 15 | #include <linux/jiffies.h> |
| 16 | #include <linux/bootmem.h> | 16 | #include <linux/bootmem.h> |
| 17 | #include <linux/termios.h> | ||
| 17 | #include <linux/err.h> | 18 | #include <linux/err.h> |
| 18 | 19 | ||
| 19 | #include "sclp.h" | 20 | #include "sclp.h" |
| 20 | #include "sclp_rw.h" | 21 | #include "sclp_rw.h" |
| 21 | #include "sclp_tty.h" | 22 | #include "sclp_tty.h" |
| 22 | 23 | ||
| 23 | #define SCLP_CON_PRINT_HEADER "sclp console driver: " | ||
| 24 | |||
| 25 | #define sclp_console_major 4 /* TTYAUX_MAJOR */ | 24 | #define sclp_console_major 4 /* TTYAUX_MAJOR */ |
| 26 | #define sclp_console_minor 64 | 25 | #define sclp_console_minor 64 |
| 27 | #define sclp_console_name "ttyS" | 26 | #define sclp_console_name "ttyS" |
| @@ -222,8 +221,6 @@ sclp_console_init(void) | |||
| 222 | INIT_LIST_HEAD(&sclp_con_pages); | 221 | INIT_LIST_HEAD(&sclp_con_pages); |
| 223 | for (i = 0; i < MAX_CONSOLE_PAGES; i++) { | 222 | for (i = 0; i < MAX_CONSOLE_PAGES; i++) { |
| 224 | page = alloc_bootmem_low_pages(PAGE_SIZE); | 223 | page = alloc_bootmem_low_pages(PAGE_SIZE); |
| 225 | if (page == NULL) | ||
| 226 | return -ENOMEM; | ||
| 227 | list_add_tail((struct list_head *) page, &sclp_con_pages); | 224 | list_add_tail((struct list_head *) page, &sclp_con_pages); |
| 228 | } | 225 | } |
| 229 | INIT_LIST_HEAD(&sclp_con_outqueue); | 226 | INIT_LIST_HEAD(&sclp_con_outqueue); |
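The console init above no longer checks alloc_bootmem_low_pages() for NULL, presumably because the bootmem allocator cannot return NULL at that point, and simply threads each page onto a free list by using the start of the page as the list node. A small user-space sketch of that page-pool pattern; the pool size, names and page size are invented for the example.

/*
 * Illustrative sketch only. A few buffers are allocated once at startup and
 * chained onto a free list by treating the first bytes of each buffer as the
 * list node itself, similar to the console page pool above.
 */
#include <stdio.h>
#include <stdlib.h>

#define POOL_PAGES 4
#define PAGE_SIZE  4096

struct page_node {                      /* lives in the first bytes of the page */
    struct page_node *next;
};

static struct page_node *free_list;

static int pool_init(void)
{
    int i;

    for (i = 0; i < POOL_PAGES; i++) {
        void *page = calloc(1, PAGE_SIZE);
        if (!page)
            return -1;                  /* user space: allocation can fail here */
        ((struct page_node *)page)->next = free_list;
        free_list = page;               /* push onto the free list */
    }
    return 0;
}

static void *pool_get(void)
{
    struct page_node *node = free_list;

    if (!node)
        return NULL;                    /* pool exhausted, caller must wait */
    free_list = node->next;
    return node;
}

int main(void)
{
    if (pool_init())
        return 1;
    printf("first page from pool: %p\n", pool_get());
    return 0;
}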
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index ad05a87bc480..fff4ff485d9b 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
| 9 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
| 10 | #include <linux/cpu.h> | 10 | #include <linux/cpu.h> |
| 11 | #include <linux/kthread.h> | ||
| 11 | #include <linux/sysdev.h> | 12 | #include <linux/sysdev.h> |
| 12 | #include <linux/workqueue.h> | 13 | #include <linux/workqueue.h> |
| 13 | #include <asm/smp.h> | 14 | #include <asm/smp.h> |
| @@ -40,9 +41,19 @@ static void sclp_cpu_capability_notify(struct work_struct *work) | |||
| 40 | put_online_cpus(); | 41 | put_online_cpus(); |
| 41 | } | 42 | } |
| 42 | 43 | ||
| 43 | static void __ref sclp_cpu_change_notify(struct work_struct *work) | 44 | static int sclp_cpu_kthread(void *data) |
| 44 | { | 45 | { |
| 45 | smp_rescan_cpus(); | 46 | smp_rescan_cpus(); |
| 47 | return 0; | ||
| 48 | } | ||
| 49 | |||
| 50 | static void __ref sclp_cpu_change_notify(struct work_struct *work) | ||
| 51 | { | ||
| 52 | /* Can't call smp_rescan_cpus() from workqueue context since it may | ||
| 53 | * deadlock in case of cpu hotplug. So we have to create a kernel | ||
| 54 | * thread in order to call it. | ||
| 55 | */ | ||
| 56 | kthread_run(sclp_cpu_kthread, NULL, "cpu_rescan"); | ||
| 46 | } | 57 | } |
| 47 | 58 | ||
| 48 | static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) | 59 | static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) |
| @@ -74,10 +85,8 @@ static int __init sclp_conf_init(void) | |||
| 74 | INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify); | 85 | INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify); |
| 75 | 86 | ||
| 76 | rc = sclp_register(&sclp_conf_register); | 87 | rc = sclp_register(&sclp_conf_register); |
| 77 | if (rc) { | 88 | if (rc) |
| 78 | printk(KERN_ERR TAG "failed to register (%d).\n", rc); | ||
| 79 | return rc; | 89 | return rc; |
| 80 | } | ||
| 81 | 90 | ||
| 82 | if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { | 91 | if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { |
| 83 | printk(KERN_WARNING TAG "no configuration management.\n"); | 92 | printk(KERN_WARNING TAG "no configuration management.\n"); |
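The sclp_config.c change above explains why smp_rescan_cpus() is pushed out of the workqueue into a short-lived kernel thread: calling it directly from workqueue context could deadlock on CPU hotplug. A user-space analogue of that hand-off, with pthreads standing in for kthread_run() and a printf stub in place of the rescan:

/*
 * Illustrative sketch only. Work executed on a shared worker must not call a
 * function that can block on the same machinery, so the blocking call is
 * pushed into a thread of its own, mirroring kthread_run() above.
 */
#include <pthread.h>
#include <stdio.h>

static void *rescan_thread(void *data)
{
    (void)data;
    /* stands in for smp_rescan_cpus(), which may block for a long time */
    printf("rescanning cpus...\n");
    return NULL;
}

/* called in "workqueue" context -- must not block on cpu hotplug itself */
static void cpu_change_work(void)
{
    pthread_t tid;

    if (pthread_create(&tid, NULL, rescan_thread, NULL) == 0)
        pthread_detach(tid);            /* fire and forget, like kthread_run() */
}

int main(void)
{
    cpu_change_work();
    pthread_exit(NULL);                 /* let the detached thread finish */
}

Detaching the thread mirrors the fire-and-forget behaviour of kthread_run(): the work handler returns immediately and the rescan completes on its own.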
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c index 9f37456222e9..d887bd261d28 100644 --- a/drivers/s390/char/sclp_cpi_sys.c +++ b/drivers/s390/char/sclp_cpi_sys.c | |||
| @@ -27,6 +27,8 @@ | |||
| 27 | #define CPI_LENGTH_NAME 8 | 27 | #define CPI_LENGTH_NAME 8 |
| 28 | #define CPI_LENGTH_LEVEL 16 | 28 | #define CPI_LENGTH_LEVEL 16 |
| 29 | 29 | ||
| 30 | static DEFINE_MUTEX(sclp_cpi_mutex); | ||
| 31 | |||
| 30 | struct cpi_evbuf { | 32 | struct cpi_evbuf { |
| 31 | struct evbuf_header header; | 33 | struct evbuf_header header; |
| 32 | u8 id_format; | 34 | u8 id_format; |
| @@ -124,21 +126,15 @@ static int cpi_req(void) | |||
| 124 | int response; | 126 | int response; |
| 125 | 127 | ||
| 126 | rc = sclp_register(&sclp_cpi_event); | 128 | rc = sclp_register(&sclp_cpi_event); |
| 127 | if (rc) { | 129 | if (rc) |
| 128 | printk(KERN_WARNING "cpi: could not register " | ||
| 129 | "to hardware console.\n"); | ||
| 130 | goto out; | 130 | goto out; |
| 131 | } | ||
| 132 | if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) { | 131 | if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) { |
| 133 | printk(KERN_WARNING "cpi: no control program " | ||
| 134 | "identification support\n"); | ||
| 135 | rc = -EOPNOTSUPP; | 132 | rc = -EOPNOTSUPP; |
| 136 | goto out_unregister; | 133 | goto out_unregister; |
| 137 | } | 134 | } |
| 138 | 135 | ||
| 139 | req = cpi_prepare_req(); | 136 | req = cpi_prepare_req(); |
| 140 | if (IS_ERR(req)) { | 137 | if (IS_ERR(req)) { |
| 141 | printk(KERN_WARNING "cpi: could not allocate request\n"); | ||
| 142 | rc = PTR_ERR(req); | 138 | rc = PTR_ERR(req); |
| 143 | goto out_unregister; | 139 | goto out_unregister; |
| 144 | } | 140 | } |
| @@ -148,10 +144,8 @@ static int cpi_req(void) | |||
| 148 | 144 | ||
| 149 | /* Add request to sclp queue */ | 145 | /* Add request to sclp queue */ |
| 150 | rc = sclp_add_request(req); | 146 | rc = sclp_add_request(req); |
| 151 | if (rc) { | 147 | if (rc) |
| 152 | printk(KERN_WARNING "cpi: could not start request\n"); | ||
| 153 | goto out_free_req; | 148 | goto out_free_req; |
| 154 | } | ||
| 155 | 149 | ||
| 156 | wait_for_completion(&completion); | 150 | wait_for_completion(&completion); |
| 157 | 151 | ||
| @@ -223,7 +217,12 @@ static void set_string(char *attr, const char *value) | |||
| 223 | static ssize_t system_name_show(struct kobject *kobj, | 217 | static ssize_t system_name_show(struct kobject *kobj, |
| 224 | struct kobj_attribute *attr, char *page) | 218 | struct kobj_attribute *attr, char *page) |
| 225 | { | 219 | { |
| 226 | return snprintf(page, PAGE_SIZE, "%s\n", system_name); | 220 | int rc; |
| 221 | |||
| 222 | mutex_lock(&sclp_cpi_mutex); | ||
| 223 | rc = snprintf(page, PAGE_SIZE, "%s\n", system_name); | ||
| 224 | mutex_unlock(&sclp_cpi_mutex); | ||
| 225 | return rc; | ||
| 227 | } | 226 | } |
| 228 | 227 | ||
| 229 | static ssize_t system_name_store(struct kobject *kobj, | 228 | static ssize_t system_name_store(struct kobject *kobj, |
| @@ -237,7 +236,9 @@ static ssize_t system_name_store(struct kobject *kobj, | |||
| 237 | if (rc) | 236 | if (rc) |
| 238 | return rc; | 237 | return rc; |
| 239 | 238 | ||
| 239 | mutex_lock(&sclp_cpi_mutex); | ||
| 240 | set_string(system_name, buf); | 240 | set_string(system_name, buf); |
| 241 | mutex_unlock(&sclp_cpi_mutex); | ||
| 241 | 242 | ||
| 242 | return len; | 243 | return len; |
| 243 | } | 244 | } |
| @@ -248,7 +249,12 @@ static struct kobj_attribute system_name_attr = | |||
| 248 | static ssize_t sysplex_name_show(struct kobject *kobj, | 249 | static ssize_t sysplex_name_show(struct kobject *kobj, |
| 249 | struct kobj_attribute *attr, char *page) | 250 | struct kobj_attribute *attr, char *page) |
| 250 | { | 251 | { |
| 251 | return snprintf(page, PAGE_SIZE, "%s\n", sysplex_name); | 252 | int rc; |
| 253 | |||
| 254 | mutex_lock(&sclp_cpi_mutex); | ||
| 255 | rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name); | ||
| 256 | mutex_unlock(&sclp_cpi_mutex); | ||
| 257 | return rc; | ||
| 252 | } | 258 | } |
| 253 | 259 | ||
| 254 | static ssize_t sysplex_name_store(struct kobject *kobj, | 260 | static ssize_t sysplex_name_store(struct kobject *kobj, |
| @@ -262,7 +268,9 @@ static ssize_t sysplex_name_store(struct kobject *kobj, | |||
| 262 | if (rc) | 268 | if (rc) |
| 263 | return rc; | 269 | return rc; |
| 264 | 270 | ||
| 271 | mutex_lock(&sclp_cpi_mutex); | ||
| 265 | set_string(sysplex_name, buf); | 272 | set_string(sysplex_name, buf); |
| 273 | mutex_unlock(&sclp_cpi_mutex); | ||
| 266 | 274 | ||
| 267 | return len; | 275 | return len; |
| 268 | } | 276 | } |
| @@ -273,7 +281,12 @@ static struct kobj_attribute sysplex_name_attr = | |||
| 273 | static ssize_t system_type_show(struct kobject *kobj, | 281 | static ssize_t system_type_show(struct kobject *kobj, |
| 274 | struct kobj_attribute *attr, char *page) | 282 | struct kobj_attribute *attr, char *page) |
| 275 | { | 283 | { |
| 276 | return snprintf(page, PAGE_SIZE, "%s\n", system_type); | 284 | int rc; |
| 285 | |||
| 286 | mutex_lock(&sclp_cpi_mutex); | ||
| 287 | rc = snprintf(page, PAGE_SIZE, "%s\n", system_type); | ||
| 288 | mutex_unlock(&sclp_cpi_mutex); | ||
| 289 | return rc; | ||
| 277 | } | 290 | } |
| 278 | 291 | ||
| 279 | static ssize_t system_type_store(struct kobject *kobj, | 292 | static ssize_t system_type_store(struct kobject *kobj, |
| @@ -287,7 +300,9 @@ static ssize_t system_type_store(struct kobject *kobj, | |||
| 287 | if (rc) | 300 | if (rc) |
| 288 | return rc; | 301 | return rc; |
| 289 | 302 | ||
| 303 | mutex_lock(&sclp_cpi_mutex); | ||
| 290 | set_string(system_type, buf); | 304 | set_string(system_type, buf); |
| 305 | mutex_unlock(&sclp_cpi_mutex); | ||
| 291 | 306 | ||
| 292 | return len; | 307 | return len; |
| 293 | } | 308 | } |
| @@ -298,8 +313,11 @@ static struct kobj_attribute system_type_attr = | |||
| 298 | static ssize_t system_level_show(struct kobject *kobj, | 313 | static ssize_t system_level_show(struct kobject *kobj, |
| 299 | struct kobj_attribute *attr, char *page) | 314 | struct kobj_attribute *attr, char *page) |
| 300 | { | 315 | { |
| 301 | unsigned long long level = system_level; | 316 | unsigned long long level; |
| 302 | 317 | ||
| 318 | mutex_lock(&sclp_cpi_mutex); | ||
| 319 | level = system_level; | ||
| 320 | mutex_unlock(&sclp_cpi_mutex); | ||
| 303 | return snprintf(page, PAGE_SIZE, "%#018llx\n", level); | 321 | return snprintf(page, PAGE_SIZE, "%#018llx\n", level); |
| 304 | } | 322 | } |
| 305 | 323 | ||
| @@ -320,8 +338,9 @@ static ssize_t system_level_store(struct kobject *kobj, | |||
| 320 | if (*endp) | 338 | if (*endp) |
| 321 | return -EINVAL; | 339 | return -EINVAL; |
| 322 | 340 | ||
| 341 | mutex_lock(&sclp_cpi_mutex); | ||
| 323 | system_level = level; | 342 | system_level = level; |
| 324 | 343 | mutex_unlock(&sclp_cpi_mutex); | |
| 325 | return len; | 344 | return len; |
| 326 | } | 345 | } |
| 327 | 346 | ||
| @@ -334,7 +353,9 @@ static ssize_t set_store(struct kobject *kobj, | |||
| 334 | { | 353 | { |
| 335 | int rc; | 354 | int rc; |
| 336 | 355 | ||
| 356 | mutex_lock(&sclp_cpi_mutex); | ||
| 337 | rc = cpi_req(); | 357 | rc = cpi_req(); |
| 358 | mutex_unlock(&sclp_cpi_mutex); | ||
| 338 | if (rc) | 359 | if (rc) |
| 339 | return rc; | 360 | return rc; |
| 340 | 361 | ||
| @@ -373,12 +394,16 @@ int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type, | |||
| 373 | if (rc) | 394 | if (rc) |
| 374 | return rc; | 395 | return rc; |
| 375 | 396 | ||
| 397 | mutex_lock(&sclp_cpi_mutex); | ||
| 376 | set_string(system_name, system); | 398 | set_string(system_name, system); |
| 377 | set_string(sysplex_name, sysplex); | 399 | set_string(sysplex_name, sysplex); |
| 378 | set_string(system_type, type); | 400 | set_string(system_type, type); |
| 379 | system_level = level; | 401 | system_level = level; |
| 380 | 402 | ||
| 381 | return cpi_req(); | 403 | rc = cpi_req(); |
| 404 | mutex_unlock(&sclp_cpi_mutex); | ||
| 405 | |||
| 406 | return rc; | ||
| 382 | } | 407 | } |
| 383 | EXPORT_SYMBOL(sclp_cpi_set_data); | 408 | EXPORT_SYMBOL(sclp_cpi_set_data); |
| 384 | 409 | ||
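The sclp_cpi_sys.c hunks above serialize all show/store handlers with one mutex so a reader never sees a half-updated CPI string. A compact user-space sketch of the same pattern; the pthread mutex, buffer length and sample values stand in for the kernel primitives and are not taken from the driver.

/*
 * Illustrative sketch only: every show/store pair takes the same mutex
 * around accesses to the shared string, as sclp_cpi_mutex does above.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NAME_LEN 9                      /* 8 characters + NUL, like CPI names */

static pthread_mutex_t cpi_mutex = PTHREAD_MUTEX_INITIALIZER;
static char system_name[NAME_LEN] = "LINUX";

static int system_name_show(char *page, size_t len)
{
    int rc;

    pthread_mutex_lock(&cpi_mutex);
    rc = snprintf(page, len, "%s\n", system_name);
    pthread_mutex_unlock(&cpi_mutex);
    return rc;
}

static void system_name_store(const char *buf)
{
    pthread_mutex_lock(&cpi_mutex);
    strncpy(system_name, buf, NAME_LEN - 1);
    system_name[NAME_LEN - 1] = '\0';
    pthread_mutex_unlock(&cpi_mutex);
}

int main(void)
{
    char page[64];

    system_name_store("S390BOX");
    system_name_show(page, sizeof(page));
    fputs(page, stdout);
    return 0;
}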
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c index 45ff25e787cb..84c191c1cd62 100644 --- a/drivers/s390/char/sclp_quiesce.c +++ b/drivers/s390/char/sclp_quiesce.c | |||
| @@ -51,13 +51,7 @@ static struct sclp_register sclp_quiesce_event = { | |||
| 51 | static int __init | 51 | static int __init |
| 52 | sclp_quiesce_init(void) | 52 | sclp_quiesce_init(void) |
| 53 | { | 53 | { |
| 54 | int rc; | 54 | return sclp_register(&sclp_quiesce_event); |
| 55 | |||
| 56 | rc = sclp_register(&sclp_quiesce_event); | ||
| 57 | if (rc) | ||
| 58 | printk(KERN_WARNING "sclp: could not register quiesce handler " | ||
| 59 | "(rc=%d)\n", rc); | ||
| 60 | return rc; | ||
| 61 | } | 55 | } |
| 62 | 56 | ||
| 63 | module_init(sclp_quiesce_init); | 57 | module_init(sclp_quiesce_init); |
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index da09781b32f7..710af42603f8 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c | |||
| @@ -19,8 +19,6 @@ | |||
| 19 | #include "sclp.h" | 19 | #include "sclp.h" |
| 20 | #include "sclp_rw.h" | 20 | #include "sclp_rw.h" |
| 21 | 21 | ||
| 22 | #define SCLP_RW_PRINT_HEADER "sclp low level driver: " | ||
| 23 | |||
| 24 | /* | 22 | /* |
| 25 | * The room for the SCCB (only for writing) is not equal to a pages size | 23 | * The room for the SCCB (only for writing) is not equal to a pages size |
| 26 | * (as it is specified as the maximum size in the SCLP documentation) | 24 | * (as it is specified as the maximum size in the SCLP documentation) |
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index 1c064976b32b..8b854857ba07 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c | |||
| @@ -239,10 +239,8 @@ int __init sclp_sdias_init(void) | |||
| 239 | debug_register_view(sdias_dbf, &debug_sprintf_view); | 239 | debug_register_view(sdias_dbf, &debug_sprintf_view); |
| 240 | debug_set_level(sdias_dbf, 6); | 240 | debug_set_level(sdias_dbf, 6); |
| 241 | rc = sclp_register(&sclp_sdias_register); | 241 | rc = sclp_register(&sclp_sdias_register); |
| 242 | if (rc) { | 242 | if (rc) |
| 243 | ERROR_MSG("sclp register failed\n"); | ||
| 244 | return rc; | 243 | return rc; |
| 245 | } | ||
| 246 | init_waitqueue_head(&sdias_wq); | 244 | init_waitqueue_head(&sdias_wq); |
| 247 | TRACE("init done\n"); | 245 | TRACE("init done\n"); |
| 248 | return 0; | 246 | return 0; |
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 40b11521cd20..434ba04b1309 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c | |||
| @@ -13,7 +13,6 @@ | |||
| 13 | #include <linux/tty.h> | 13 | #include <linux/tty.h> |
| 14 | #include <linux/tty_driver.h> | 14 | #include <linux/tty_driver.h> |
| 15 | #include <linux/tty_flip.h> | 15 | #include <linux/tty_flip.h> |
| 16 | #include <linux/wait.h> | ||
| 17 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
| 18 | #include <linux/err.h> | 17 | #include <linux/err.h> |
| 19 | #include <linux/init.h> | 18 | #include <linux/init.h> |
| @@ -25,8 +24,6 @@ | |||
| 25 | #include "sclp_rw.h" | 24 | #include "sclp_rw.h" |
| 26 | #include "sclp_tty.h" | 25 | #include "sclp_tty.h" |
| 27 | 26 | ||
| 28 | #define SCLP_TTY_PRINT_HEADER "sclp tty driver: " | ||
| 29 | |||
| 30 | /* | 27 | /* |
| 31 | * size of a buffer that collects single characters coming in | 28 | * size of a buffer that collects single characters coming in |
| 32 | * via sclp_tty_put_char() | 29 | * via sclp_tty_put_char() |
| @@ -50,8 +47,6 @@ static int sclp_tty_buffer_count; | |||
| 50 | static struct sclp_buffer *sclp_ttybuf; | 47 | static struct sclp_buffer *sclp_ttybuf; |
| 51 | /* Timer for delayed output of console messages. */ | 48 | /* Timer for delayed output of console messages. */ |
| 52 | static struct timer_list sclp_tty_timer; | 49 | static struct timer_list sclp_tty_timer; |
| 53 | /* Waitqueue to wait for buffers to get empty. */ | ||
| 54 | static wait_queue_head_t sclp_tty_waitq; | ||
| 55 | 50 | ||
| 56 | static struct tty_struct *sclp_tty; | 51 | static struct tty_struct *sclp_tty; |
| 57 | static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE]; | 52 | static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE]; |
| @@ -59,19 +54,11 @@ static unsigned short int sclp_tty_chars_count; | |||
| 59 | 54 | ||
| 60 | struct tty_driver *sclp_tty_driver; | 55 | struct tty_driver *sclp_tty_driver; |
| 61 | 56 | ||
| 62 | static struct sclp_ioctls sclp_ioctls; | 57 | static int sclp_tty_tolower; |
| 63 | static struct sclp_ioctls sclp_ioctls_init = | 58 | static int sclp_tty_columns = 80; |
| 64 | { | 59 | |
| 65 | 8, /* 1 hor. tab. = 8 spaces */ | 60 | #define SPACES_PER_TAB 8 |
| 66 | 0, /* no echo of input by this driver */ | 61 | #define CASE_DELIMITER 0x6c /* to separate upper and lower case (% in EBCDIC) */ |
| 67 | 80, /* 80 characters/line */ | ||
| 68 | 1, /* write after 1/10 s without final new line */ | ||
| 69 | MAX_KMEM_PAGES, /* quick fix: avoid __alloc_pages */ | ||
| 70 | MAX_KMEM_PAGES, /* take 32/64 pages from kernel memory, */ | ||
| 71 | 0, /* do not convert to lower case */ | ||
| 72 | 0x6c /* to seprate upper and lower case */ | ||
| 73 | /* ('%' in EBCDIC) */ | ||
| 74 | }; | ||
| 75 | 62 | ||
| 76 | /* This routine is called whenever we try to open a SCLP terminal. */ | 63 | /* This routine is called whenever we try to open a SCLP terminal. */ |
| 77 | static int | 64 | static int |
| @@ -92,136 +79,6 @@ sclp_tty_close(struct tty_struct *tty, struct file *filp) | |||
| 92 | sclp_tty = NULL; | 79 | sclp_tty = NULL; |
| 93 | } | 80 | } |
| 94 | 81 | ||
| 95 | /* execute commands to control the i/o behaviour of the SCLP tty at runtime */ | ||
| 96 | static int | ||
| 97 | sclp_tty_ioctl(struct tty_struct *tty, struct file * file, | ||
| 98 | unsigned int cmd, unsigned long arg) | ||
| 99 | { | ||
| 100 | unsigned long flags; | ||
| 101 | unsigned int obuf; | ||
| 102 | int check; | ||
| 103 | int rc; | ||
| 104 | |||
| 105 | if (tty->flags & (1 << TTY_IO_ERROR)) | ||
| 106 | return -EIO; | ||
| 107 | rc = 0; | ||
| 108 | check = 0; | ||
| 109 | switch (cmd) { | ||
| 110 | case TIOCSCLPSHTAB: | ||
| 111 | /* set width of horizontal tab */ | ||
| 112 | if (get_user(sclp_ioctls.htab, (unsigned short __user *) arg)) | ||
| 113 | rc = -EFAULT; | ||
| 114 | else | ||
| 115 | check = 1; | ||
| 116 | break; | ||
| 117 | case TIOCSCLPGHTAB: | ||
| 118 | /* get width of horizontal tab */ | ||
| 119 | if (put_user(sclp_ioctls.htab, (unsigned short __user *) arg)) | ||
| 120 | rc = -EFAULT; | ||
| 121 | break; | ||
| 122 | case TIOCSCLPSECHO: | ||
| 123 | /* enable/disable echo of input */ | ||
| 124 | if (get_user(sclp_ioctls.echo, (unsigned char __user *) arg)) | ||
| 125 | rc = -EFAULT; | ||
| 126 | break; | ||
| 127 | case TIOCSCLPGECHO: | ||
| 128 | /* Is echo of input enabled ? */ | ||
| 129 | if (put_user(sclp_ioctls.echo, (unsigned char __user *) arg)) | ||
| 130 | rc = -EFAULT; | ||
| 131 | break; | ||
| 132 | case TIOCSCLPSCOLS: | ||
| 133 | /* set number of columns for output */ | ||
| 134 | if (get_user(sclp_ioctls.columns, (unsigned short __user *) arg)) | ||
| 135 | rc = -EFAULT; | ||
| 136 | else | ||
| 137 | check = 1; | ||
| 138 | break; | ||
| 139 | case TIOCSCLPGCOLS: | ||
| 140 | /* get number of columns for output */ | ||
| 141 | if (put_user(sclp_ioctls.columns, (unsigned short __user *) arg)) | ||
| 142 | rc = -EFAULT; | ||
| 143 | break; | ||
| 144 | case TIOCSCLPSNL: | ||
| 145 | /* enable/disable writing without final new line character */ | ||
| 146 | if (get_user(sclp_ioctls.final_nl, (signed char __user *) arg)) | ||
| 147 | rc = -EFAULT; | ||
| 148 | break; | ||
| 149 | case TIOCSCLPGNL: | ||
| 150 | /* Is writing without final new line character enabled ? */ | ||
| 151 | if (put_user(sclp_ioctls.final_nl, (signed char __user *) arg)) | ||
| 152 | rc = -EFAULT; | ||
| 153 | break; | ||
| 154 | case TIOCSCLPSOBUF: | ||
| 155 | /* | ||
| 156 | * set the maximum buffers size for output, will be rounded | ||
| 157 | * up to next 4kB boundary and stored as number of SCCBs | ||
| 158 | * (4kB Buffers) limitation: 256 x 4kB | ||
| 159 | */ | ||
| 160 | if (get_user(obuf, (unsigned int __user *) arg) == 0) { | ||
| 161 | if (obuf & 0xFFF) | ||
| 162 | sclp_ioctls.max_sccb = (obuf >> 12) + 1; | ||
| 163 | else | ||
| 164 | sclp_ioctls.max_sccb = (obuf >> 12); | ||
| 165 | } else | ||
| 166 | rc = -EFAULT; | ||
| 167 | break; | ||
| 168 | case TIOCSCLPGOBUF: | ||
| 169 | /* get the maximum buffers size for output */ | ||
| 170 | obuf = sclp_ioctls.max_sccb << 12; | ||
| 171 | if (put_user(obuf, (unsigned int __user *) arg)) | ||
| 172 | rc = -EFAULT; | ||
| 173 | break; | ||
| 174 | case TIOCSCLPGKBUF: | ||
| 175 | /* get the number of buffers got from kernel at startup */ | ||
| 176 | if (put_user(sclp_ioctls.kmem_sccb, (unsigned short __user *) arg)) | ||
| 177 | rc = -EFAULT; | ||
| 178 | break; | ||
| 179 | case TIOCSCLPSCASE: | ||
| 180 | /* enable/disable conversion from upper to lower case */ | ||
| 181 | if (get_user(sclp_ioctls.tolower, (unsigned char __user *) arg)) | ||
| 182 | rc = -EFAULT; | ||
| 183 | break; | ||
| 184 | case TIOCSCLPGCASE: | ||
| 185 | /* Is conversion from upper to lower case of input enabled? */ | ||
| 186 | if (put_user(sclp_ioctls.tolower, (unsigned char __user *) arg)) | ||
| 187 | rc = -EFAULT; | ||
| 188 | break; | ||
| 189 | case TIOCSCLPSDELIM: | ||
| 190 | /* | ||
| 191 | * set special character used for separating upper and | ||
| 192 | * lower case, 0x00 disables this feature | ||
| 193 | */ | ||
| 194 | if (get_user(sclp_ioctls.delim, (unsigned char __user *) arg)) | ||
| 195 | rc = -EFAULT; | ||
| 196 | break; | ||
| 197 | case TIOCSCLPGDELIM: | ||
| 198 | /* | ||
| 199 | * get special character used for separating upper and | ||
| 200 | * lower case, 0x00 disables this feature | ||
| 201 | */ | ||
| 202 | if (put_user(sclp_ioctls.delim, (unsigned char __user *) arg)) | ||
| 203 | rc = -EFAULT; | ||
| 204 | break; | ||
| 205 | case TIOCSCLPSINIT: | ||
| 206 | /* set initial (default) sclp ioctls */ | ||
| 207 | sclp_ioctls = sclp_ioctls_init; | ||
| 208 | check = 1; | ||
| 209 | break; | ||
| 210 | default: | ||
| 211 | rc = -ENOIOCTLCMD; | ||
| 212 | break; | ||
| 213 | } | ||
| 214 | if (check) { | ||
| 215 | spin_lock_irqsave(&sclp_tty_lock, flags); | ||
| 216 | if (sclp_ttybuf != NULL) { | ||
| 217 | sclp_set_htab(sclp_ttybuf, sclp_ioctls.htab); | ||
| 218 | sclp_set_columns(sclp_ttybuf, sclp_ioctls.columns); | ||
| 219 | } | ||
| 220 | spin_unlock_irqrestore(&sclp_tty_lock, flags); | ||
| 221 | } | ||
| 222 | return rc; | ||
| 223 | } | ||
| 224 | |||
| 225 | /* | 82 | /* |
| 226 | * This routine returns the numbers of characters the tty driver | 83 | * This routine returns the numbers of characters the tty driver |
| 227 | * will accept for queuing to be written. This number is subject | 84 | * will accept for queuing to be written. This number is subject |
| @@ -268,7 +125,6 @@ sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc) | |||
| 268 | struct sclp_buffer, list); | 125 | struct sclp_buffer, list); |
| 269 | spin_unlock_irqrestore(&sclp_tty_lock, flags); | 126 | spin_unlock_irqrestore(&sclp_tty_lock, flags); |
| 270 | } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback)); | 127 | } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback)); |
| 271 | wake_up(&sclp_tty_waitq); | ||
| 272 | /* check if the tty needs a wake up call */ | 128 | /* check if the tty needs a wake up call */ |
| 273 | if (sclp_tty != NULL) { | 129 | if (sclp_tty != NULL) { |
| 274 | tty_wakeup(sclp_tty); | 130 | tty_wakeup(sclp_tty); |
| @@ -316,37 +172,37 @@ sclp_tty_timeout(unsigned long data) | |||
| 316 | /* | 172 | /* |
| 317 | * Write a string to the sclp tty. | 173 | * Write a string to the sclp tty. |
| 318 | */ | 174 | */ |
| 319 | static void | 175 | static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail) |
| 320 | sclp_tty_write_string(const unsigned char *str, int count) | ||
| 321 | { | 176 | { |
| 322 | unsigned long flags; | 177 | unsigned long flags; |
| 323 | void *page; | 178 | void *page; |
| 324 | int written; | 179 | int written; |
| 180 | int overall_written; | ||
| 325 | struct sclp_buffer *buf; | 181 | struct sclp_buffer *buf; |
| 326 | 182 | ||
| 327 | if (count <= 0) | 183 | if (count <= 0) |
| 328 | return; | 184 | return 0; |
| 185 | overall_written = 0; | ||
| 329 | spin_lock_irqsave(&sclp_tty_lock, flags); | 186 | spin_lock_irqsave(&sclp_tty_lock, flags); |
| 330 | do { | 187 | do { |
| 331 | /* Create a sclp output buffer if none exists yet */ | 188 | /* Create a sclp output buffer if none exists yet */ |
| 332 | if (sclp_ttybuf == NULL) { | 189 | if (sclp_ttybuf == NULL) { |
| 333 | while (list_empty(&sclp_tty_pages)) { | 190 | while (list_empty(&sclp_tty_pages)) { |
| 334 | spin_unlock_irqrestore(&sclp_tty_lock, flags); | 191 | spin_unlock_irqrestore(&sclp_tty_lock, flags); |
| 335 | if (in_interrupt()) | 192 | if (may_fail) |
| 336 | sclp_sync_wait(); | 193 | goto out; |
| 337 | else | 194 | else |
| 338 | wait_event(sclp_tty_waitq, | 195 | sclp_sync_wait(); |
| 339 | !list_empty(&sclp_tty_pages)); | ||
| 340 | spin_lock_irqsave(&sclp_tty_lock, flags); | 196 | spin_lock_irqsave(&sclp_tty_lock, flags); |
| 341 | } | 197 | } |
| 342 | page = sclp_tty_pages.next; | 198 | page = sclp_tty_pages.next; |
| 343 | list_del((struct list_head *) page); | 199 | list_del((struct list_head *) page); |
| 344 | sclp_ttybuf = sclp_make_buffer(page, | 200 | sclp_ttybuf = sclp_make_buffer(page, sclp_tty_columns, |
| 345 | sclp_ioctls.columns, | 201 | SPACES_PER_TAB); |
| 346 | sclp_ioctls.htab); | ||
| 347 | } | 202 | } |
| 348 | /* try to write the string to the current output buffer */ | 203 | /* try to write the string to the current output buffer */ |
| 349 | written = sclp_write(sclp_ttybuf, str, count); | 204 | written = sclp_write(sclp_ttybuf, str, count); |
| 205 | overall_written += written; | ||
| 350 | if (written == count) | 206 | if (written == count) |
| 351 | break; | 207 | break; |
| 352 | /* | 208 | /* |
| @@ -363,27 +219,17 @@ sclp_tty_write_string(const unsigned char *str, int count) | |||
| 363 | count -= written; | 219 | count -= written; |
| 364 | } while (count > 0); | 220 | } while (count > 0); |
| 365 | /* Setup timer to output current console buffer after 1/10 second */ | 221 | /* Setup timer to output current console buffer after 1/10 second */ |
| 366 | if (sclp_ioctls.final_nl) { | 222 | if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) && |
| 367 | if (sclp_ttybuf != NULL && | 223 | !timer_pending(&sclp_tty_timer)) { |
| 368 | sclp_chars_in_buffer(sclp_ttybuf) != 0 && | 224 | init_timer(&sclp_tty_timer); |
| 369 | !timer_pending(&sclp_tty_timer)) { | 225 | sclp_tty_timer.function = sclp_tty_timeout; |
| 370 | init_timer(&sclp_tty_timer); | 226 | sclp_tty_timer.data = 0UL; |
| 371 | sclp_tty_timer.function = sclp_tty_timeout; | 227 | sclp_tty_timer.expires = jiffies + HZ/10; |
| 372 | sclp_tty_timer.data = 0UL; | 228 | add_timer(&sclp_tty_timer); |
| 373 | sclp_tty_timer.expires = jiffies + HZ/10; | ||
| 374 | add_timer(&sclp_tty_timer); | ||
| 375 | } | ||
| 376 | } else { | ||
| 377 | if (sclp_ttybuf != NULL && | ||
| 378 | sclp_chars_in_buffer(sclp_ttybuf) != 0) { | ||
| 379 | buf = sclp_ttybuf; | ||
| 380 | sclp_ttybuf = NULL; | ||
| 381 | spin_unlock_irqrestore(&sclp_tty_lock, flags); | ||
| 382 | __sclp_ttybuf_emit(buf); | ||
| 383 | spin_lock_irqsave(&sclp_tty_lock, flags); | ||
| 384 | } | ||
| 385 | } | 229 | } |
| 386 | spin_unlock_irqrestore(&sclp_tty_lock, flags); | 230 | spin_unlock_irqrestore(&sclp_tty_lock, flags); |
| 231 | out: | ||
| 232 | return overall_written; | ||
| 387 | } | 233 | } |
| 388 | 234 | ||
| 389 | /* | 235 | /* |
| @@ -395,11 +241,10 @@ static int | |||
| 395 | sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) | 241 | sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) |
| 396 | { | 242 | { |
| 397 | if (sclp_tty_chars_count > 0) { | 243 | if (sclp_tty_chars_count > 0) { |
| 398 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); | 244 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); |
| 399 | sclp_tty_chars_count = 0; | 245 | sclp_tty_chars_count = 0; |
| 400 | } | 246 | } |
| 401 | sclp_tty_write_string(buf, count); | 247 | return sclp_tty_write_string(buf, count, 1); |
| 402 | return count; | ||
| 403 | } | 248 | } |
| 404 | 249 | ||
| 405 | /* | 250 | /* |
| @@ -417,9 +262,10 @@ sclp_tty_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 417 | { | 262 | { |
| 418 | sclp_tty_chars[sclp_tty_chars_count++] = ch; | 263 | sclp_tty_chars[sclp_tty_chars_count++] = ch; |
| 419 | if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) { | 264 | if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) { |
| 420 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); | 265 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); |
| 421 | sclp_tty_chars_count = 0; | 266 | sclp_tty_chars_count = 0; |
| 422 | } return 1; | 267 | } |
| 268 | return 1; | ||
| 423 | } | 269 | } |
| 424 | 270 | ||
| 425 | /* | 271 | /* |
| @@ -430,7 +276,7 @@ static void | |||
| 430 | sclp_tty_flush_chars(struct tty_struct *tty) | 276 | sclp_tty_flush_chars(struct tty_struct *tty) |
| 431 | { | 277 | { |
| 432 | if (sclp_tty_chars_count > 0) { | 278 | if (sclp_tty_chars_count > 0) { |
| 433 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); | 279 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); |
| 434 | sclp_tty_chars_count = 0; | 280 | sclp_tty_chars_count = 0; |
| 435 | } | 281 | } |
| 436 | } | 282 | } |
| @@ -469,7 +315,7 @@ static void | |||
| 469 | sclp_tty_flush_buffer(struct tty_struct *tty) | 315 | sclp_tty_flush_buffer(struct tty_struct *tty) |
| 470 | { | 316 | { |
| 471 | if (sclp_tty_chars_count > 0) { | 317 | if (sclp_tty_chars_count > 0) { |
| 472 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); | 318 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0); |
| 473 | sclp_tty_chars_count = 0; | 319 | sclp_tty_chars_count = 0; |
| 474 | } | 320 | } |
| 475 | } | 321 | } |
| @@ -517,9 +363,7 @@ sclp_tty_input(unsigned char* buf, unsigned int count) | |||
| 517 | * modify original string, | 363 | * modify original string, |
| 518 | * returns length of resulting string | 364 | * returns length of resulting string |
| 519 | */ | 365 | */ |
| 520 | static int | 366 | static int sclp_switch_cases(unsigned char *buf, int count) |
| 521 | sclp_switch_cases(unsigned char *buf, int count, | ||
| 522 | unsigned char delim, int tolower) | ||
| 523 | { | 367 | { |
| 524 | unsigned char *ip, *op; | 368 | unsigned char *ip, *op; |
| 525 | int toggle; | 369 | int toggle; |
| @@ -529,9 +373,9 @@ sclp_switch_cases(unsigned char *buf, int count, | |||
| 529 | ip = op = buf; | 373 | ip = op = buf; |
| 530 | while (count-- > 0) { | 374 | while (count-- > 0) { |
| 531 | /* compare with special character */ | 375 | /* compare with special character */ |
| 532 | if (*ip == delim) { | 376 | if (*ip == CASE_DELIMITER) { |
| 533 | /* followed by another special character? */ | 377 | /* followed by another special character? */ |
| 534 | if (count && ip[1] == delim) { | 378 | if (count && ip[1] == CASE_DELIMITER) { |
| 535 | /* | 379 | /* |
| 536 | * ... then put a single copy of the special | 380 | * ... then put a single copy of the special |
| 537 | * character to the output string | 381 | * character to the output string |
| @@ -550,7 +394,7 @@ sclp_switch_cases(unsigned char *buf, int count, | |||
| 550 | /* not the special character */ | 394 | /* not the special character */ |
| 551 | if (toggle) | 395 | if (toggle) |
| 552 | /* but case switching is on */ | 396 | /* but case switching is on */ |
| 553 | if (tolower) | 397 | if (sclp_tty_tolower) |
| 554 | /* switch to uppercase */ | 398 | /* switch to uppercase */ |
| 555 | *op++ = _ebc_toupper[(int) *ip++]; | 399 | *op++ = _ebc_toupper[(int) *ip++]; |
| 556 | else | 400 | else |
| @@ -570,30 +414,12 @@ sclp_get_input(unsigned char *start, unsigned char *end) | |||
| 570 | int count; | 414 | int count; |
| 571 | 415 | ||
| 572 | count = end - start; | 416 | count = end - start; |
| 573 | /* | 417 | if (sclp_tty_tolower) |
| 574 | * if set in ioctl convert EBCDIC to lower case | ||
| 575 | * (modify original input in SCCB) | ||
| 576 | */ | ||
| 577 | if (sclp_ioctls.tolower) | ||
| 578 | EBC_TOLOWER(start, count); | 418 | EBC_TOLOWER(start, count); |
| 579 | 419 | count = sclp_switch_cases(start, count); | |
| 580 | /* | ||
| 581 | * if set in ioctl find out characters in lower or upper case | ||
| 582 | * (depends on current case) separated by a special character, | ||
| 583 | * works on EBCDIC | ||
| 584 | */ | ||
| 585 | if (sclp_ioctls.delim) | ||
| 586 | count = sclp_switch_cases(start, count, | ||
| 587 | sclp_ioctls.delim, | ||
| 588 | sclp_ioctls.tolower); | ||
| 589 | |||
| 590 | /* convert EBCDIC to ASCII (modify original input in SCCB) */ | 420 | /* convert EBCDIC to ASCII (modify original input in SCCB) */ |
| 591 | sclp_ebcasc_str(start, count); | 421 | sclp_ebcasc_str(start, count); |
| 592 | 422 | ||
| 593 | /* if set in ioctl write operators input to console */ | ||
| 594 | if (sclp_ioctls.echo) | ||
| 595 | sclp_tty_write(sclp_tty, start, count); | ||
| 596 | |||
| 597 | /* transfer input to high level driver */ | 423 | /* transfer input to high level driver */ |
| 598 | sclp_tty_input(start, count); | 424 | sclp_tty_input(start, count); |
| 599 | } | 425 | } |
| @@ -717,7 +543,6 @@ static const struct tty_operations sclp_ops = { | |||
| 717 | .write_room = sclp_tty_write_room, | 543 | .write_room = sclp_tty_write_room, |
| 718 | .chars_in_buffer = sclp_tty_chars_in_buffer, | 544 | .chars_in_buffer = sclp_tty_chars_in_buffer, |
| 719 | .flush_buffer = sclp_tty_flush_buffer, | 545 | .flush_buffer = sclp_tty_flush_buffer, |
| 720 | .ioctl = sclp_tty_ioctl, | ||
| 721 | }; | 546 | }; |
| 722 | 547 | ||
| 723 | static int __init | 548 | static int __init |
| @@ -736,9 +561,6 @@ sclp_tty_init(void) | |||
| 736 | 561 | ||
| 737 | rc = sclp_rw_init(); | 562 | rc = sclp_rw_init(); |
| 738 | if (rc) { | 563 | if (rc) { |
| 739 | printk(KERN_ERR SCLP_TTY_PRINT_HEADER | ||
| 740 | "could not register tty - " | ||
| 741 | "sclp_rw_init returned %d\n", rc); | ||
| 742 | put_tty_driver(driver); | 564 | put_tty_driver(driver); |
| 743 | return rc; | 565 | return rc; |
| 744 | } | 566 | } |
| @@ -754,7 +576,6 @@ sclp_tty_init(void) | |||
| 754 | } | 576 | } |
| 755 | INIT_LIST_HEAD(&sclp_tty_outqueue); | 577 | INIT_LIST_HEAD(&sclp_tty_outqueue); |
| 756 | spin_lock_init(&sclp_tty_lock); | 578 | spin_lock_init(&sclp_tty_lock); |
| 757 | init_waitqueue_head(&sclp_tty_waitq); | ||
| 758 | init_timer(&sclp_tty_timer); | 579 | init_timer(&sclp_tty_timer); |
| 759 | sclp_ttybuf = NULL; | 580 | sclp_ttybuf = NULL; |
| 760 | sclp_tty_buffer_count = 0; | 581 | sclp_tty_buffer_count = 0; |
| @@ -763,11 +584,10 @@ sclp_tty_init(void) | |||
| 763 | * save 4 characters for the CPU number | 584 | * save 4 characters for the CPU number |
| 764 | * written at start of each line by VM/CP | 585 | * written at start of each line by VM/CP |
| 765 | */ | 586 | */ |
| 766 | sclp_ioctls_init.columns = 76; | 587 | sclp_tty_columns = 76; |
| 767 | /* case input lines to lowercase */ | 588 | /* case input lines to lowercase */ |
| 768 | sclp_ioctls_init.tolower = 1; | 589 | sclp_tty_tolower = 1; |
| 769 | } | 590 | } |
| 770 | sclp_ioctls = sclp_ioctls_init; | ||
| 771 | sclp_tty_chars_count = 0; | 591 | sclp_tty_chars_count = 0; |
| 772 | sclp_tty = NULL; | 592 | sclp_tty = NULL; |
| 773 | 593 | ||
| @@ -792,9 +612,6 @@ sclp_tty_init(void) | |||
| 792 | tty_set_operations(driver, &sclp_ops); | 612 | tty_set_operations(driver, &sclp_ops); |
| 793 | rc = tty_register_driver(driver); | 613 | rc = tty_register_driver(driver); |
| 794 | if (rc) { | 614 | if (rc) { |
| 795 | printk(KERN_ERR SCLP_TTY_PRINT_HEADER | ||
| 796 | "could not register tty - " | ||
| 797 | "tty_register_driver returned %d\n", rc); | ||
| 798 | put_tty_driver(driver); | 615 | put_tty_driver(driver); |
| 799 | return rc; | 616 | return rc; |
| 800 | } | 617 | } |
diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h index 0ce2c1fc5340..4b965b22fecd 100644 --- a/drivers/s390/char/sclp_tty.h +++ b/drivers/s390/char/sclp_tty.h | |||
| @@ -11,61 +11,8 @@ | |||
| 11 | #ifndef __SCLP_TTY_H__ | 11 | #ifndef __SCLP_TTY_H__ |
| 12 | #define __SCLP_TTY_H__ | 12 | #define __SCLP_TTY_H__ |
| 13 | 13 | ||
| 14 | #include <linux/ioctl.h> | ||
| 15 | #include <linux/termios.h> | ||
| 16 | #include <linux/tty_driver.h> | 14 | #include <linux/tty_driver.h> |
| 17 | 15 | ||
| 18 | /* This is the type of data structures storing sclp ioctl setting. */ | ||
| 19 | struct sclp_ioctls { | ||
| 20 | unsigned short htab; | ||
| 21 | unsigned char echo; | ||
| 22 | unsigned short columns; | ||
| 23 | unsigned char final_nl; | ||
| 24 | unsigned short max_sccb; | ||
| 25 | unsigned short kmem_sccb; /* can't be modified at run time */ | ||
| 26 | unsigned char tolower; | ||
| 27 | unsigned char delim; | ||
| 28 | }; | ||
| 29 | |||
| 30 | /* must be unique, FIXME: must be added in Documentation/ioctl_number.txt */ | ||
| 31 | #define SCLP_IOCTL_LETTER 'B' | ||
| 32 | |||
| 33 | /* set width of horizontal tabulator */ | ||
| 34 | #define TIOCSCLPSHTAB _IOW(SCLP_IOCTL_LETTER, 0, unsigned short) | ||
| 35 | /* enable/disable echo of input (independent from line discipline) */ | ||
| 36 | #define TIOCSCLPSECHO _IOW(SCLP_IOCTL_LETTER, 1, unsigned char) | ||
| 37 | /* set number of colums for output */ | ||
| 38 | #define TIOCSCLPSCOLS _IOW(SCLP_IOCTL_LETTER, 2, unsigned short) | ||
| 39 | /* enable/disable writing without final new line character */ | ||
| 40 | #define TIOCSCLPSNL _IOW(SCLP_IOCTL_LETTER, 4, signed char) | ||
| 41 | /* set the maximum buffers size for output, rounded up to next 4kB boundary */ | ||
| 42 | #define TIOCSCLPSOBUF _IOW(SCLP_IOCTL_LETTER, 5, unsigned short) | ||
| 43 | /* set initial (default) sclp ioctls */ | ||
| 44 | #define TIOCSCLPSINIT _IO(SCLP_IOCTL_LETTER, 6) | ||
| 45 | /* enable/disable conversion from upper to lower case of input */ | ||
| 46 | #define TIOCSCLPSCASE _IOW(SCLP_IOCTL_LETTER, 7, unsigned char) | ||
| 47 | /* set special character used for separating upper and lower case, */ | ||
| 48 | /* 0x00 disables this feature */ | ||
| 49 | #define TIOCSCLPSDELIM _IOW(SCLP_IOCTL_LETTER, 9, unsigned char) | ||
| 50 | |||
| 51 | /* get width of horizontal tabulator */ | ||
| 52 | #define TIOCSCLPGHTAB _IOR(SCLP_IOCTL_LETTER, 10, unsigned short) | ||
| 53 | /* Is echo of input enabled ? (independent from line discipline) */ | ||
| 54 | #define TIOCSCLPGECHO _IOR(SCLP_IOCTL_LETTER, 11, unsigned char) | ||
| 55 | /* get number of colums for output */ | ||
| 56 | #define TIOCSCLPGCOLS _IOR(SCLP_IOCTL_LETTER, 12, unsigned short) | ||
| 57 | /* Is writing without final new line character enabled ? */ | ||
| 58 | #define TIOCSCLPGNL _IOR(SCLP_IOCTL_LETTER, 14, signed char) | ||
| 59 | /* get the maximum buffers size for output */ | ||
| 60 | #define TIOCSCLPGOBUF _IOR(SCLP_IOCTL_LETTER, 15, unsigned short) | ||
| 61 | /* Is conversion from upper to lower case of input enabled ? */ | ||
| 62 | #define TIOCSCLPGCASE _IOR(SCLP_IOCTL_LETTER, 17, unsigned char) | ||
| 63 | /* get special character used for separating upper and lower case, */ | ||
| 64 | /* 0x00 disables this feature */ | ||
| 65 | #define TIOCSCLPGDELIM _IOR(SCLP_IOCTL_LETTER, 19, unsigned char) | ||
| 66 | /* get the number of buffers/pages got from kernel at startup */ | ||
| 67 | #define TIOCSCLPGKBUF _IOR(SCLP_IOCTL_LETTER, 20, unsigned short) | ||
| 68 | |||
| 69 | extern struct tty_driver *sclp_tty_driver; | 16 | extern struct tty_driver *sclp_tty_driver; |
| 70 | 17 | ||
| 71 | #endif /* __SCLP_TTY_H__ */ | 18 | #endif /* __SCLP_TTY_H__ */ |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 3e577f655b18..ad51738c4261 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
| @@ -27,7 +27,6 @@ | |||
| 27 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
| 28 | #include "sclp.h" | 28 | #include "sclp.h" |
| 29 | 29 | ||
| 30 | #define SCLP_VT220_PRINT_HEADER "sclp vt220 tty driver: " | ||
| 31 | #define SCLP_VT220_MAJOR TTY_MAJOR | 30 | #define SCLP_VT220_MAJOR TTY_MAJOR |
| 32 | #define SCLP_VT220_MINOR 65 | 31 | #define SCLP_VT220_MINOR 65 |
| 33 | #define SCLP_VT220_DRIVER_NAME "sclp_vt220" | 32 | #define SCLP_VT220_DRIVER_NAME "sclp_vt220" |
| @@ -82,8 +81,8 @@ static struct sclp_vt220_request *sclp_vt220_current_request; | |||
| 82 | /* Number of characters in current request buffer */ | 81 | /* Number of characters in current request buffer */ |
| 83 | static int sclp_vt220_buffered_chars; | 82 | static int sclp_vt220_buffered_chars; |
| 84 | 83 | ||
| 85 | /* Flag indicating whether this driver has already been initialized */ | 84 | /* Counter controlling core driver initialization. */ |
| 86 | static int sclp_vt220_initialized = 0; | 85 | static int __initdata sclp_vt220_init_count; |
| 87 | 86 | ||
| 88 | /* Flag indicating that sclp_vt220_current_request should really | 87 | /* Flag indicating that sclp_vt220_current_request should really |
| 89 | * have been already queued but wasn't because the SCLP was processing | 88 | * have been already queued but wasn't because the SCLP was processing |
| @@ -609,10 +608,8 @@ sclp_vt220_flush_buffer(struct tty_struct *tty) | |||
| 609 | sclp_vt220_emit_current(); | 608 | sclp_vt220_emit_current(); |
| 610 | } | 609 | } |
| 611 | 610 | ||
| 612 | /* | 611 | /* Release allocated pages. */ |
| 613 | * Initialize all relevant components and register driver with system. | 612 | static void __init __sclp_vt220_free_pages(void) |
| 614 | */ | ||
| 615 | static void __init __sclp_vt220_cleanup(void) | ||
| 616 | { | 613 | { |
| 617 | struct list_head *page, *p; | 614 | struct list_head *page, *p; |
| 618 | 615 | ||
| @@ -623,21 +620,30 @@ static void __init __sclp_vt220_cleanup(void) | |||
| 623 | else | 620 | else |
| 624 | free_bootmem((unsigned long) page, PAGE_SIZE); | 621 | free_bootmem((unsigned long) page, PAGE_SIZE); |
| 625 | } | 622 | } |
| 626 | if (!list_empty(&sclp_vt220_register.list)) | ||
| 627 | sclp_unregister(&sclp_vt220_register); | ||
| 628 | sclp_vt220_initialized = 0; | ||
| 629 | } | 623 | } |
| 630 | 624 | ||
| 631 | static int __init __sclp_vt220_init(void) | 625 | /* Release memory and unregister from sclp core. Controlled by init counting - |
| 626 | * only the last invoker will actually perform these actions. */ | ||
| 627 | static void __init __sclp_vt220_cleanup(void) | ||
| 628 | { | ||
| 629 | sclp_vt220_init_count--; | ||
| 630 | if (sclp_vt220_init_count != 0) | ||
| 631 | return; | ||
| 632 | sclp_unregister(&sclp_vt220_register); | ||
| 633 | __sclp_vt220_free_pages(); | ||
| 634 | } | ||
| 635 | |||
| 636 | /* Allocate buffer pages and register with sclp core. Controlled by init | ||
| 637 | * counting - only the first invoker will actually perform these actions. */ | ||
| 638 | static int __init __sclp_vt220_init(int num_pages) | ||
| 632 | { | 639 | { |
| 633 | void *page; | 640 | void *page; |
| 634 | int i; | 641 | int i; |
| 635 | int num_pages; | ||
| 636 | int rc; | 642 | int rc; |
| 637 | 643 | ||
| 638 | if (sclp_vt220_initialized) | 644 | sclp_vt220_init_count++; |
| 645 | if (sclp_vt220_init_count != 1) | ||
| 639 | return 0; | 646 | return 0; |
| 640 | sclp_vt220_initialized = 1; | ||
| 641 | spin_lock_init(&sclp_vt220_lock); | 647 | spin_lock_init(&sclp_vt220_lock); |
| 642 | INIT_LIST_HEAD(&sclp_vt220_empty); | 648 | INIT_LIST_HEAD(&sclp_vt220_empty); |
| 643 | INIT_LIST_HEAD(&sclp_vt220_outqueue); | 649 | INIT_LIST_HEAD(&sclp_vt220_outqueue); |
| @@ -649,24 +655,22 @@ static int __init __sclp_vt220_init(void) | |||
| 649 | sclp_vt220_flush_later = 0; | 655 | sclp_vt220_flush_later = 0; |
| 650 | 656 | ||
| 651 | /* Allocate pages for output buffering */ | 657 | /* Allocate pages for output buffering */ |
| 652 | num_pages = slab_is_available() ? MAX_KMEM_PAGES : MAX_CONSOLE_PAGES; | ||
| 653 | for (i = 0; i < num_pages; i++) { | 658 | for (i = 0; i < num_pages; i++) { |
| 654 | if (slab_is_available()) | 659 | if (slab_is_available()) |
| 655 | page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | 660 | page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
| 656 | else | 661 | else |
| 657 | page = alloc_bootmem_low_pages(PAGE_SIZE); | 662 | page = alloc_bootmem_low_pages(PAGE_SIZE); |
| 658 | if (!page) { | 663 | if (!page) { |
| 659 | __sclp_vt220_cleanup(); | 664 | rc = -ENOMEM; |
| 660 | return -ENOMEM; | 665 | goto out; |
| 661 | } | 666 | } |
| 662 | list_add_tail((struct list_head *) page, &sclp_vt220_empty); | 667 | list_add_tail((struct list_head *) page, &sclp_vt220_empty); |
| 663 | } | 668 | } |
| 664 | rc = sclp_register(&sclp_vt220_register); | 669 | rc = sclp_register(&sclp_vt220_register); |
| 670 | out: | ||
| 665 | if (rc) { | 671 | if (rc) { |
| 666 | printk(KERN_ERR SCLP_VT220_PRINT_HEADER | 672 | __sclp_vt220_free_pages(); |
| 667 | "could not register vt220 - " | 673 | sclp_vt220_init_count--; |
| 668 | "sclp_register returned %d\n", rc); | ||
| 669 | __sclp_vt220_cleanup(); | ||
| 670 | } | 674 | } |
| 671 | return rc; | 675 | return rc; |
| 672 | } | 676 | } |
| @@ -689,15 +693,13 @@ static int __init sclp_vt220_tty_init(void) | |||
| 689 | { | 693 | { |
| 690 | struct tty_driver *driver; | 694 | struct tty_driver *driver; |
| 691 | int rc; | 695 | int rc; |
| 692 | int cleanup; | ||
| 693 | 696 | ||
| 694 | /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve | 697 | /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve |
| 695 | * symmetry between VM and LPAR systems regarding ttyS1. */ | 698 | * symmetry between VM and LPAR systems regarding ttyS1. */ |
| 696 | driver = alloc_tty_driver(1); | 699 | driver = alloc_tty_driver(1); |
| 697 | if (!driver) | 700 | if (!driver) |
| 698 | return -ENOMEM; | 701 | return -ENOMEM; |
| 699 | cleanup = !sclp_vt220_initialized; | 702 | rc = __sclp_vt220_init(MAX_KMEM_PAGES); |
| 700 | rc = __sclp_vt220_init(); | ||
| 701 | if (rc) | 703 | if (rc) |
| 702 | goto out_driver; | 704 | goto out_driver; |
| 703 | 705 | ||
| @@ -713,18 +715,13 @@ static int __init sclp_vt220_tty_init(void) | |||
| 713 | tty_set_operations(driver, &sclp_vt220_ops); | 715 | tty_set_operations(driver, &sclp_vt220_ops); |
| 714 | 716 | ||
| 715 | rc = tty_register_driver(driver); | 717 | rc = tty_register_driver(driver); |
| 716 | if (rc) { | 718 | if (rc) |
| 717 | printk(KERN_ERR SCLP_VT220_PRINT_HEADER | ||
| 718 | "could not register tty - " | ||
| 719 | "tty_register_driver returned %d\n", rc); | ||
| 720 | goto out_init; | 719 | goto out_init; |
| 721 | } | ||
| 722 | sclp_vt220_driver = driver; | 720 | sclp_vt220_driver = driver; |
| 723 | return 0; | 721 | return 0; |
| 724 | 722 | ||
| 725 | out_init: | 723 | out_init: |
| 726 | if (cleanup) | 724 | __sclp_vt220_cleanup(); |
| 727 | __sclp_vt220_cleanup(); | ||
| 728 | out_driver: | 725 | out_driver: |
| 729 | put_tty_driver(driver); | 726 | put_tty_driver(driver); |
| 730 | return rc; | 727 | return rc; |
| @@ -773,10 +770,9 @@ sclp_vt220_con_init(void) | |||
| 773 | { | 770 | { |
| 774 | int rc; | 771 | int rc; |
| 775 | 772 | ||
| 776 | INIT_LIST_HEAD(&sclp_vt220_register.list); | ||
| 777 | if (!CONSOLE_IS_SCLP) | 773 | if (!CONSOLE_IS_SCLP) |
| 778 | return 0; | 774 | return 0; |
| 779 | rc = __sclp_vt220_init(); | 775 | rc = __sclp_vt220_init(MAX_CONSOLE_PAGES); |
| 780 | if (rc) | 776 | if (rc) |
| 781 | return rc; | 777 | return rc; |
| 782 | /* Attach linux console */ | 778 | /* Attach linux console */ |
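
The sclp_vt220 hunks above replace the one-shot sclp_vt220_initialized flag with reference-counted initialization: the console path passes MAX_CONSOLE_PAGES and the tty path MAX_KMEM_PAGES, only the first caller of __sclp_vt220_init() allocates pages and registers with the sclp core, and only the last caller of __sclp_vt220_cleanup() unregisters and frees them. Below is a minimal, driver-independent sketch of that init-count pattern; the helper names are placeholders for the page-list handling and the sclp_register()/sclp_unregister() calls, not the driver's actual functions.

/* Placeholders standing in for buffer and core registration handling. */
static int init_count;

static int allocate_buffers(int num_pages) { return 0; }   /* placeholder */
static void free_buffers(void) { }                         /* placeholder */
static int register_with_core(void) { return 0; }          /* placeholder */
static void unregister_with_core(void) { }                 /* placeholder */

static int shared_init(int num_pages)
{
        int rc;

        init_count++;
        if (init_count != 1)
                return 0;               /* a previous caller already set up */
        rc = allocate_buffers(num_pages);
        if (!rc)
                rc = register_with_core();
        if (rc) {
                free_buffers();
                init_count--;           /* roll back so init can be retried */
        }
        return rc;
}

static void shared_cleanup(void)
{
        init_count--;
        if (init_count != 0)
                return;                 /* still in use by the other caller */
        unregister_with_core();
        free_buffers();
}

As in the patch, a failed first initialization decrements the count again, so a later caller gets another chance instead of seeing a half-initialized state.
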
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index 874adf365e46..22ca34361ed7 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c | |||
| @@ -196,7 +196,7 @@ tape_34xx_erp_retry(struct tape_request *request) | |||
| 196 | static int | 196 | static int |
| 197 | tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb) | 197 | tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb) |
| 198 | { | 198 | { |
| 199 | if (irb->scsw.dstat == 0x85 /* READY */) { | 199 | if (irb->scsw.cmd.dstat == 0x85) { /* READY */ |
| 200 | /* A medium was inserted in the drive. */ | 200 | /* A medium was inserted in the drive. */ |
| 201 | DBF_EVENT(6, "xuud med\n"); | 201 | DBF_EVENT(6, "xuud med\n"); |
| 202 | tape_34xx_delete_sbid_from(device, 0); | 202 | tape_34xx_delete_sbid_from(device, 0); |
| @@ -844,22 +844,22 @@ tape_34xx_irq(struct tape_device *device, struct tape_request *request, | |||
| 844 | if (request == NULL) | 844 | if (request == NULL) |
| 845 | return tape_34xx_unsolicited_irq(device, irb); | 845 | return tape_34xx_unsolicited_irq(device, irb); |
| 846 | 846 | ||
| 847 | if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) && | 847 | if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) && |
| 848 | (irb->scsw.dstat & DEV_STAT_DEV_END) && | 848 | (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) && |
| 849 | (request->op == TO_WRI)) { | 849 | (request->op == TO_WRI)) { |
| 850 | /* Write at end of volume */ | 850 | /* Write at end of volume */ |
| 851 | PRINT_INFO("End of volume\n"); /* XXX */ | 851 | PRINT_INFO("End of volume\n"); /* XXX */ |
| 852 | return tape_34xx_erp_failed(request, -ENOSPC); | 852 | return tape_34xx_erp_failed(request, -ENOSPC); |
| 853 | } | 853 | } |
| 854 | 854 | ||
| 855 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) | 855 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) |
| 856 | return tape_34xx_unit_check(device, request, irb); | 856 | return tape_34xx_unit_check(device, request, irb); |
| 857 | 857 | ||
| 858 | if (irb->scsw.dstat & DEV_STAT_DEV_END) { | 858 | if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) { |
| 859 | /* | 859 | /* |
| 860 | * A unit exception occurs on skipping over a tapemark block. | 860 | * A unit exception occurs on skipping over a tapemark block. |
| 861 | */ | 861 | */ |
| 862 | if (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) { | 862 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { |
| 863 | if (request->op == TO_BSB || request->op == TO_FSB) | 863 | if (request->op == TO_BSB || request->op == TO_FSB) |
| 864 | request->rescnt++; | 864 | request->rescnt++; |
| 865 | else | 865 | else |
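
The irb->scsw.dstat accesses rewritten here as irb->scsw.cmd.dstat, and likewise in the tape_3590, tape_core, tty3270 and vmur hunks that follow, reflect the subchannel status word becoming a union with an explicit command-mode view (the fcx/itcw objects added to the cio Makefile further below are part of the transport-mode side of the same rework). The following is a standalone illustration of that shape and of how the status bits are tested; the field list is abridged and the bit values follow the conventional channel-status byte, so asm/cio.h remains the authoritative definition.

#include <stdio.h>

#define DEV_STAT_DEV_END     0x04       /* device end (see asm/cio.h) */
#define DEV_STAT_UNIT_CHECK  0x02       /* unit check */

struct cmd_scsw {                       /* abridged: only fields used above */
        unsigned int cpa;               /* channel program address */
        unsigned char dstat;            /* device status */
        unsigned char cstat;            /* subchannel status */
        unsigned short count;           /* residual byte count */
};

union scsw {
        struct cmd_scsw cmd;            /* command-mode status, as accessed above */
        /* a transport-mode (FCX) view sits alongside it in the real union */
};

int main(void)
{
        union scsw s = { .cmd = { .dstat = DEV_STAT_DEV_END } };

        if (s.cmd.dstat & DEV_STAT_UNIT_CHECK)
                printf("unit check -> run sense/ERP\n");
        else if (s.cmd.dstat & DEV_STAT_DEV_END)
                printf("device end -> request done, residual %u\n",
                       (unsigned int) s.cmd.count);
        return 0;
}
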
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 42ce7915fc5d..839987618ffd 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c | |||
| @@ -837,13 +837,13 @@ tape_3590_erp_retry(struct tape_device *device, struct tape_request *request, | |||
| 837 | static int | 837 | static int |
| 838 | tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb) | 838 | tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb) |
| 839 | { | 839 | { |
| 840 | if (irb->scsw.dstat == DEV_STAT_CHN_END) | 840 | if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END) |
| 841 | /* Probably result of halt ssch */ | 841 | /* Probably result of halt ssch */ |
| 842 | return TAPE_IO_PENDING; | 842 | return TAPE_IO_PENDING; |
| 843 | else if (irb->scsw.dstat == 0x85) | 843 | else if (irb->scsw.cmd.dstat == 0x85) |
| 844 | /* Device Ready */ | 844 | /* Device Ready */ |
| 845 | DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id); | 845 | DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id); |
| 846 | else if (irb->scsw.dstat & DEV_STAT_ATTENTION) { | 846 | else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { |
| 847 | tape_3590_schedule_work(device, TO_READ_ATTMSG); | 847 | tape_3590_schedule_work(device, TO_READ_ATTMSG); |
| 848 | } else { | 848 | } else { |
| 849 | DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id); | 849 | DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id); |
| @@ -1515,18 +1515,19 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request, | |||
| 1515 | if (request == NULL) | 1515 | if (request == NULL) |
| 1516 | return tape_3590_unsolicited_irq(device, irb); | 1516 | return tape_3590_unsolicited_irq(device, irb); |
| 1517 | 1517 | ||
| 1518 | if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) && | 1518 | if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) && |
| 1519 | (irb->scsw.dstat & DEV_STAT_DEV_END) && (request->op == TO_WRI)) { | 1519 | (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) && |
| 1520 | (request->op == TO_WRI)) { | ||
| 1520 | /* Write at end of volume */ | 1521 | /* Write at end of volume */ |
| 1521 | DBF_EVENT(2, "End of volume\n"); | 1522 | DBF_EVENT(2, "End of volume\n"); |
| 1522 | return tape_3590_erp_failed(device, request, irb, -ENOSPC); | 1523 | return tape_3590_erp_failed(device, request, irb, -ENOSPC); |
| 1523 | } | 1524 | } |
| 1524 | 1525 | ||
| 1525 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) | 1526 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) |
| 1526 | return tape_3590_unit_check(device, request, irb); | 1527 | return tape_3590_unit_check(device, request, irb); |
| 1527 | 1528 | ||
| 1528 | if (irb->scsw.dstat & DEV_STAT_DEV_END) { | 1529 | if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) { |
| 1529 | if (irb->scsw.dstat == DEV_STAT_UNIT_EXCEP) { | 1530 | if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) { |
| 1530 | if (request->op == TO_FSB || request->op == TO_BSB) | 1531 | if (request->op == TO_FSB || request->op == TO_BSB) |
| 1531 | request->rescnt++; | 1532 | request->rescnt++; |
| 1532 | else | 1533 | else |
| @@ -1536,12 +1537,12 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request, | |||
| 1536 | return tape_3590_done(device, request); | 1537 | return tape_3590_done(device, request); |
| 1537 | } | 1538 | } |
| 1538 | 1539 | ||
| 1539 | if (irb->scsw.dstat & DEV_STAT_CHN_END) { | 1540 | if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) { |
| 1540 | DBF_EVENT(2, "channel end\n"); | 1541 | DBF_EVENT(2, "channel end\n"); |
| 1541 | return TAPE_IO_PENDING; | 1542 | return TAPE_IO_PENDING; |
| 1542 | } | 1543 | } |
| 1543 | 1544 | ||
| 1544 | if (irb->scsw.dstat & DEV_STAT_ATTENTION) { | 1545 | if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { |
| 1545 | DBF_EVENT(2, "Unit Attention when busy..\n"); | 1546 | DBF_EVENT(2, "Unit Attention when busy..\n"); |
| 1546 | return TAPE_IO_PENDING; | 1547 | return TAPE_IO_PENDING; |
| 1547 | } | 1548 | } |
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index c20e3c548343..181a5441af16 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c | |||
| @@ -839,7 +839,7 @@ tape_dump_sense(struct tape_device* device, struct tape_request *request, | |||
| 839 | 839 | ||
| 840 | PRINT_INFO("-------------------------------------------------\n"); | 840 | PRINT_INFO("-------------------------------------------------\n"); |
| 841 | PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n", | 841 | PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n", |
| 842 | irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa); | 842 | irb->scsw.cmd.dstat, irb->scsw.cmd.cstat, irb->scsw.cmd.cpa); |
| 843 | PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id); | 843 | PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id); |
| 844 | if (request != NULL) | 844 | if (request != NULL) |
| 845 | PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]); | 845 | PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]); |
| @@ -867,7 +867,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request, | |||
| 867 | else | 867 | else |
| 868 | op = "---"; | 868 | op = "---"; |
| 869 | DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n", | 869 | DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n", |
| 870 | irb->scsw.dstat,irb->scsw.cstat); | 870 | irb->scsw.cmd.dstat, irb->scsw.cmd.cstat); |
| 871 | DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op); | 871 | DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op); |
| 872 | sptr = (unsigned int *) irb->ecw; | 872 | sptr = (unsigned int *) irb->ecw; |
| 873 | DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]); | 873 | DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]); |
| @@ -1083,10 +1083,11 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 1083 | * error might still apply. So we just schedule the request to be | 1083 | * error might still apply. So we just schedule the request to be |
| 1084 | * started later. | 1084 | * started later. |
| 1085 | */ | 1085 | */ |
| 1086 | if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) && | 1086 | if (irb->scsw.cmd.cc != 0 && |
| 1087 | (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && | ||
| 1087 | (request->status == TAPE_REQUEST_IN_IO)) { | 1088 | (request->status == TAPE_REQUEST_IN_IO)) { |
| 1088 | DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n", | 1089 | DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n", |
| 1089 | device->cdev_id, irb->scsw.cc, irb->scsw.fctl); | 1090 | device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl); |
| 1090 | request->status = TAPE_REQUEST_QUEUED; | 1091 | request->status = TAPE_REQUEST_QUEUED; |
| 1091 | schedule_delayed_work(&device->tape_dnr, HZ); | 1092 | schedule_delayed_work(&device->tape_dnr, HZ); |
| 1092 | return; | 1093 | return; |
| @@ -1094,8 +1095,8 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 1094 | 1095 | ||
| 1095 | /* May be an unsolicited irq */ | 1096 | /* May be an unsolicited irq */ |
| 1096 | if(request != NULL) | 1097 | if(request != NULL) |
| 1097 | request->rescnt = irb->scsw.count; | 1098 | request->rescnt = irb->scsw.cmd.count; |
| 1098 | else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) && | 1099 | else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) && |
| 1099 | !list_empty(&device->req_queue)) { | 1100 | !list_empty(&device->req_queue)) { |
| 1100 | /* Not Ready to Ready after long busy ? */ | 1101 | /* Not Ready to Ready after long busy ? */ |
| 1101 | struct tape_request *req; | 1102 | struct tape_request *req; |
| @@ -1111,7 +1112,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 1111 | return; | 1112 | return; |
| 1112 | } | 1113 | } |
| 1113 | } | 1114 | } |
| 1114 | if (irb->scsw.dstat != 0x0c) { | 1115 | if (irb->scsw.cmd.dstat != 0x0c) { |
| 1115 | /* Set the 'ONLINE' flag depending on sense byte 1 */ | 1116 | /* Set the 'ONLINE' flag depending on sense byte 1 */ |
| 1116 | if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) | 1117 | if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) |
| 1117 | device->tape_generic_status |= GMT_ONLINE(~0); | 1118 | device->tape_generic_status |= GMT_ONLINE(~0); |
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 5043150019ac..a7fe6302c982 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
| @@ -663,7 +663,7 @@ static int | |||
| 663 | tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) | 663 | tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) |
| 664 | { | 664 | { |
| 665 | /* Handle ATTN. Schedule tasklet to read aid. */ | 665 | /* Handle ATTN. Schedule tasklet to read aid. */ |
| 666 | if (irb->scsw.dstat & DEV_STAT_ATTENTION) { | 666 | if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { |
| 667 | if (!tp->throttle) | 667 | if (!tp->throttle) |
| 668 | tty3270_issue_read(tp, 0); | 668 | tty3270_issue_read(tp, 0); |
| 669 | else | 669 | else |
| @@ -671,11 +671,11 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) | |||
| 671 | } | 671 | } |
| 672 | 672 | ||
| 673 | if (rq) { | 673 | if (rq) { |
| 674 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) | 674 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) |
| 675 | rq->rc = -EIO; | 675 | rq->rc = -EIO; |
| 676 | else | 676 | else |
| 677 | /* Normal end. Copy residual count. */ | 677 | /* Normal end. Copy residual count. */ |
| 678 | rq->rescnt = irb->scsw.count; | 678 | rq->rescnt = irb->scsw.cmd.count; |
| 679 | } | 679 | } |
| 680 | return RAW3270_IO_DONE; | 680 | return RAW3270_IO_DONE; |
| 681 | } | 681 | } |
| @@ -1792,15 +1792,12 @@ static int __init tty3270_init(void) | |||
| 1792 | tty_set_operations(driver, &tty3270_ops); | 1792 | tty_set_operations(driver, &tty3270_ops); |
| 1793 | ret = tty_register_driver(driver); | 1793 | ret = tty_register_driver(driver); |
| 1794 | if (ret) { | 1794 | if (ret) { |
| 1795 | printk(KERN_ERR "tty3270 registration failed with %d\n", ret); | ||
| 1796 | put_tty_driver(driver); | 1795 | put_tty_driver(driver); |
| 1797 | return ret; | 1796 | return ret; |
| 1798 | } | 1797 | } |
| 1799 | tty3270_driver = driver; | 1798 | tty3270_driver = driver; |
| 1800 | ret = raw3270_register_notifier(tty3270_notifier); | 1799 | ret = raw3270_register_notifier(tty3270_notifier); |
| 1801 | if (ret) { | 1800 | if (ret) { |
| 1802 | printk(KERN_ERR "tty3270 notifier registration failed " | ||
| 1803 | "with %d\n", ret); | ||
| 1804 | put_tty_driver(driver); | 1801 | put_tty_driver(driver); |
| 1805 | return ret; | 1802 | return ret; |
| 1806 | 1803 | ||
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 2f419b0ea628..401ea84b3059 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c | |||
| @@ -61,30 +61,24 @@ static int vmcp_release(struct inode *inode, struct file *file) | |||
| 61 | static ssize_t | 61 | static ssize_t |
| 62 | vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) | 62 | vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) |
| 63 | { | 63 | { |
| 64 | size_t tocopy; | 64 | ssize_t ret; |
| 65 | size_t size; | ||
| 65 | struct vmcp_session *session; | 66 | struct vmcp_session *session; |
| 66 | 67 | ||
| 67 | session = (struct vmcp_session *)file->private_data; | 68 | session = file->private_data; |
| 68 | if (mutex_lock_interruptible(&session->mutex)) | 69 | if (mutex_lock_interruptible(&session->mutex)) |
| 69 | return -ERESTARTSYS; | 70 | return -ERESTARTSYS; |
| 70 | if (!session->response) { | 71 | if (!session->response) { |
| 71 | mutex_unlock(&session->mutex); | 72 | mutex_unlock(&session->mutex); |
| 72 | return 0; | 73 | return 0; |
| 73 | } | 74 | } |
| 74 | if (*ppos > session->resp_size) { | 75 | size = min_t(size_t, session->resp_size, session->bufsize); |
| 75 | mutex_unlock(&session->mutex); | 76 | ret = simple_read_from_buffer(buff, count, ppos, |
| 76 | return 0; | 77 | session->response, size); |
| 77 | } | ||
| 78 | tocopy = min(session->resp_size - (size_t) (*ppos), count); | ||
| 79 | tocopy = min(tocopy, session->bufsize - (size_t) (*ppos)); | ||
| 80 | 78 | ||
| 81 | if (copy_to_user(buff, session->response + (*ppos), tocopy)) { | ||
| 82 | mutex_unlock(&session->mutex); | ||
| 83 | return -EFAULT; | ||
| 84 | } | ||
| 85 | mutex_unlock(&session->mutex); | 79 | mutex_unlock(&session->mutex); |
| 86 | *ppos += tocopy; | 80 | |
| 87 | return tocopy; | 81 | return ret; |
| 88 | } | 82 | } |
| 89 | 83 | ||
| 90 | static ssize_t | 84 | static ssize_t |
| @@ -198,27 +192,23 @@ static int __init vmcp_init(void) | |||
| 198 | PRINT_WARN("z/VM CP interface is only available under z/VM\n"); | 192 | PRINT_WARN("z/VM CP interface is only available under z/VM\n"); |
| 199 | return -ENODEV; | 193 | return -ENODEV; |
| 200 | } | 194 | } |
| 195 | |||
| 201 | vmcp_debug = debug_register("vmcp", 1, 1, 240); | 196 | vmcp_debug = debug_register("vmcp", 1, 1, 240); |
| 202 | if (!vmcp_debug) { | 197 | if (!vmcp_debug) |
| 203 | PRINT_ERR("z/VM CP interface not loaded. Could not register " | ||
| 204 | "debug feature\n"); | ||
| 205 | return -ENOMEM; | 198 | return -ENOMEM; |
| 206 | } | 199 | |
| 207 | ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view); | 200 | ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view); |
| 208 | if (ret) { | 201 | if (ret) { |
| 209 | PRINT_ERR("z/VM CP interface not loaded. Could not register " | ||
| 210 | "debug feature view. Error code: %d\n", ret); | ||
| 211 | debug_unregister(vmcp_debug); | 202 | debug_unregister(vmcp_debug); |
| 212 | return ret; | 203 | return ret; |
| 213 | } | 204 | } |
| 205 | |||
| 214 | ret = misc_register(&vmcp_dev); | 206 | ret = misc_register(&vmcp_dev); |
| 215 | if (ret) { | 207 | if (ret) { |
| 216 | PRINT_ERR("z/VM CP interface not loaded. Could not register " | ||
| 217 | "misc device. Error code: %d\n", ret); | ||
| 218 | debug_unregister(vmcp_debug); | 208 | debug_unregister(vmcp_debug); |
| 219 | return ret; | 209 | return ret; |
| 220 | } | 210 | } |
| 221 | PRINT_INFO("z/VM CP interface loaded\n"); | 211 | |
| 222 | return 0; | 212 | return 0; |
| 223 | } | 213 | } |
| 224 | 214 | ||
| @@ -226,7 +216,6 @@ static void __exit vmcp_exit(void) | |||
| 226 | { | 216 | { |
| 227 | misc_deregister(&vmcp_dev); | 217 | misc_deregister(&vmcp_dev); |
| 228 | debug_unregister(vmcp_debug); | 218 | debug_unregister(vmcp_debug); |
| 229 | PRINT_INFO("z/VM CP interface unloaded.\n"); | ||
| 230 | } | 219 | } |
| 231 | 220 | ||
| 232 | module_init(vmcp_init); | 221 | module_init(vmcp_init); |
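
vmcp_read() above now leans on simple_read_from_buffer(), which bounds the copy by the bytes available, performs the copy_to_user() and advances *ppos in a single call, replacing the hand-rolled offset arithmetic. A sketch of the same pattern for a generic in-kernel response buffer follows; the demo_* names are illustrative, not vmcp's.

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mutex.h>

/* Illustrative state: a response buffer filled by some command elsewhere. */
struct demo_session {
        struct mutex mutex;
        char *response;
        size_t resp_size;       /* bytes the command actually produced */
        size_t bufsize;         /* bytes the buffer can hold */
};

static ssize_t demo_read(struct file *file, char __user *buff,
                         size_t count, loff_t *ppos)
{
        struct demo_session *s = file->private_data;
        size_t size;
        ssize_t ret;

        if (mutex_lock_interruptible(&s->mutex))
                return -ERESTARTSYS;
        if (!s->response) {
                mutex_unlock(&s->mutex);
                return 0;
        }
        /* Never expose more than the buffer holds, even if resp_size is larger. */
        size = min_t(size_t, s->resp_size, s->bufsize);
        ret = simple_read_from_buffer(buff, count, ppos, s->response, size);
        mutex_unlock(&s->mutex);
        return ret;
}
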
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 2c2428cc05d8..a246bc73ae64 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
| @@ -216,9 +216,7 @@ static int vmlogrdr_get_recording_class_AB(void) | |||
| 216 | char *tail; | 216 | char *tail; |
| 217 | int len,i; | 217 | int len,i; |
| 218 | 218 | ||
| 219 | printk (KERN_DEBUG "vmlogrdr: query command: %s\n", cp_command); | ||
| 220 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); | 219 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); |
| 221 | printk (KERN_DEBUG "vmlogrdr: response: %s", cp_response); | ||
| 222 | len = strnlen(cp_response,sizeof(cp_response)); | 220 | len = strnlen(cp_response,sizeof(cp_response)); |
| 223 | // now the parsing | 221 | // now the parsing |
| 224 | tail=strnchr(cp_response,len,'='); | 222 | tail=strnchr(cp_response,len,'='); |
| @@ -268,11 +266,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, | |||
| 268 | logptr->recording_name, | 266 | logptr->recording_name, |
| 269 | qid_string); | 267 | qid_string); |
| 270 | 268 | ||
| 271 | printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", | ||
| 272 | cp_command); | ||
| 273 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); | 269 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); |
| 274 | printk (KERN_DEBUG "vmlogrdr: recording response: %s", | ||
| 275 | cp_response); | ||
| 276 | } | 270 | } |
| 277 | 271 | ||
| 278 | memset(cp_command, 0x00, sizeof(cp_command)); | 272 | memset(cp_command, 0x00, sizeof(cp_command)); |
| @@ -282,10 +276,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, | |||
| 282 | onoff, | 276 | onoff, |
| 283 | qid_string); | 277 | qid_string); |
| 284 | 278 | ||
| 285 | printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command); | ||
| 286 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); | 279 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); |
| 287 | printk (KERN_DEBUG "vmlogrdr: recording response: %s", | ||
| 288 | cp_response); | ||
| 289 | /* The recording command will usually answer with 'Command complete' | 280 | /* The recording command will usually answer with 'Command complete' |
| 290 | * on success, but when the specific service was never connected | 281 | * on success, but when the specific service was never connected |
| 291 | * before then there might be an additional informational message | 282 | * before then there might be an additional informational message |
| @@ -567,10 +558,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev, | |||
| 567 | "RECORDING %s PURGE ", | 558 | "RECORDING %s PURGE ", |
| 568 | priv->recording_name); | 559 | priv->recording_name); |
| 569 | 560 | ||
| 570 | printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command); | ||
| 571 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); | 561 | cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); |
| 572 | printk (KERN_DEBUG "vmlogrdr: recording response: %s", | ||
| 573 | cp_response); | ||
| 574 | 562 | ||
| 575 | return count; | 563 | return count; |
| 576 | } | 564 | } |
| @@ -682,28 +670,20 @@ static int vmlogrdr_register_driver(void) | |||
| 682 | 670 | ||
| 683 | /* Register with iucv driver */ | 671 | /* Register with iucv driver */ |
| 684 | ret = iucv_register(&vmlogrdr_iucv_handler, 1); | 672 | ret = iucv_register(&vmlogrdr_iucv_handler, 1); |
| 685 | if (ret) { | 673 | if (ret) |
| 686 | printk (KERN_ERR "vmlogrdr: failed to register with " | ||
| 687 | "iucv driver\n"); | ||
| 688 | goto out; | 674 | goto out; |
| 689 | } | ||
| 690 | 675 | ||
| 691 | ret = driver_register(&vmlogrdr_driver); | 676 | ret = driver_register(&vmlogrdr_driver); |
| 692 | if (ret) { | 677 | if (ret) |
| 693 | printk(KERN_ERR "vmlogrdr: failed to register driver.\n"); | ||
| 694 | goto out_iucv; | 678 | goto out_iucv; |
| 695 | } | ||
| 696 | 679 | ||
| 697 | ret = driver_create_file(&vmlogrdr_driver, | 680 | ret = driver_create_file(&vmlogrdr_driver, |
| 698 | &driver_attr_recording_status); | 681 | &driver_attr_recording_status); |
| 699 | if (ret) { | 682 | if (ret) |
| 700 | printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n"); | ||
| 701 | goto out_driver; | 683 | goto out_driver; |
| 702 | } | ||
| 703 | 684 | ||
| 704 | vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); | 685 | vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); |
| 705 | if (IS_ERR(vmlogrdr_class)) { | 686 | if (IS_ERR(vmlogrdr_class)) { |
| 706 | printk(KERN_ERR "vmlogrdr: failed to create class.\n"); | ||
| 707 | ret = PTR_ERR(vmlogrdr_class); | 687 | ret = PTR_ERR(vmlogrdr_class); |
| 708 | vmlogrdr_class = NULL; | 688 | vmlogrdr_class = NULL; |
| 709 | goto out_attr; | 689 | goto out_attr; |
| @@ -871,12 +851,10 @@ static int __init vmlogrdr_init(void) | |||
| 871 | rc = vmlogrdr_register_cdev(dev); | 851 | rc = vmlogrdr_register_cdev(dev); |
| 872 | if (rc) | 852 | if (rc) |
| 873 | goto cleanup; | 853 | goto cleanup; |
| 874 | printk (KERN_INFO "vmlogrdr: driver loaded\n"); | ||
| 875 | return 0; | 854 | return 0; |
| 876 | 855 | ||
| 877 | cleanup: | 856 | cleanup: |
| 878 | vmlogrdr_cleanup(); | 857 | vmlogrdr_cleanup(); |
| 879 | printk (KERN_ERR "vmlogrdr: driver not loaded.\n"); | ||
| 880 | return rc; | 858 | return rc; |
| 881 | } | 859 | } |
| 882 | 860 | ||
| @@ -884,7 +862,6 @@ cleanup: | |||
| 884 | static void __exit vmlogrdr_exit(void) | 862 | static void __exit vmlogrdr_exit(void) |
| 885 | { | 863 | { |
| 886 | vmlogrdr_cleanup(); | 864 | vmlogrdr_cleanup(); |
| 887 | printk (KERN_INFO "vmlogrdr: driver unloaded\n"); | ||
| 888 | return; | 865 | return; |
| 889 | } | 866 | } |
| 890 | 867 | ||
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 83ae9a852f00..49cba9effe89 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c | |||
| @@ -277,7 +277,8 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
| 277 | struct urdev *urd; | 277 | struct urdev *urd; |
| 278 | 278 | ||
| 279 | TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n", | 279 | TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n", |
| 280 | intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count); | 280 | intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, |
| 281 | irb->scsw.cmd.count); | ||
| 281 | 282 | ||
| 282 | if (!intparm) { | 283 | if (!intparm) { |
| 283 | TRACE("ur_int_handler: unsolicited interrupt\n"); | 284 | TRACE("ur_int_handler: unsolicited interrupt\n"); |
| @@ -288,7 +289,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
| 288 | /* On special conditions irb is an error pointer */ | 289 | /* On special conditions irb is an error pointer */ |
| 289 | if (IS_ERR(irb)) | 290 | if (IS_ERR(irb)) |
| 290 | urd->io_request_rc = PTR_ERR(irb); | 291 | urd->io_request_rc = PTR_ERR(irb); |
| 291 | else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) | 292 | else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) |
| 292 | urd->io_request_rc = 0; | 293 | urd->io_request_rc = 0; |
| 293 | else | 294 | else |
| 294 | urd->io_request_rc = -EIO; | 295 | urd->io_request_rc = -EIO; |
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c index 19f8389291b6..56b3eab019cb 100644 --- a/drivers/s390/char/vmwatchdog.c +++ b/drivers/s390/char/vmwatchdog.c | |||
| @@ -92,23 +92,15 @@ static int vmwdt_keepalive(void) | |||
| 92 | 92 | ||
| 93 | func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init; | 93 | func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init; |
| 94 | ret = __diag288(func, vmwdt_interval, ebc_cmd, len); | 94 | ret = __diag288(func, vmwdt_interval, ebc_cmd, len); |
| 95 | WARN_ON(ret != 0); | ||
| 95 | kfree(ebc_cmd); | 96 | kfree(ebc_cmd); |
| 96 | |||
| 97 | if (ret) { | ||
| 98 | printk(KERN_WARNING "%s: problem setting interval %d, " | ||
| 99 | "cmd %s\n", __func__, vmwdt_interval, | ||
| 100 | vmwdt_cmd); | ||
| 101 | } | ||
| 102 | return ret; | 97 | return ret; |
| 103 | } | 98 | } |
| 104 | 99 | ||
| 105 | static int vmwdt_disable(void) | 100 | static int vmwdt_disable(void) |
| 106 | { | 101 | { |
| 107 | int ret = __diag288(wdt_cancel, 0, "", 0); | 102 | int ret = __diag288(wdt_cancel, 0, "", 0); |
| 108 | if (ret) { | 103 | WARN_ON(ret != 0); |
| 109 | printk(KERN_WARNING "%s: problem disabling watchdog\n", | ||
| 110 | __func__); | ||
| 111 | } | ||
| 112 | return ret; | 104 | return ret; |
| 113 | } | 105 | } |
| 114 | 106 | ||
| @@ -121,10 +113,8 @@ static int __init vmwdt_probe(void) | |||
| 121 | static char __initdata ebc_begin[] = { | 113 | static char __initdata ebc_begin[] = { |
| 122 | 194, 197, 199, 201, 213 | 114 | 194, 197, 199, 201, 213 |
| 123 | }; | 115 | }; |
| 124 | if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) { | 116 | if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) |
| 125 | printk(KERN_INFO "z/VM watchdog not available\n"); | ||
| 126 | return -EINVAL; | 117 | return -EINVAL; |
| 127 | } | ||
| 128 | return vmwdt_disable(); | 118 | return vmwdt_disable(); |
| 129 | } | 119 | } |
| 130 | 120 | ||
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index bbbd14e9d48f..047dd92ae804 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c | |||
| @@ -223,12 +223,10 @@ static int __init init_cpu_info(enum arch_id arch) | |||
| 223 | /* get info for boot cpu from lowcore, stored in the HSA */ | 223 | /* get info for boot cpu from lowcore, stored in the HSA */ |
| 224 | 224 | ||
| 225 | sa = kmalloc(sizeof(*sa), GFP_KERNEL); | 225 | sa = kmalloc(sizeof(*sa), GFP_KERNEL); |
| 226 | if (!sa) { | 226 | if (!sa) |
| 227 | ERROR_MSG("kmalloc failed: %s: %i\n",__func__, __LINE__); | ||
| 228 | return -ENOMEM; | 227 | return -ENOMEM; |
| 229 | } | ||
| 230 | if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { | 228 | if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { |
| 231 | ERROR_MSG("could not copy from HSA\n"); | 229 | TRACE("could not copy from HSA\n"); |
| 232 | kfree(sa); | 230 | kfree(sa); |
| 233 | return -EIO; | 231 | return -EIO; |
| 234 | } | 232 | } |
| @@ -511,6 +509,8 @@ static void __init set_s390x_lc_mask(union save_area *map) | |||
| 511 | */ | 509 | */ |
| 512 | static int __init sys_info_init(enum arch_id arch) | 510 | static int __init sys_info_init(enum arch_id arch) |
| 513 | { | 511 | { |
| 512 | int rc; | ||
| 513 | |||
| 514 | switch (arch) { | 514 | switch (arch) { |
| 515 | case ARCH_S390X: | 515 | case ARCH_S390X: |
| 516 | MSG("DETECTED 'S390X (64 bit) OS'\n"); | 516 | MSG("DETECTED 'S390X (64 bit) OS'\n"); |
| @@ -529,10 +529,9 @@ static int __init sys_info_init(enum arch_id arch) | |||
| 529 | return -EINVAL; | 529 | return -EINVAL; |
| 530 | } | 530 | } |
| 531 | sys_info.arch = arch; | 531 | sys_info.arch = arch; |
| 532 | if (init_cpu_info(arch)) { | 532 | rc = init_cpu_info(arch); |
| 533 | ERROR_MSG("get cpu info failed\n"); | 533 | if (rc) |
| 534 | return -ENOMEM; | 534 | return rc; |
| 535 | } | ||
| 536 | sys_info.mem_size = real_memory_size; | 535 | sys_info.mem_size = real_memory_size; |
| 537 | 536 | ||
| 538 | return 0; | 537 | return 0; |
| @@ -544,12 +543,12 @@ static int __init check_sdias(void) | |||
| 544 | 543 | ||
| 545 | rc = sclp_sdias_blk_count(); | 544 | rc = sclp_sdias_blk_count(); |
| 546 | if (rc < 0) { | 545 | if (rc < 0) { |
| 547 | ERROR_MSG("Could not determine HSA size\n"); | 546 | TRACE("Could not determine HSA size\n"); |
| 548 | return rc; | 547 | return rc; |
| 549 | } | 548 | } |
| 550 | act_hsa_size = (rc - 1) * PAGE_SIZE; | 549 | act_hsa_size = (rc - 1) * PAGE_SIZE; |
| 551 | if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { | 550 | if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { |
| 552 | ERROR_MSG("HSA size too small: %i\n", act_hsa_size); | 551 | TRACE("HSA size too small: %i\n", act_hsa_size); |
| 553 | return -EINVAL; | 552 | return -EINVAL; |
| 554 | } | 553 | } |
| 555 | return 0; | 554 | return 0; |
| @@ -590,16 +589,12 @@ static int __init zcore_init(void) | |||
| 590 | goto fail; | 589 | goto fail; |
| 591 | 590 | ||
| 592 | rc = check_sdias(); | 591 | rc = check_sdias(); |
| 593 | if (rc) { | 592 | if (rc) |
| 594 | ERROR_MSG("Dump initialization failed\n"); | ||
| 595 | goto fail; | 593 | goto fail; |
| 596 | } | ||
| 597 | 594 | ||
| 598 | rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); | 595 | rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); |
| 599 | if (rc) { | 596 | if (rc) |
| 600 | ERROR_MSG("sdial memcpy for arch id failed\n"); | ||
| 601 | goto fail; | 597 | goto fail; |
| 602 | } | ||
| 603 | 598 | ||
| 604 | #ifndef __s390x__ | 599 | #ifndef __s390x__ |
| 605 | if (arch == ARCH_S390X) { | 600 | if (arch == ARCH_S390X) { |
| @@ -610,10 +605,8 @@ static int __init zcore_init(void) | |||
| 610 | #endif | 605 | #endif |
| 611 | 606 | ||
| 612 | rc = sys_info_init(arch); | 607 | rc = sys_info_init(arch); |
| 613 | if (rc) { | 608 | if (rc) |
| 614 | ERROR_MSG("arch init failed\n"); | ||
| 615 | goto fail; | 609 | goto fail; |
| 616 | } | ||
| 617 | 610 | ||
| 618 | zcore_header_init(arch, &zcore_header); | 611 | zcore_header_init(arch, &zcore_header); |
| 619 | 612 | ||
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index cfaf77b320f5..91e9e3f3073a 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile | |||
| @@ -2,9 +2,11 @@ | |||
| 2 | # Makefile for the S/390 common i/o drivers | 2 | # Makefile for the S/390 common i/o drivers |
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o | 5 | obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \ |
| 6 | fcx.o itcw.o | ||
| 6 | ccw_device-objs += device.o device_fsm.o device_ops.o | 7 | ccw_device-objs += device.o device_fsm.o device_ops.o |
| 7 | ccw_device-objs += device_id.o device_pgid.o device_status.o | 8 | ccw_device-objs += device_id.o device_pgid.o device_status.o |
| 8 | obj-y += ccw_device.o cmf.o | 9 | obj-y += ccw_device.o cmf.o |
| 10 | obj-$(CONFIG_CHSC_SCH) += chsc_sch.o | ||
| 9 | obj-$(CONFIG_CCWGROUP) += ccwgroup.o | 11 | obj-$(CONFIG_CCWGROUP) += ccwgroup.o |
| 10 | obj-$(CONFIG_QDIO) += qdio.o | 12 | obj-$(CONFIG_QDIO) += qdio.o |
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c index b7a07a866291..fe6cea15bbaf 100644 --- a/drivers/s390/cio/airq.c +++ b/drivers/s390/cio/airq.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/rcupdate.h> | 15 | #include <linux/rcupdate.h> |
| 16 | 16 | ||
| 17 | #include <asm/airq.h> | 17 | #include <asm/airq.h> |
| 18 | #include <asm/isc.h> | ||
| 18 | 19 | ||
| 19 | #include "cio.h" | 20 | #include "cio.h" |
| 20 | #include "cio_debug.h" | 21 | #include "cio_debug.h" |
| @@ -33,15 +34,15 @@ struct airq_t { | |||
| 33 | void *drv_data; | 34 | void *drv_data; |
| 34 | }; | 35 | }; |
| 35 | 36 | ||
| 36 | static union indicator_t indicators; | 37 | static union indicator_t indicators[MAX_ISC]; |
| 37 | static struct airq_t *airqs[NR_AIRQS]; | 38 | static struct airq_t *airqs[MAX_ISC][NR_AIRQS]; |
| 38 | 39 | ||
| 39 | static int register_airq(struct airq_t *airq) | 40 | static int register_airq(struct airq_t *airq, u8 isc) |
| 40 | { | 41 | { |
| 41 | int i; | 42 | int i; |
| 42 | 43 | ||
| 43 | for (i = 0; i < NR_AIRQS; i++) | 44 | for (i = 0; i < NR_AIRQS; i++) |
| 44 | if (!cmpxchg(&airqs[i], NULL, airq)) | 45 | if (!cmpxchg(&airqs[isc][i], NULL, airq)) |
| 45 | return i; | 46 | return i; |
| 46 | return -ENOMEM; | 47 | return -ENOMEM; |
| 47 | } | 48 | } |
| @@ -50,18 +51,21 @@ static int register_airq(struct airq_t *airq) | |||
| 50 | * s390_register_adapter_interrupt() - register adapter interrupt handler | 51 | * s390_register_adapter_interrupt() - register adapter interrupt handler |
| 51 | * @handler: adapter handler to be registered | 52 | * @handler: adapter handler to be registered |
| 52 | * @drv_data: driver data passed with each call to the handler | 53 | * @drv_data: driver data passed with each call to the handler |
| 54 | * @isc: isc for which the handler should be called | ||
| 53 | * | 55 | * |
| 54 | * Returns: | 56 | * Returns: |
| 55 | * Pointer to the indicator to be used on success | 57 | * Pointer to the indicator to be used on success |
| 56 | * ERR_PTR() if registration failed | 58 | * ERR_PTR() if registration failed |
| 57 | */ | 59 | */ |
| 58 | void *s390_register_adapter_interrupt(adapter_int_handler_t handler, | 60 | void *s390_register_adapter_interrupt(adapter_int_handler_t handler, |
| 59 | void *drv_data) | 61 | void *drv_data, u8 isc) |
| 60 | { | 62 | { |
| 61 | struct airq_t *airq; | 63 | struct airq_t *airq; |
| 62 | char dbf_txt[16]; | 64 | char dbf_txt[16]; |
| 63 | int ret; | 65 | int ret; |
| 64 | 66 | ||
| 67 | if (isc > MAX_ISC) | ||
| 68 | return ERR_PTR(-EINVAL); | ||
| 65 | airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL); | 69 | airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL); |
| 66 | if (!airq) { | 70 | if (!airq) { |
| 67 | ret = -ENOMEM; | 71 | ret = -ENOMEM; |
| @@ -69,34 +73,35 @@ void *s390_register_adapter_interrupt(adapter_int_handler_t handler, | |||
| 69 | } | 73 | } |
| 70 | airq->handler = handler; | 74 | airq->handler = handler; |
| 71 | airq->drv_data = drv_data; | 75 | airq->drv_data = drv_data; |
| 72 | ret = register_airq(airq); | 76 | |
| 73 | if (ret < 0) | 77 | ret = register_airq(airq, isc); |
| 74 | kfree(airq); | ||
| 75 | out: | 78 | out: |
| 76 | snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret); | 79 | snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret); |
| 77 | CIO_TRACE_EVENT(4, dbf_txt); | 80 | CIO_TRACE_EVENT(4, dbf_txt); |
| 78 | if (ret < 0) | 81 | if (ret < 0) { |
| 82 | kfree(airq); | ||
| 79 | return ERR_PTR(ret); | 83 | return ERR_PTR(ret); |
| 80 | else | 84 | } else |
| 81 | return &indicators.byte[ret]; | 85 | return &indicators[isc].byte[ret]; |
| 82 | } | 86 | } |
| 83 | EXPORT_SYMBOL(s390_register_adapter_interrupt); | 87 | EXPORT_SYMBOL(s390_register_adapter_interrupt); |
| 84 | 88 | ||
| 85 | /** | 89 | /** |
| 86 | * s390_unregister_adapter_interrupt - unregister adapter interrupt handler | 90 | * s390_unregister_adapter_interrupt - unregister adapter interrupt handler |
| 87 | * @ind: indicator for which the handler is to be unregistered | 91 | * @ind: indicator for which the handler is to be unregistered |
| 92 | * @isc: interruption subclass | ||
| 88 | */ | 93 | */ |
| 89 | void s390_unregister_adapter_interrupt(void *ind) | 94 | void s390_unregister_adapter_interrupt(void *ind, u8 isc) |
| 90 | { | 95 | { |
| 91 | struct airq_t *airq; | 96 | struct airq_t *airq; |
| 92 | char dbf_txt[16]; | 97 | char dbf_txt[16]; |
| 93 | int i; | 98 | int i; |
| 94 | 99 | ||
| 95 | i = (int) ((addr_t) ind) - ((addr_t) &indicators.byte[0]); | 100 | i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]); |
| 96 | snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i); | 101 | snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i); |
| 97 | CIO_TRACE_EVENT(4, dbf_txt); | 102 | CIO_TRACE_EVENT(4, dbf_txt); |
| 98 | indicators.byte[i] = 0; | 103 | indicators[isc].byte[i] = 0; |
| 99 | airq = xchg(&airqs[i], NULL); | 104 | airq = xchg(&airqs[isc][i], NULL); |
| 100 | /* | 105 | /* |
| 101 | * Allow interrupts to complete. This will ensure that the airq handle | 106 | * Allow interrupts to complete. This will ensure that the airq handle |
| 102 | * is no longer referenced by any interrupt handler. | 107 | * is no longer referenced by any interrupt handler. |
| @@ -108,7 +113,7 @@ EXPORT_SYMBOL(s390_unregister_adapter_interrupt); | |||
| 108 | 113 | ||
| 109 | #define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8)) | 114 | #define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8)) |
| 110 | 115 | ||
| 111 | void do_adapter_IO(void) | 116 | void do_adapter_IO(u8 isc) |
| 112 | { | 117 | { |
| 113 | int w; | 118 | int w; |
| 114 | int i; | 119 | int i; |
| @@ -120,22 +125,22 @@ void do_adapter_IO(void) | |||
| 120 | * fetch operations. | 125 | * fetch operations. |
| 121 | */ | 126 | */ |
| 122 | for (w = 0; w < NR_AIRQ_WORDS; w++) { | 127 | for (w = 0; w < NR_AIRQ_WORDS; w++) { |
| 123 | word = indicators.word[w]; | 128 | word = indicators[isc].word[w]; |
| 124 | i = w * NR_AIRQS_PER_WORD; | 129 | i = w * NR_AIRQS_PER_WORD; |
| 125 | /* | 130 | /* |
| 126 | * Check bytes within word for active indicators. | 131 | * Check bytes within word for active indicators. |
| 127 | */ | 132 | */ |
| 128 | while (word) { | 133 | while (word) { |
| 129 | if (word & INDICATOR_MASK) { | 134 | if (word & INDICATOR_MASK) { |
| 130 | airq = airqs[i]; | 135 | airq = airqs[isc][i]; |
| 131 | if (likely(airq)) | 136 | if (likely(airq)) |
| 132 | airq->handler(&indicators.byte[i], | 137 | airq->handler(&indicators[isc].byte[i], |
| 133 | airq->drv_data); | 138 | airq->drv_data); |
| 134 | else | 139 | else |
| 135 | /* | 140 | /* |
| 136 | * Reset ill-behaved indicator. | 141 | * Reset ill-behaved indicator. |
| 137 | */ | 142 | */ |
| 138 | indicators.byte[i] = 0; | 143 | indicators[isc].byte[i] = 0; |
| 139 | } | 144 | } |
| 140 | word <<= 8; | 145 | word <<= 8; |
| 141 | i++; | 146 | i++; |
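
With the change above, adapter interrupt indicators and handler slots are kept per interruption subclass, so both registration calls take an isc argument and different subclasses get independent indicator bytes. The sketch below shows how a driver would use the extended interface; the demo_* names and the way the ISC is chosen are illustrative, and the handler prototype simply mirrors the two-argument call made from do_adapter_IO() above.

#include <linux/err.h>
#include <linux/types.h>
#include <asm/airq.h>
#include <asm/isc.h>

static void demo_adapter_handler(void *indicator, void *drv_data)
{
        /* called from do_adapter_IO() when this driver's indicator byte is set */
}

static void *demo_indicator;

static int demo_setup(u8 isc)           /* an ISC the driver owns */
{
        demo_indicator = s390_register_adapter_interrupt(demo_adapter_handler,
                                                         NULL /* drv_data */,
                                                         isc);
        if (IS_ERR(demo_indicator))
                return PTR_ERR(demo_indicator);
        return 0;
}

static void demo_teardown(u8 isc)
{
        /* must pass the same ISC so the right indicator array is cleaned up */
        s390_unregister_adapter_interrupt(demo_indicator, isc);
}
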
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 297cdceb0ca4..db00b0591733 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <asm/chpid.h> | 18 | #include <asm/chpid.h> |
| 19 | #include <asm/sclp.h> | 19 | #include <asm/sclp.h> |
| 20 | 20 | ||
| 21 | #include "../s390mach.h" | ||
| 21 | #include "cio.h" | 22 | #include "cio.h" |
| 22 | #include "css.h" | 23 | #include "css.h" |
| 23 | #include "ioasm.h" | 24 | #include "ioasm.h" |
| @@ -94,6 +95,7 @@ u8 chp_get_sch_opm(struct subchannel *sch) | |||
| 94 | } | 95 | } |
| 95 | return opm; | 96 | return opm; |
| 96 | } | 97 | } |
| 98 | EXPORT_SYMBOL_GPL(chp_get_sch_opm); | ||
| 97 | 99 | ||
| 98 | /** | 100 | /** |
| 99 | * chp_is_registered - check if a channel-path is registered | 101 | * chp_is_registered - check if a channel-path is registered |
| @@ -121,11 +123,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on) | |||
| 121 | CIO_TRACE_EVENT(2, dbf_text); | 123 | CIO_TRACE_EVENT(2, dbf_text); |
| 122 | 124 | ||
| 123 | status = chp_get_status(chpid); | 125 | status = chp_get_status(chpid); |
| 124 | if (!on && !status) { | 126 | if (!on && !status) |
| 125 | printk(KERN_ERR "cio: chpid %x.%02x is already offline\n", | 127 | return 0; |
| 126 | chpid.cssid, chpid.id); | ||
| 127 | return -EINVAL; | ||
| 128 | } | ||
| 129 | 128 | ||
| 130 | set_chp_logically_online(chpid, on); | 129 | set_chp_logically_online(chpid, on); |
| 131 | chsc_chp_vary(chpid, on); | 130 | chsc_chp_vary(chpid, on); |
| @@ -141,21 +140,14 @@ static ssize_t chp_measurement_chars_read(struct kobject *kobj, | |||
| 141 | { | 140 | { |
| 142 | struct channel_path *chp; | 141 | struct channel_path *chp; |
| 143 | struct device *device; | 142 | struct device *device; |
| 144 | unsigned int size; | ||
| 145 | 143 | ||
| 146 | device = container_of(kobj, struct device, kobj); | 144 | device = container_of(kobj, struct device, kobj); |
| 147 | chp = to_channelpath(device); | 145 | chp = to_channelpath(device); |
| 148 | if (!chp->cmg_chars) | 146 | if (!chp->cmg_chars) |
| 149 | return 0; | 147 | return 0; |
| 150 | 148 | ||
| 151 | size = sizeof(struct cmg_chars); | 149 | return memory_read_from_buffer(buf, count, &off, |
| 152 | 150 | chp->cmg_chars, sizeof(struct cmg_chars)); | |
| 153 | if (off > size) | ||
| 154 | return 0; | ||
| 155 | if (off + count > size) | ||
| 156 | count = size - off; | ||
| 157 | memcpy(buf, chp->cmg_chars + off, count); | ||
| 158 | return count; | ||
| 159 | } | 151 | } |
| 160 | 152 | ||
| 161 | static struct bin_attribute chp_measurement_chars_attr = { | 153 | static struct bin_attribute chp_measurement_chars_attr = { |
| @@ -405,7 +397,7 @@ int chp_new(struct chp_id chpid) | |||
| 405 | chpid.id); | 397 | chpid.id); |
| 406 | 398 | ||
| 407 | /* Obtain channel path description and fill it in. */ | 399 | /* Obtain channel path description and fill it in. */ |
| 408 | ret = chsc_determine_channel_path_description(chpid, &chp->desc); | 400 | ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc); |
| 409 | if (ret) | 401 | if (ret) |
| 410 | goto out_free; | 402 | goto out_free; |
| 411 | if ((chp->desc.flags & 0x80) == 0) { | 403 | if ((chp->desc.flags & 0x80) == 0) { |
| @@ -413,8 +405,7 @@ int chp_new(struct chp_id chpid) | |||
| 413 | goto out_free; | 405 | goto out_free; |
| 414 | } | 406 | } |
| 415 | /* Get channel-measurement characteristics. */ | 407 | /* Get channel-measurement characteristics. */ |
| 416 | if (css_characteristics_avail && css_chsc_characteristics.scmc | 408 | if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) { |
| 417 | && css_chsc_characteristics.secm) { | ||
| 418 | ret = chsc_get_channel_measurement_chars(chp); | 409 | ret = chsc_get_channel_measurement_chars(chp); |
| 419 | if (ret) | 410 | if (ret) |
| 420 | goto out_free; | 411 | goto out_free; |
| @@ -476,26 +467,74 @@ void *chp_get_chp_desc(struct chp_id chpid) | |||
| 476 | 467 | ||
| 477 | /** | 468 | /** |
| 478 | * chp_process_crw - process channel-path status change | 469 | * chp_process_crw - process channel-path status change |
| 479 | * @id: channel-path ID number | 470 | * @crw0: channel report-word to handler |
| 480 | * @status: non-zero if channel-path has become available, zero otherwise | 471 | * @crw1: second channel-report word (always NULL) |
| 472 | * @overflow: crw overflow indication | ||
| 481 | * | 473 | * |
| 482 | * Handle channel-report-words indicating that the status of a channel-path | 474 | * Handle channel-report-words indicating that the status of a channel-path |
| 483 | * has changed. | 475 | * has changed. |
| 484 | */ | 476 | */ |
| 485 | void chp_process_crw(int id, int status) | 477 | static void chp_process_crw(struct crw *crw0, struct crw *crw1, |
| 478 | int overflow) | ||
| 486 | { | 479 | { |
| 487 | struct chp_id chpid; | 480 | struct chp_id chpid; |
| 488 | 481 | ||
| 482 | if (overflow) { | ||
| 483 | css_schedule_eval_all(); | ||
| 484 | return; | ||
| 485 | } | ||
| 486 | CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, " | ||
| 487 | "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", | ||
| 488 | crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, | ||
| 489 | crw0->erc, crw0->rsid); | ||
| 490 | /* | ||
| 491 | * Check for solicited machine checks. These are | ||
| 492 | * created by reset channel path and need not be | ||
| 493 | * handled here. | ||
| 494 | */ | ||
| 495 | if (crw0->slct) { | ||
| 496 | CIO_CRW_EVENT(2, "solicited machine check for " | ||
| 497 | "channel path %02X\n", crw0->rsid); | ||
| 498 | return; | ||
| 499 | } | ||
| 489 | chp_id_init(&chpid); | 500 | chp_id_init(&chpid); |
| 490 | chpid.id = id; | 501 | chpid.id = crw0->rsid; |
| 491 | if (status) { | 502 | switch (crw0->erc) { |
| 503 | case CRW_ERC_IPARM: /* Path has come. */ | ||
| 492 | if (!chp_is_registered(chpid)) | 504 | if (!chp_is_registered(chpid)) |
| 493 | chp_new(chpid); | 505 | chp_new(chpid); |
| 494 | chsc_chp_online(chpid); | 506 | chsc_chp_online(chpid); |
| 495 | } else | 507 | break; |
| 508 | case CRW_ERC_PERRI: /* Path has gone. */ | ||
| 509 | case CRW_ERC_PERRN: | ||
| 496 | chsc_chp_offline(chpid); | 510 | chsc_chp_offline(chpid); |
| 511 | break; | ||
| 512 | default: | ||
| 513 | CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n", | ||
| 514 | crw0->erc); | ||
| 515 | } | ||
| 497 | } | 516 | } |
| 498 | 517 | ||
| 518 | int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link) | ||
| 519 | { | ||
| 520 | int i; | ||
| 521 | int mask; | ||
| 522 | |||
| 523 | for (i = 0; i < 8; i++) { | ||
| 524 | mask = 0x80 >> i; | ||
| 525 | if (!(ssd->path_mask & mask)) | ||
| 526 | continue; | ||
| 527 | if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid)) | ||
| 528 | continue; | ||
| 529 | if ((ssd->fla_valid_mask & mask) && | ||
| 530 | ((ssd->fla[i] & link->fla_mask) != link->fla)) | ||
| 531 | continue; | ||
| 532 | return mask; | ||
| 533 | } | ||
| 534 | return 0; | ||
| 535 | } | ||
| 536 | EXPORT_SYMBOL_GPL(chp_ssd_get_mask); | ||
| 537 | |||
| 499 | static inline int info_bit_num(struct chp_id id) | 538 | static inline int info_bit_num(struct chp_id id) |
| 500 | { | 539 | { |
| 501 | return id.id + id.cssid * (__MAX_CHPID + 1); | 540 | return id.id + id.cssid * (__MAX_CHPID + 1); |
| @@ -575,6 +614,7 @@ static void cfg_func(struct work_struct *work) | |||
| 575 | { | 614 | { |
| 576 | struct chp_id chpid; | 615 | struct chp_id chpid; |
| 577 | enum cfg_task_t t; | 616 | enum cfg_task_t t; |
| 617 | int rc; | ||
| 578 | 618 | ||
| 579 | mutex_lock(&cfg_lock); | 619 | mutex_lock(&cfg_lock); |
| 580 | t = cfg_none; | 620 | t = cfg_none; |
| @@ -589,14 +629,24 @@ static void cfg_func(struct work_struct *work) | |||
| 589 | 629 | ||
| 590 | switch (t) { | 630 | switch (t) { |
| 591 | case cfg_configure: | 631 | case cfg_configure: |
| 592 | sclp_chp_configure(chpid); | 632 | rc = sclp_chp_configure(chpid); |
| 593 | info_expire(); | 633 | if (rc) |
| 594 | chsc_chp_online(chpid); | 634 | CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)=" |
| 635 | "%d\n", chpid.cssid, chpid.id, rc); | ||
| 636 | else { | ||
| 637 | info_expire(); | ||
| 638 | chsc_chp_online(chpid); | ||
| 639 | } | ||
| 595 | break; | 640 | break; |
| 596 | case cfg_deconfigure: | 641 | case cfg_deconfigure: |
| 597 | sclp_chp_deconfigure(chpid); | 642 | rc = sclp_chp_deconfigure(chpid); |
| 598 | info_expire(); | 643 | if (rc) |
| 599 | chsc_chp_offline(chpid); | 644 | CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)=" |
| 645 | "%d\n", chpid.cssid, chpid.id, rc); | ||
| 646 | else { | ||
| 647 | info_expire(); | ||
| 648 | chsc_chp_offline(chpid); | ||
| 649 | } | ||
| 600 | break; | 650 | break; |
| 601 | case cfg_none: | 651 | case cfg_none: |
| 602 | /* Get updated information after last change. */ | 652 | /* Get updated information after last change. */ |
| @@ -654,10 +704,16 @@ static int cfg_wait_idle(void) | |||
| 654 | static int __init chp_init(void) | 704 | static int __init chp_init(void) |
| 655 | { | 705 | { |
| 656 | struct chp_id chpid; | 706 | struct chp_id chpid; |
| 707 | int ret; | ||
| 657 | 708 | ||
| 709 | ret = s390_register_crw_handler(CRW_RSC_CPATH, chp_process_crw); | ||
| 710 | if (ret) | ||
| 711 | return ret; | ||
| 658 | chp_wq = create_singlethread_workqueue("cio_chp"); | 712 | chp_wq = create_singlethread_workqueue("cio_chp"); |
| 659 | if (!chp_wq) | 713 | if (!chp_wq) { |
| 714 | s390_unregister_crw_handler(CRW_RSC_CPATH); | ||
| 660 | return -ENOMEM; | 715 | return -ENOMEM; |
| 716 | } | ||
| 661 | INIT_WORK(&cfg_work, cfg_func); | 717 | INIT_WORK(&cfg_work, cfg_func); |
| 662 | init_waitqueue_head(&cfg_wait_queue); | 718 | init_waitqueue_head(&cfg_wait_queue); |
| 663 | if (info_update()) | 719 | if (info_update()) |
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h index 65286563c592..26c3d2246176 100644 --- a/drivers/s390/cio/chp.h +++ b/drivers/s390/cio/chp.h | |||
| @@ -12,12 +12,24 @@ | |||
| 12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
| 13 | #include <asm/chpid.h> | 13 | #include <asm/chpid.h> |
| 14 | #include "chsc.h" | 14 | #include "chsc.h" |
| 15 | #include "css.h" | ||
| 15 | 16 | ||
| 16 | #define CHP_STATUS_STANDBY 0 | 17 | #define CHP_STATUS_STANDBY 0 |
| 17 | #define CHP_STATUS_CONFIGURED 1 | 18 | #define CHP_STATUS_CONFIGURED 1 |
| 18 | #define CHP_STATUS_RESERVED 2 | 19 | #define CHP_STATUS_RESERVED 2 |
| 19 | #define CHP_STATUS_NOT_RECOGNIZED 3 | 20 | #define CHP_STATUS_NOT_RECOGNIZED 3 |
| 20 | 21 | ||
| 22 | #define CHP_ONLINE 0 | ||
| 23 | #define CHP_OFFLINE 1 | ||
| 24 | #define CHP_VARY_ON 2 | ||
| 25 | #define CHP_VARY_OFF 3 | ||
| 26 | |||
| 27 | struct chp_link { | ||
| 28 | struct chp_id chpid; | ||
| 29 | u32 fla_mask; | ||
| 30 | u16 fla; | ||
| 31 | }; | ||
| 32 | |||
| 21 | static inline int chp_test_bit(u8 *bitmap, int num) | 33 | static inline int chp_test_bit(u8 *bitmap, int num) |
| 22 | { | 34 | { |
| 23 | int byte = num >> 3; | 35 | int byte = num >> 3; |
| @@ -42,12 +54,11 @@ int chp_get_status(struct chp_id chpid); | |||
| 42 | u8 chp_get_sch_opm(struct subchannel *sch); | 54 | u8 chp_get_sch_opm(struct subchannel *sch); |
| 43 | int chp_is_registered(struct chp_id chpid); | 55 | int chp_is_registered(struct chp_id chpid); |
| 44 | void *chp_get_chp_desc(struct chp_id chpid); | 56 | void *chp_get_chp_desc(struct chp_id chpid); |
| 45 | void chp_process_crw(int id, int available); | ||
| 46 | void chp_remove_cmg_attr(struct channel_path *chp); | 57 | void chp_remove_cmg_attr(struct channel_path *chp); |
| 47 | int chp_add_cmg_attr(struct channel_path *chp); | 58 | int chp_add_cmg_attr(struct channel_path *chp); |
| 48 | int chp_new(struct chp_id chpid); | 59 | int chp_new(struct chp_id chpid); |
| 49 | void chp_cfg_schedule(struct chp_id chpid, int configure); | 60 | void chp_cfg_schedule(struct chp_id chpid, int configure); |
| 50 | void chp_cfg_cancel_deconfigure(struct chp_id chpid); | 61 | void chp_cfg_cancel_deconfigure(struct chp_id chpid); |
| 51 | int chp_info_get_status(struct chp_id chpid); | 62 | int chp_info_get_status(struct chp_id chpid); |
| 52 | 63 | int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *); | |
| 53 | #endif /* S390_CHP_H */ | 64 | #endif /* S390_CHP_H */ |
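
struct chp_link above generalizes what used to be chsc.c's private res_acc_data (a chpid plus an optional full-link-address filter), and chp_ssd_get_mask() maps such a link onto a subchannel's ssd path mask. The sketch below is a hypothetical chp_event callback showing the intended use; a real I/O subchannel driver would do considerably more in each branch, and returning non-zero for CHP_OFFLINE asks the caller to unregister the subchannel, as in s390_subchannel_remove_chpid() further below.

#include "css.h"        /* struct subchannel, ssd_info */
#include "chp.h"        /* struct chp_link, CHP_* events, chp_ssd_get_mask() */

static int demo_chp_event(struct subchannel *sch, void *data, int event)
{
        struct chp_link *link = data;
        int mask;

        /* Which of this subchannel's paths does the reported link affect? */
        mask = chp_ssd_get_mask(&sch->ssd_info, link);
        if (!mask)
                return 0;       /* this subchannel does not use the path */

        switch (event) {
        case CHP_OFFLINE:
                /* stop using the path; non-zero return requests unregistration */
                break;
        case CHP_ONLINE:
                /* path (or link) became available again; reverify the device */
                break;
        }
        return 0;
}
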
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 5de86908b0d0..65264a38057d 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
| @@ -2,8 +2,7 @@ | |||
| 2 | * drivers/s390/cio/chsc.c | 2 | * drivers/s390/cio/chsc.c |
| 3 | * S/390 common I/O routines -- channel subsystem call | 3 | * S/390 common I/O routines -- channel subsystem call |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, | 5 | * Copyright IBM Corp. 1999,2008 |
| 6 | * IBM Corporation | ||
| 7 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | 6 | * Author(s): Ingo Adlung (adlung@de.ibm.com) |
| 8 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
| 9 | * Arnd Bergmann (arndb@de.ibm.com) | 8 | * Arnd Bergmann (arndb@de.ibm.com) |
| @@ -16,7 +15,9 @@ | |||
| 16 | 15 | ||
| 17 | #include <asm/cio.h> | 16 | #include <asm/cio.h> |
| 18 | #include <asm/chpid.h> | 17 | #include <asm/chpid.h> |
| 18 | #include <asm/chsc.h> | ||
| 19 | 19 | ||
| 20 | #include "../s390mach.h" | ||
| 20 | #include "css.h" | 21 | #include "css.h" |
| 21 | #include "cio.h" | 22 | #include "cio.h" |
| 22 | #include "cio_debug.h" | 23 | #include "cio_debug.h" |
| @@ -127,77 +128,12 @@ out_free: | |||
| 127 | return ret; | 128 | return ret; |
| 128 | } | 129 | } |
| 129 | 130 | ||
| 130 | static int check_for_io_on_path(struct subchannel *sch, int mask) | ||
| 131 | { | ||
| 132 | int cc; | ||
| 133 | |||
| 134 | cc = stsch(sch->schid, &sch->schib); | ||
| 135 | if (cc) | ||
| 136 | return 0; | ||
| 137 | if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask) | ||
| 138 | return 1; | ||
| 139 | return 0; | ||
| 140 | } | ||
| 141 | |||
| 142 | static void terminate_internal_io(struct subchannel *sch) | ||
| 143 | { | ||
| 144 | if (cio_clear(sch)) { | ||
| 145 | /* Recheck device in case clear failed. */ | ||
| 146 | sch->lpm = 0; | ||
| 147 | if (device_trigger_verify(sch) != 0) | ||
| 148 | css_schedule_eval(sch->schid); | ||
| 149 | return; | ||
| 150 | } | ||
| 151 | /* Request retry of internal operation. */ | ||
| 152 | device_set_intretry(sch); | ||
| 153 | /* Call handler. */ | ||
| 154 | if (sch->driver && sch->driver->termination) | ||
| 155 | sch->driver->termination(sch); | ||
| 156 | } | ||
| 157 | |||
| 158 | static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) | 131 | static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) |
| 159 | { | 132 | { |
| 160 | int j; | ||
| 161 | int mask; | ||
| 162 | struct chp_id *chpid = data; | ||
| 163 | struct schib schib; | ||
| 164 | |||
| 165 | for (j = 0; j < 8; j++) { | ||
| 166 | mask = 0x80 >> j; | ||
| 167 | if ((sch->schib.pmcw.pim & mask) && | ||
| 168 | (sch->schib.pmcw.chpid[j] == chpid->id)) | ||
| 169 | break; | ||
| 170 | } | ||
| 171 | if (j >= 8) | ||
| 172 | return 0; | ||
| 173 | |||
| 174 | spin_lock_irq(sch->lock); | 133 | spin_lock_irq(sch->lock); |
| 175 | 134 | if (sch->driver && sch->driver->chp_event) | |
| 176 | stsch(sch->schid, &schib); | 135 | if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0) |
| 177 | if (!css_sch_is_valid(&schib)) | ||
| 178 | goto out_unreg; | ||
| 179 | memcpy(&sch->schib, &schib, sizeof(struct schib)); | ||
| 180 | /* Check for single path devices. */ | ||
| 181 | if (sch->schib.pmcw.pim == 0x80) | ||
| 182 | goto out_unreg; | ||
| 183 | |||
| 184 | if (check_for_io_on_path(sch, mask)) { | ||
| 185 | if (device_is_online(sch)) | ||
| 186 | device_kill_io(sch); | ||
| 187 | else { | ||
| 188 | terminate_internal_io(sch); | ||
| 189 | /* Re-start path verification. */ | ||
| 190 | if (sch->driver && sch->driver->verify) | ||
| 191 | sch->driver->verify(sch); | ||
| 192 | } | ||
| 193 | } else { | ||
| 194 | /* trigger path verification. */ | ||
| 195 | if (sch->driver && sch->driver->verify) | ||
| 196 | sch->driver->verify(sch); | ||
| 197 | else if (sch->lpm == mask) | ||
| 198 | goto out_unreg; | 136 | goto out_unreg; |
| 199 | } | ||
| 200 | |||
| 201 | spin_unlock_irq(sch->lock); | 137 | spin_unlock_irq(sch->lock); |
| 202 | return 0; | 138 | return 0; |
| 203 | 139 | ||
| @@ -211,15 +147,18 @@ out_unreg: | |||
| 211 | void chsc_chp_offline(struct chp_id chpid) | 147 | void chsc_chp_offline(struct chp_id chpid) |
| 212 | { | 148 | { |
| 213 | char dbf_txt[15]; | 149 | char dbf_txt[15]; |
| 150 | struct chp_link link; | ||
| 214 | 151 | ||
| 215 | sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); | 152 | sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); |
| 216 | CIO_TRACE_EVENT(2, dbf_txt); | 153 | CIO_TRACE_EVENT(2, dbf_txt); |
| 217 | 154 | ||
| 218 | if (chp_get_status(chpid) <= 0) | 155 | if (chp_get_status(chpid) <= 0) |
| 219 | return; | 156 | return; |
| 157 | memset(&link, 0, sizeof(struct chp_link)); | ||
| 158 | link.chpid = chpid; | ||
| 220 | /* Wait until previous actions have settled. */ | 159 | /* Wait until previous actions have settled. */ |
| 221 | css_wait_for_slow_path(); | 160 | css_wait_for_slow_path(); |
| 222 | for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid); | 161 | for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); |
| 223 | } | 162 | } |
| 224 | 163 | ||
| 225 | static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) | 164 | static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) |
| @@ -242,67 +181,25 @@ static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) | |||
| 242 | return 0; | 181 | return 0; |
| 243 | } | 182 | } |
| 244 | 183 | ||
| 245 | struct res_acc_data { | ||
| 246 | struct chp_id chpid; | ||
| 247 | u32 fla_mask; | ||
| 248 | u16 fla; | ||
| 249 | }; | ||
| 250 | |||
| 251 | static int get_res_chpid_mask(struct chsc_ssd_info *ssd, | ||
| 252 | struct res_acc_data *data) | ||
| 253 | { | ||
| 254 | int i; | ||
| 255 | int mask; | ||
| 256 | |||
| 257 | for (i = 0; i < 8; i++) { | ||
| 258 | mask = 0x80 >> i; | ||
| 259 | if (!(ssd->path_mask & mask)) | ||
| 260 | continue; | ||
| 261 | if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid)) | ||
| 262 | continue; | ||
| 263 | if ((ssd->fla_valid_mask & mask) && | ||
| 264 | ((ssd->fla[i] & data->fla_mask) != data->fla)) | ||
| 265 | continue; | ||
| 266 | return mask; | ||
| 267 | } | ||
| 268 | return 0; | ||
| 269 | } | ||
| 270 | |||
| 271 | static int __s390_process_res_acc(struct subchannel *sch, void *data) | 184 | static int __s390_process_res_acc(struct subchannel *sch, void *data) |
| 272 | { | 185 | { |
| 273 | int chp_mask, old_lpm; | ||
| 274 | struct res_acc_data *res_data = data; | ||
| 275 | |||
| 276 | spin_lock_irq(sch->lock); | 186 | spin_lock_irq(sch->lock); |
| 277 | chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data); | 187 | if (sch->driver && sch->driver->chp_event) |
| 278 | if (chp_mask == 0) | 188 | sch->driver->chp_event(sch, data, CHP_ONLINE); |
| 279 | goto out; | ||
| 280 | if (stsch(sch->schid, &sch->schib)) | ||
| 281 | goto out; | ||
| 282 | old_lpm = sch->lpm; | ||
| 283 | sch->lpm = ((sch->schib.pmcw.pim & | ||
| 284 | sch->schib.pmcw.pam & | ||
| 285 | sch->schib.pmcw.pom) | ||
| 286 | | chp_mask) & sch->opm; | ||
| 287 | if (!old_lpm && sch->lpm) | ||
| 288 | device_trigger_reprobe(sch); | ||
| 289 | else if (sch->driver && sch->driver->verify) | ||
| 290 | sch->driver->verify(sch); | ||
| 291 | out: | ||
| 292 | spin_unlock_irq(sch->lock); | 189 | spin_unlock_irq(sch->lock); |
| 293 | 190 | ||
| 294 | return 0; | 191 | return 0; |
| 295 | } | 192 | } |
| 296 | 193 | ||
| 297 | static void s390_process_res_acc (struct res_acc_data *res_data) | 194 | static void s390_process_res_acc(struct chp_link *link) |
| 298 | { | 195 | { |
| 299 | char dbf_txt[15]; | 196 | char dbf_txt[15]; |
| 300 | 197 | ||
| 301 | sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid, | 198 | sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid, |
| 302 | res_data->chpid.id); | 199 | link->chpid.id); |
| 303 | CIO_TRACE_EVENT( 2, dbf_txt); | 200 | CIO_TRACE_EVENT( 2, dbf_txt); |
| 304 | if (res_data->fla != 0) { | 201 | if (link->fla != 0) { |
| 305 | sprintf(dbf_txt, "fla%x", res_data->fla); | 202 | sprintf(dbf_txt, "fla%x", link->fla); |
| 306 | CIO_TRACE_EVENT( 2, dbf_txt); | 203 | CIO_TRACE_EVENT( 2, dbf_txt); |
| 307 | } | 204 | } |
| 308 | /* Wait until previous actions have settled. */ | 205 | /* Wait until previous actions have settled. */ |
| @@ -315,7 +212,7 @@ static void s390_process_res_acc (struct res_acc_data *res_data) | |||
| 315 | * will we have to do. | 212 | * will we have to do. |
| 316 | */ | 213 | */ |
| 317 | for_each_subchannel_staged(__s390_process_res_acc, | 214 | for_each_subchannel_staged(__s390_process_res_acc, |
| 318 | s390_process_res_acc_new_sch, res_data); | 215 | s390_process_res_acc_new_sch, link); |
| 319 | } | 216 | } |
| 320 | 217 | ||
| 321 | static int | 218 | static int |
| @@ -388,7 +285,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) | |||
| 388 | 285 | ||
| 389 | static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) | 286 | static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) |
| 390 | { | 287 | { |
| 391 | struct res_acc_data res_data; | 288 | struct chp_link link; |
| 392 | struct chp_id chpid; | 289 | struct chp_id chpid; |
| 393 | int status; | 290 | int status; |
| 394 | 291 | ||
| @@ -404,18 +301,18 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) | |||
| 404 | chp_new(chpid); | 301 | chp_new(chpid); |
| 405 | else if (!status) | 302 | else if (!status) |
| 406 | return; | 303 | return; |
| 407 | memset(&res_data, 0, sizeof(struct res_acc_data)); | 304 | memset(&link, 0, sizeof(struct chp_link)); |
| 408 | res_data.chpid = chpid; | 305 | link.chpid = chpid; |
| 409 | if ((sei_area->vf & 0xc0) != 0) { | 306 | if ((sei_area->vf & 0xc0) != 0) { |
| 410 | res_data.fla = sei_area->fla; | 307 | link.fla = sei_area->fla; |
| 411 | if ((sei_area->vf & 0xc0) == 0xc0) | 308 | if ((sei_area->vf & 0xc0) == 0xc0) |
| 412 | /* full link address */ | 309 | /* full link address */ |
| 413 | res_data.fla_mask = 0xffff; | 310 | link.fla_mask = 0xffff; |
| 414 | else | 311 | else |
| 415 | /* link address */ | 312 | /* link address */ |
| 416 | res_data.fla_mask = 0xff00; | 313 | link.fla_mask = 0xff00; |
| 417 | } | 314 | } |
| 418 | s390_process_res_acc(&res_data); | 315 | s390_process_res_acc(&link); |
| 419 | } | 316 | } |
| 420 | 317 | ||
| 421 | struct chp_config_data { | 318 | struct chp_config_data { |
| @@ -480,17 +377,25 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area) | |||
| 480 | } | 377 | } |
| 481 | } | 378 | } |
| 482 | 379 | ||
| 483 | void chsc_process_crw(void) | 380 | static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) |
| 484 | { | 381 | { |
| 485 | struct chsc_sei_area *sei_area; | 382 | struct chsc_sei_area *sei_area; |
| 486 | 383 | ||
| 384 | if (overflow) { | ||
| 385 | css_schedule_eval_all(); | ||
| 386 | return; | ||
| 387 | } | ||
| 388 | CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, " | ||
| 389 | "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", | ||
| 390 | crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, | ||
| 391 | crw0->erc, crw0->rsid); | ||
| 487 | if (!sei_page) | 392 | if (!sei_page) |
| 488 | return; | 393 | return; |
| 489 | /* Access to sei_page is serialized through machine check handler | 394 | /* Access to sei_page is serialized through machine check handler |
| 490 | * thread, so no need for locking. */ | 395 | * thread, so no need for locking. */ |
| 491 | sei_area = sei_page; | 396 | sei_area = sei_page; |
| 492 | 397 | ||
| 493 | CIO_TRACE_EVENT( 2, "prcss"); | 398 | CIO_TRACE_EVENT(2, "prcss"); |
| 494 | do { | 399 | do { |
| 495 | memset(sei_area, 0, sizeof(*sei_area)); | 400 | memset(sei_area, 0, sizeof(*sei_area)); |
| 496 | sei_area->request.length = 0x0010; | 401 | sei_area->request.length = 0x0010; |
| @@ -509,114 +414,36 @@ void chsc_process_crw(void) | |||
| 509 | } while (sei_area->flags & 0x80); | 414 | } while (sei_area->flags & 0x80); |
| 510 | } | 415 | } |
| 511 | 416 | ||
| 512 | static int __chp_add_new_sch(struct subchannel_id schid, void *data) | ||
| 513 | { | ||
| 514 | struct schib schib; | ||
| 515 | |||
| 516 | if (stsch_err(schid, &schib)) | ||
| 517 | /* We're through */ | ||
| 518 | return -ENXIO; | ||
| 519 | |||
| 520 | /* Put it on the slow path. */ | ||
| 521 | css_schedule_eval(schid); | ||
| 522 | return 0; | ||
| 523 | } | ||
| 524 | |||
| 525 | |||
| 526 | static int __chp_add(struct subchannel *sch, void *data) | ||
| 527 | { | ||
| 528 | int i, mask; | ||
| 529 | struct chp_id *chpid = data; | ||
| 530 | |||
| 531 | spin_lock_irq(sch->lock); | ||
| 532 | for (i=0; i<8; i++) { | ||
| 533 | mask = 0x80 >> i; | ||
| 534 | if ((sch->schib.pmcw.pim & mask) && | ||
| 535 | (sch->schib.pmcw.chpid[i] == chpid->id)) | ||
| 536 | break; | ||
| 537 | } | ||
| 538 | if (i==8) { | ||
| 539 | spin_unlock_irq(sch->lock); | ||
| 540 | return 0; | ||
| 541 | } | ||
| 542 | if (stsch(sch->schid, &sch->schib)) { | ||
| 543 | spin_unlock_irq(sch->lock); | ||
| 544 | css_schedule_eval(sch->schid); | ||
| 545 | return 0; | ||
| 546 | } | ||
| 547 | sch->lpm = ((sch->schib.pmcw.pim & | ||
| 548 | sch->schib.pmcw.pam & | ||
| 549 | sch->schib.pmcw.pom) | ||
| 550 | | mask) & sch->opm; | ||
| 551 | |||
| 552 | if (sch->driver && sch->driver->verify) | ||
| 553 | sch->driver->verify(sch); | ||
| 554 | |||
| 555 | spin_unlock_irq(sch->lock); | ||
| 556 | |||
| 557 | return 0; | ||
| 558 | } | ||
| 559 | |||
| 560 | void chsc_chp_online(struct chp_id chpid) | 417 | void chsc_chp_online(struct chp_id chpid) |
| 561 | { | 418 | { |
| 562 | char dbf_txt[15]; | 419 | char dbf_txt[15]; |
| 420 | struct chp_link link; | ||
| 563 | 421 | ||
| 564 | sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); | 422 | sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); |
| 565 | CIO_TRACE_EVENT(2, dbf_txt); | 423 | CIO_TRACE_EVENT(2, dbf_txt); |
| 566 | 424 | ||
| 567 | if (chp_get_status(chpid) != 0) { | 425 | if (chp_get_status(chpid) != 0) { |
| 426 | memset(&link, 0, sizeof(struct chp_link)); | ||
| 427 | link.chpid = chpid; | ||
| 568 | /* Wait until previous actions have settled. */ | 428 | /* Wait until previous actions have settled. */ |
| 569 | css_wait_for_slow_path(); | 429 | css_wait_for_slow_path(); |
| 570 | for_each_subchannel_staged(__chp_add, __chp_add_new_sch, | 430 | for_each_subchannel_staged(__s390_process_res_acc, NULL, |
| 571 | &chpid); | 431 | &link); |
| 572 | } | 432 | } |
| 573 | } | 433 | } |
| 574 | 434 | ||
| 575 | static void __s390_subchannel_vary_chpid(struct subchannel *sch, | 435 | static void __s390_subchannel_vary_chpid(struct subchannel *sch, |
| 576 | struct chp_id chpid, int on) | 436 | struct chp_id chpid, int on) |
| 577 | { | 437 | { |
| 578 | int chp, old_lpm; | ||
| 579 | int mask; | ||
| 580 | unsigned long flags; | 438 | unsigned long flags; |
| 439 | struct chp_link link; | ||
| 581 | 440 | ||
| 441 | memset(&link, 0, sizeof(struct chp_link)); | ||
| 442 | link.chpid = chpid; | ||
| 582 | spin_lock_irqsave(sch->lock, flags); | 443 | spin_lock_irqsave(sch->lock, flags); |
| 583 | old_lpm = sch->lpm; | 444 | if (sch->driver && sch->driver->chp_event) |
| 584 | for (chp = 0; chp < 8; chp++) { | 445 | sch->driver->chp_event(sch, &link, |
| 585 | mask = 0x80 >> chp; | 446 | on ? CHP_VARY_ON : CHP_VARY_OFF); |
| 586 | if (!(sch->ssd_info.path_mask & mask)) | ||
| 587 | continue; | ||
| 588 | if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid)) | ||
| 589 | continue; | ||
| 590 | |||
| 591 | if (on) { | ||
| 592 | sch->opm |= mask; | ||
| 593 | sch->lpm |= mask; | ||
| 594 | if (!old_lpm) | ||
| 595 | device_trigger_reprobe(sch); | ||
| 596 | else if (sch->driver && sch->driver->verify) | ||
| 597 | sch->driver->verify(sch); | ||
| 598 | break; | ||
| 599 | } | ||
| 600 | sch->opm &= ~mask; | ||
| 601 | sch->lpm &= ~mask; | ||
| 602 | if (check_for_io_on_path(sch, mask)) { | ||
| 603 | if (device_is_online(sch)) | ||
| 604 | /* Path verification is done after killing. */ | ||
| 605 | device_kill_io(sch); | ||
| 606 | else { | ||
| 607 | /* Kill and retry internal I/O. */ | ||
| 608 | terminate_internal_io(sch); | ||
| 609 | /* Re-start path verification. */ | ||
| 610 | if (sch->driver && sch->driver->verify) | ||
| 611 | sch->driver->verify(sch); | ||
| 612 | } | ||
| 613 | } else if (!sch->lpm) { | ||
| 614 | if (device_trigger_verify(sch) != 0) | ||
| 615 | css_schedule_eval(sch->schid); | ||
| 616 | } else if (sch->driver && sch->driver->verify) | ||
| 617 | sch->driver->verify(sch); | ||
| 618 | break; | ||
| 619 | } | ||
| 620 | spin_unlock_irqrestore(sch->lock, flags); | 447 | spin_unlock_irqrestore(sch->lock, flags); |
| 621 | } | 448 | } |
| 622 | 449 | ||
| @@ -656,6 +483,10 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data) | |||
| 656 | */ | 483 | */ |
| 657 | int chsc_chp_vary(struct chp_id chpid, int on) | 484 | int chsc_chp_vary(struct chp_id chpid, int on) |
| 658 | { | 485 | { |
| 486 | struct chp_link link; | ||
| 487 | |||
| 488 | memset(&link, 0, sizeof(struct chp_link)); | ||
| 489 | link.chpid = chpid; | ||
| 659 | /* Wait until previous actions have settled. */ | 490 | /* Wait until previous actions have settled. */ |
| 660 | css_wait_for_slow_path(); | 491 | css_wait_for_slow_path(); |
| 661 | /* | 492 | /* |
| @@ -664,10 +495,10 @@ int chsc_chp_vary(struct chp_id chpid, int on) | |||
| 664 | 495 | ||
| 665 | if (on) | 496 | if (on) |
| 666 | for_each_subchannel_staged(s390_subchannel_vary_chpid_on, | 497 | for_each_subchannel_staged(s390_subchannel_vary_chpid_on, |
| 667 | __s390_vary_chpid_on, &chpid); | 498 | __s390_vary_chpid_on, &link); |
| 668 | else | 499 | else |
| 669 | for_each_subchannel_staged(s390_subchannel_vary_chpid_off, | 500 | for_each_subchannel_staged(s390_subchannel_vary_chpid_off, |
| 670 | NULL, &chpid); | 501 | NULL, &link); |
| 671 | 502 | ||
| 672 | return 0; | 503 | return 0; |
| 673 | } | 504 | } |
| @@ -797,23 +628,33 @@ chsc_secm(struct channel_subsystem *css, int enable) | |||
| 797 | return ret; | 628 | return ret; |
| 798 | } | 629 | } |
| 799 | 630 | ||
| 800 | int chsc_determine_channel_path_description(struct chp_id chpid, | 631 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, |
| 801 | struct channel_path_desc *desc) | 632 | int c, int m, |
| 633 | struct chsc_response_struct *resp) | ||
| 802 | { | 634 | { |
| 803 | int ccode, ret; | 635 | int ccode, ret; |
| 804 | 636 | ||
| 805 | struct { | 637 | struct { |
| 806 | struct chsc_header request; | 638 | struct chsc_header request; |
| 807 | u32 : 24; | 639 | u32 : 2; |
| 640 | u32 m : 1; | ||
| 641 | u32 c : 1; | ||
| 642 | u32 fmt : 4; | ||
| 643 | u32 cssid : 8; | ||
| 644 | u32 : 4; | ||
| 645 | u32 rfmt : 4; | ||
| 808 | u32 first_chpid : 8; | 646 | u32 first_chpid : 8; |
| 809 | u32 : 24; | 647 | u32 : 24; |
| 810 | u32 last_chpid : 8; | 648 | u32 last_chpid : 8; |
| 811 | u32 zeroes1; | 649 | u32 zeroes1; |
| 812 | struct chsc_header response; | 650 | struct chsc_header response; |
| 813 | u32 zeroes2; | 651 | u8 data[PAGE_SIZE - 20]; |
| 814 | struct channel_path_desc desc; | ||
| 815 | } __attribute__ ((packed)) *scpd_area; | 652 | } __attribute__ ((packed)) *scpd_area; |
| 816 | 653 | ||
| 654 | if ((rfmt == 1) && !css_general_characteristics.fcs) | ||
| 655 | return -EINVAL; | ||
| 656 | if ((rfmt == 2) && !css_general_characteristics.cib) | ||
| 657 | return -EINVAL; | ||
| 817 | scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 658 | scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
| 818 | if (!scpd_area) | 659 | if (!scpd_area) |
| 819 | return -ENOMEM; | 660 | return -ENOMEM; |
| @@ -821,8 +662,13 @@ int chsc_determine_channel_path_description(struct chp_id chpid, | |||
| 821 | scpd_area->request.length = 0x0010; | 662 | scpd_area->request.length = 0x0010; |
| 822 | scpd_area->request.code = 0x0002; | 663 | scpd_area->request.code = 0x0002; |
| 823 | 664 | ||
| 665 | scpd_area->cssid = chpid.cssid; | ||
| 824 | scpd_area->first_chpid = chpid.id; | 666 | scpd_area->first_chpid = chpid.id; |
| 825 | scpd_area->last_chpid = chpid.id; | 667 | scpd_area->last_chpid = chpid.id; |
| 668 | scpd_area->m = m; | ||
| 669 | scpd_area->c = c; | ||
| 670 | scpd_area->fmt = fmt; | ||
| 671 | scpd_area->rfmt = rfmt; | ||
| 826 | 672 | ||
| 827 | ccode = chsc(scpd_area); | 673 | ccode = chsc(scpd_area); |
| 828 | if (ccode > 0) { | 674 | if (ccode > 0) { |
| @@ -833,8 +679,7 @@ int chsc_determine_channel_path_description(struct chp_id chpid, | |||
| 833 | ret = chsc_error_from_response(scpd_area->response.code); | 679 | ret = chsc_error_from_response(scpd_area->response.code); |
| 834 | if (ret == 0) | 680 | if (ret == 0) |
| 835 | /* Success. */ | 681 | /* Success. */ |
| 836 | memcpy(desc, &scpd_area->desc, | 682 | memcpy(resp, &scpd_area->response, scpd_area->response.length); |
| 837 | sizeof(struct channel_path_desc)); | ||
| 838 | else | 683 | else |
| 839 | CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", | 684 | CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", |
| 840 | scpd_area->response.code); | 685 | scpd_area->response.code); |
| @@ -842,6 +687,25 @@ out: | |||
| 842 | free_page((unsigned long)scpd_area); | 687 | free_page((unsigned long)scpd_area); |
| 843 | return ret; | 688 | return ret; |
| 844 | } | 689 | } |
| 690 | EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); | ||
| 691 | |||
| 692 | int chsc_determine_base_channel_path_desc(struct chp_id chpid, | ||
| 693 | struct channel_path_desc *desc) | ||
| 694 | { | ||
| 695 | struct chsc_response_struct *chsc_resp; | ||
| 696 | int ret; | ||
| 697 | |||
| 698 | chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL); | ||
| 699 | if (!chsc_resp) | ||
| 700 | return -ENOMEM; | ||
| 701 | ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp); | ||
| 702 | if (ret) | ||
| 703 | goto out_free; | ||
| 704 | memcpy(desc, &chsc_resp->data, chsc_resp->length); | ||
| 705 | out_free: | ||
| 706 | kfree(chsc_resp); | ||
| 707 | return ret; | ||
| 708 | } | ||
| 845 | 709 | ||
| 846 | static void | 710 | static void |
| 847 | chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, | 711 | chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, |
| @@ -937,15 +801,23 @@ out: | |||
| 937 | 801 | ||
| 938 | int __init chsc_alloc_sei_area(void) | 802 | int __init chsc_alloc_sei_area(void) |
| 939 | { | 803 | { |
| 804 | int ret; | ||
| 805 | |||
| 940 | sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 806 | sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
| 941 | if (!sei_page) | 807 | if (!sei_page) { |
| 942 | CIO_MSG_EVENT(0, "Can't allocate page for processing of " | 808 | CIO_MSG_EVENT(0, "Can't allocate page for processing of " |
| 943 | "chsc machine checks!\n"); | 809 | "chsc machine checks!\n"); |
| 944 | return (sei_page ? 0 : -ENOMEM); | 810 | return -ENOMEM; |
| 811 | } | ||
| 812 | ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw); | ||
| 813 | if (ret) | ||
| 814 | kfree(sei_page); | ||
| 815 | return ret; | ||
| 945 | } | 816 | } |
| 946 | 817 | ||
| 947 | void __init chsc_free_sei_area(void) | 818 | void __init chsc_free_sei_area(void) |
| 948 | { | 819 | { |
| 820 | s390_unregister_crw_handler(CRW_RSC_CSS); | ||
| 949 | kfree(sei_page); | 821 | kfree(sei_page); |
| 950 | } | 822 | } |
| 951 | 823 | ||
| @@ -1043,3 +915,52 @@ exit: | |||
| 1043 | 915 | ||
| 1044 | EXPORT_SYMBOL_GPL(css_general_characteristics); | 916 | EXPORT_SYMBOL_GPL(css_general_characteristics); |
| 1045 | EXPORT_SYMBOL_GPL(css_chsc_characteristics); | 917 | EXPORT_SYMBOL_GPL(css_chsc_characteristics); |
| 918 | |||
| 919 | int chsc_sstpc(void *page, unsigned int op, u16 ctrl) | ||
| 920 | { | ||
| 921 | struct { | ||
| 922 | struct chsc_header request; | ||
| 923 | unsigned int rsvd0; | ||
| 924 | unsigned int op : 8; | ||
| 925 | unsigned int rsvd1 : 8; | ||
| 926 | unsigned int ctrl : 16; | ||
| 927 | unsigned int rsvd2[5]; | ||
| 928 | struct chsc_header response; | ||
| 929 | unsigned int rsvd3[7]; | ||
| 930 | } __attribute__ ((packed)) *rr; | ||
| 931 | int rc; | ||
| 932 | |||
| 933 | memset(page, 0, PAGE_SIZE); | ||
| 934 | rr = page; | ||
| 935 | rr->request.length = 0x0020; | ||
| 936 | rr->request.code = 0x0033; | ||
| 937 | rr->op = op; | ||
| 938 | rr->ctrl = ctrl; | ||
| 939 | rc = chsc(rr); | ||
| 940 | if (rc) | ||
| 941 | return -EIO; | ||
| 942 | rc = (rr->response.code == 0x0001) ? 0 : -EIO; | ||
| 943 | return rc; | ||
| 944 | } | ||
| 945 | |||
| 946 | int chsc_sstpi(void *page, void *result, size_t size) | ||
| 947 | { | ||
| 948 | struct { | ||
| 949 | struct chsc_header request; | ||
| 950 | unsigned int rsvd0[3]; | ||
| 951 | struct chsc_header response; | ||
| 952 | char data[size]; | ||
| 953 | } __attribute__ ((packed)) *rr; | ||
| 954 | int rc; | ||
| 955 | |||
| 956 | memset(page, 0, PAGE_SIZE); | ||
| 957 | rr = page; | ||
| 958 | rr->request.length = 0x0010; | ||
| 959 | rr->request.code = 0x0038; | ||
| 960 | rc = chsc(rr); | ||
| 961 | if (rc) | ||
| 962 | return -EIO; | ||
| 963 | memcpy(result, &rr->data, size); | ||
| 964 | return (rr->response.code == 0x0001) ? 0 : -EIO; | ||
| 965 | } | ||
| 966 | |||
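The chsc.c hunks above fold the old open-coded per-subchannel handling for channel-path removal, resource-accessibility and vary events into a single dispatch to the subchannel driver's new chp_event callback, with a struct chp_link carrying chpid, fla and fla_mask. A minimal sketch of what such a callback could look like on the driver side follows; the function name, the plain int event parameter and the empty case bodies are illustrative assumptions, not the actual I/O-subchannel handler added elsewhere in this series.

	/* Sketch only: how a css_driver might react to the CHP_* events that
	 * chsc.c now forwards instead of touching sch->lpm itself. */
	static int example_chp_event(struct subchannel *sch,
				     struct chp_link *link, int event)
	{
		switch (event) {
		case CHP_OFFLINE:
		case CHP_VARY_OFF:
			/* Stop using the paths described by link->chpid;
			 * the caller already holds sch->lock. */
			break;
		case CHP_ONLINE:
		case CHP_VARY_ON:
			/* Re-add the paths and schedule path verification. */
			break;
		}
		return 0;
	}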
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index d1f5db1e69b9..fb6c4d6c45b4 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
| @@ -4,7 +4,8 @@ | |||
| 4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
| 5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
| 6 | #include <asm/chpid.h> | 6 | #include <asm/chpid.h> |
| 7 | #include "schid.h" | 7 | #include <asm/chsc.h> |
| 8 | #include <asm/schid.h> | ||
| 8 | 9 | ||
| 9 | #define CHSC_SDA_OC_MSS 0x2 | 10 | #define CHSC_SDA_OC_MSS 0x2 |
| 10 | 11 | ||
| @@ -36,14 +37,15 @@ struct channel_path_desc { | |||
| 36 | 37 | ||
| 37 | struct channel_path; | 38 | struct channel_path; |
| 38 | 39 | ||
| 39 | extern void chsc_process_crw(void); | ||
| 40 | |||
| 41 | struct css_general_char { | 40 | struct css_general_char { |
| 42 | u64 : 41; | 41 | u64 : 12; |
| 42 | u32 dynio : 1; /* bit 12 */ | ||
| 43 | u32 : 28; | ||
| 43 | u32 aif : 1; /* bit 41 */ | 44 | u32 aif : 1; /* bit 41 */ |
| 44 | u32 : 3; | 45 | u32 : 3; |
| 45 | u32 mcss : 1; /* bit 45 */ | 46 | u32 mcss : 1; /* bit 45 */ |
| 46 | u32 : 2; | 47 | u32 fcs : 1; /* bit 46 */ |
| 48 | u32 : 1; | ||
| 47 | u32 ext_mb : 1; /* bit 48 */ | 49 | u32 ext_mb : 1; /* bit 48 */ |
| 48 | u32 : 7; | 50 | u32 : 7; |
| 49 | u32 aif_tdd : 1; /* bit 56 */ | 51 | u32 aif_tdd : 1; /* bit 56 */ |
| @@ -51,7 +53,11 @@ struct css_general_char { | |||
| 51 | u32 qebsm : 1; /* bit 58 */ | 53 | u32 qebsm : 1; /* bit 58 */ |
| 52 | u32 : 8; | 54 | u32 : 8; |
| 53 | u32 aif_osa : 1; /* bit 67 */ | 55 | u32 aif_osa : 1; /* bit 67 */ |
| 54 | u32 : 28; | 56 | u32 : 14; |
| 57 | u32 cib : 1; /* bit 82 */ | ||
| 58 | u32 : 5; | ||
| 59 | u32 fcx : 1; /* bit 88 */ | ||
| 60 | u32 : 7; | ||
| 55 | }__attribute__((packed)); | 61 | }__attribute__((packed)); |
| 56 | 62 | ||
| 57 | struct css_chsc_char { | 63 | struct css_chsc_char { |
| @@ -78,7 +84,6 @@ struct chsc_ssd_info { | |||
| 78 | extern int chsc_get_ssd_info(struct subchannel_id schid, | 84 | extern int chsc_get_ssd_info(struct subchannel_id schid, |
| 79 | struct chsc_ssd_info *ssd); | 85 | struct chsc_ssd_info *ssd); |
| 80 | extern int chsc_determine_css_characteristics(void); | 86 | extern int chsc_determine_css_characteristics(void); |
| 81 | extern int css_characteristics_avail; | ||
| 82 | extern int chsc_alloc_sei_area(void); | 87 | extern int chsc_alloc_sei_area(void); |
| 83 | extern void chsc_free_sei_area(void); | 88 | extern void chsc_free_sei_area(void); |
| 84 | 89 | ||
| @@ -87,8 +92,11 @@ struct channel_subsystem; | |||
| 87 | extern int chsc_secm(struct channel_subsystem *, int); | 92 | extern int chsc_secm(struct channel_subsystem *, int); |
| 88 | 93 | ||
| 89 | int chsc_chp_vary(struct chp_id chpid, int on); | 94 | int chsc_chp_vary(struct chp_id chpid, int on); |
| 90 | int chsc_determine_channel_path_description(struct chp_id chpid, | 95 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, |
| 91 | struct channel_path_desc *desc); | 96 | int c, int m, |
| 97 | struct chsc_response_struct *resp); | ||
| 98 | int chsc_determine_base_channel_path_desc(struct chp_id chpid, | ||
| 99 | struct channel_path_desc *desc); | ||
| 92 | void chsc_chp_online(struct chp_id chpid); | 100 | void chsc_chp_online(struct chp_id chpid); |
| 93 | void chsc_chp_offline(struct chp_id chpid); | 101 | void chsc_chp_offline(struct chp_id chpid); |
| 94 | int chsc_get_channel_measurement_chars(struct channel_path *chp); | 102 | int chsc_get_channel_measurement_chars(struct channel_path *chp); |
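Besides the new facility bits (dynio, fcs, cib, fcx), the header now exports a widened chsc_determine_channel_path_desc() that takes format and scope arguments plus a full chsc_response_struct, while chsc_determine_base_channel_path_desc() remains the simple format-0 wrapper. A hedged sketch of a kernel-internal caller requesting an extended-format description is shown below; the helper name and the chosen error code are illustrative, and the response buffer handling is simplified.

	/* Sketch only: ask for an rfmt=2 description, which chsc.c rejects
	 * with -EINVAL unless the "cib" general characteristic is set. */
	static int example_get_ext_chp_desc(struct chp_id chpid,
					    struct chsc_response_struct *resp)
	{
		if (!css_general_characteristics.cib)
			return -EOPNOTSUPP;
		/* fmt=0, rfmt=2, c=0, m=0: single chpid, extended response. */
		return chsc_determine_channel_path_desc(chpid, 0, 2, 0, 0, resp);
	}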
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c new file mode 100644 index 000000000000..91ca87aa9f97 --- /dev/null +++ b/drivers/s390/cio/chsc_sch.c | |||
| @@ -0,0 +1,820 @@ | |||
| 1 | /* | ||
| 2 | * Driver for s390 chsc subchannels | ||
| 3 | * | ||
| 4 | * Copyright IBM Corp. 2008 | ||
| 5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> | ||
| 6 | * | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/device.h> | ||
| 10 | #include <linux/module.h> | ||
| 11 | #include <linux/uaccess.h> | ||
| 12 | #include <linux/miscdevice.h> | ||
| 13 | |||
| 14 | #include <asm/cio.h> | ||
| 15 | #include <asm/chsc.h> | ||
| 16 | #include <asm/isc.h> | ||
| 17 | |||
| 18 | #include "cio.h" | ||
| 19 | #include "cio_debug.h" | ||
| 20 | #include "css.h" | ||
| 21 | #include "chsc_sch.h" | ||
| 22 | #include "ioasm.h" | ||
| 23 | |||
| 24 | static debug_info_t *chsc_debug_msg_id; | ||
| 25 | static debug_info_t *chsc_debug_log_id; | ||
| 26 | |||
| 27 | #define CHSC_MSG(imp, args...) do { \ | ||
| 28 | debug_sprintf_event(chsc_debug_msg_id, imp , ##args); \ | ||
| 29 | } while (0) | ||
| 30 | |||
| 31 | #define CHSC_LOG(imp, txt) do { \ | ||
| 32 | debug_text_event(chsc_debug_log_id, imp , txt); \ | ||
| 33 | } while (0) | ||
| 34 | |||
| 35 | static void CHSC_LOG_HEX(int level, void *data, int length) | ||
| 36 | { | ||
| 37 | while (length > 0) { | ||
| 38 | debug_event(chsc_debug_log_id, level, data, length); | ||
| 39 | length -= chsc_debug_log_id->buf_size; | ||
| 40 | data += chsc_debug_log_id->buf_size; | ||
| 41 | } | ||
| 42 | } | ||
| 43 | |||
| 44 | MODULE_AUTHOR("IBM Corporation"); | ||
| 45 | MODULE_DESCRIPTION("driver for s390 chsc subchannels"); | ||
| 46 | MODULE_LICENSE("GPL"); | ||
| 47 | |||
| 48 | static void chsc_subchannel_irq(struct subchannel *sch) | ||
| 49 | { | ||
| 50 | struct chsc_private *private = sch->private; | ||
| 51 | struct chsc_request *request = private->request; | ||
| 52 | struct irb *irb = (struct irb *)__LC_IRB; | ||
| 53 | |||
| 54 | CHSC_LOG(4, "irb"); | ||
| 55 | CHSC_LOG_HEX(4, irb, sizeof(*irb)); | ||
| 56 | /* Copy irb to provided request and set done. */ | ||
| 57 | if (!request) { | ||
| 58 | CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n", | ||
| 59 | sch->schid.ssid, sch->schid.sch_no); | ||
| 60 | return; | ||
| 61 | } | ||
| 62 | private->request = NULL; | ||
| 63 | memcpy(&request->irb, irb, sizeof(*irb)); | ||
| 64 | stsch(sch->schid, &sch->schib); | ||
| 65 | complete(&request->completion); | ||
| 66 | put_device(&sch->dev); | ||
| 67 | } | ||
| 68 | |||
| 69 | static int chsc_subchannel_probe(struct subchannel *sch) | ||
| 70 | { | ||
| 71 | struct chsc_private *private; | ||
| 72 | int ret; | ||
| 73 | |||
| 74 | CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n", | ||
| 75 | sch->schid.ssid, sch->schid.sch_no); | ||
| 76 | sch->isc = CHSC_SCH_ISC; | ||
| 77 | private = kzalloc(sizeof(*private), GFP_KERNEL); | ||
| 78 | if (!private) | ||
| 79 | return -ENOMEM; | ||
| 80 | ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); | ||
| 81 | if (ret) { | ||
| 82 | CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n", | ||
| 83 | sch->schid.ssid, sch->schid.sch_no, ret); | ||
| 84 | kfree(private); | ||
| 85 | } else { | ||
| 86 | sch->private = private; | ||
| 87 | if (sch->dev.uevent_suppress) { | ||
| 88 | sch->dev.uevent_suppress = 0; | ||
| 89 | kobject_uevent(&sch->dev.kobj, KOBJ_ADD); | ||
| 90 | } | ||
| 91 | } | ||
| 92 | return ret; | ||
| 93 | } | ||
| 94 | |||
| 95 | static int chsc_subchannel_remove(struct subchannel *sch) | ||
| 96 | { | ||
| 97 | struct chsc_private *private; | ||
| 98 | |||
| 99 | cio_disable_subchannel(sch); | ||
| 100 | private = sch->private; | ||
| 101 | sch->private = NULL; | ||
| 102 | if (private->request) { | ||
| 103 | complete(&private->request->completion); | ||
| 104 | put_device(&sch->dev); | ||
| 105 | } | ||
| 106 | kfree(private); | ||
| 107 | return 0; | ||
| 108 | } | ||
| 109 | |||
| 110 | static void chsc_subchannel_shutdown(struct subchannel *sch) | ||
| 111 | { | ||
| 112 | cio_disable_subchannel(sch); | ||
| 113 | } | ||
| 114 | |||
| 115 | static struct css_device_id chsc_subchannel_ids[] = { | ||
| 116 | { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, }, | ||
| 117 | { /* end of list */ }, | ||
| 118 | }; | ||
| 119 | MODULE_DEVICE_TABLE(css, chsc_subchannel_ids); | ||
| 120 | |||
| 121 | static struct css_driver chsc_subchannel_driver = { | ||
| 122 | .owner = THIS_MODULE, | ||
| 123 | .subchannel_type = chsc_subchannel_ids, | ||
| 124 | .irq = chsc_subchannel_irq, | ||
| 125 | .probe = chsc_subchannel_probe, | ||
| 126 | .remove = chsc_subchannel_remove, | ||
| 127 | .shutdown = chsc_subchannel_shutdown, | ||
| 128 | .name = "chsc_subchannel", | ||
| 129 | }; | ||
| 130 | |||
| 131 | static int __init chsc_init_dbfs(void) | ||
| 132 | { | ||
| 133 | chsc_debug_msg_id = debug_register("chsc_msg", 16, 1, | ||
| 134 | 16 * sizeof(long)); | ||
| 135 | if (!chsc_debug_msg_id) | ||
| 136 | goto out; | ||
| 137 | debug_register_view(chsc_debug_msg_id, &debug_sprintf_view); | ||
| 138 | debug_set_level(chsc_debug_msg_id, 2); | ||
| 139 | chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16); | ||
| 140 | if (!chsc_debug_log_id) | ||
| 141 | goto out; | ||
| 142 | debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view); | ||
| 143 | debug_set_level(chsc_debug_log_id, 2); | ||
| 144 | return 0; | ||
| 145 | out: | ||
| 146 | if (chsc_debug_msg_id) | ||
| 147 | debug_unregister(chsc_debug_msg_id); | ||
| 148 | return -ENOMEM; | ||
| 149 | } | ||
| 150 | |||
| 151 | static void chsc_remove_dbfs(void) | ||
| 152 | { | ||
| 153 | debug_unregister(chsc_debug_log_id); | ||
| 154 | debug_unregister(chsc_debug_msg_id); | ||
| 155 | } | ||
| 156 | |||
| 157 | static int __init chsc_init_sch_driver(void) | ||
| 158 | { | ||
| 159 | return css_driver_register(&chsc_subchannel_driver); | ||
| 160 | } | ||
| 161 | |||
| 162 | static void chsc_cleanup_sch_driver(void) | ||
| 163 | { | ||
| 164 | css_driver_unregister(&chsc_subchannel_driver); | ||
| 165 | } | ||
| 166 | |||
| 167 | static DEFINE_SPINLOCK(chsc_lock); | ||
| 168 | |||
| 169 | static int chsc_subchannel_match_next_free(struct device *dev, void *data) | ||
| 170 | { | ||
| 171 | struct subchannel *sch = to_subchannel(dev); | ||
| 172 | |||
| 173 | return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw); | ||
| 174 | } | ||
| 175 | |||
| 176 | static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch) | ||
| 177 | { | ||
| 178 | struct device *dev; | ||
| 179 | |||
| 180 | dev = driver_find_device(&chsc_subchannel_driver.drv, | ||
| 181 | sch ? &sch->dev : NULL, NULL, | ||
| 182 | chsc_subchannel_match_next_free); | ||
| 183 | return dev ? to_subchannel(dev) : NULL; | ||
| 184 | } | ||
| 185 | |||
| 186 | /** | ||
| 187 | * chsc_async() - try to start a chsc request asynchronously | ||
| 188 | * @chsc_area: request to be started | ||
| 189 | * @request: request structure to associate | ||
| 190 | * | ||
| 191 | * Tries to start a chsc request on one of the existing chsc subchannels. | ||
| 192 | * Returns: | ||
| 193 | * %0 if the request was performed synchronously | ||
| 194 | * %-EINPROGRESS if the request was successfully started | ||
| 195 | * %-EBUSY if all chsc subchannels are busy | ||
| 196 | * %-ENODEV if no chsc subchannels are available | ||
| 197 | * Context: | ||
| 198 | * interrupts disabled, chsc_lock held | ||
| 199 | */ | ||
| 200 | static int chsc_async(struct chsc_async_area *chsc_area, | ||
| 201 | struct chsc_request *request) | ||
| 202 | { | ||
| 203 | int cc; | ||
| 204 | struct chsc_private *private; | ||
| 205 | struct subchannel *sch = NULL; | ||
| 206 | int ret = -ENODEV; | ||
| 207 | char dbf[10]; | ||
| 208 | |||
| 209 | chsc_area->header.key = PAGE_DEFAULT_KEY; | ||
| 210 | while ((sch = chsc_get_next_subchannel(sch))) { | ||
| 211 | spin_lock(sch->lock); | ||
| 212 | private = sch->private; | ||
| 213 | if (private->request) { | ||
| 214 | spin_unlock(sch->lock); | ||
| 215 | ret = -EBUSY; | ||
| 216 | continue; | ||
| 217 | } | ||
| 218 | chsc_area->header.sid = sch->schid; | ||
| 219 | CHSC_LOG(2, "schid"); | ||
| 220 | CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid)); | ||
| 221 | cc = chsc(chsc_area); | ||
| 222 | sprintf(dbf, "cc:%d", cc); | ||
| 223 | CHSC_LOG(2, dbf); | ||
| 224 | switch (cc) { | ||
| 225 | case 0: | ||
| 226 | ret = 0; | ||
| 227 | break; | ||
| 228 | case 1: | ||
| 229 | sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC; | ||
| 230 | ret = -EINPROGRESS; | ||
| 231 | private->request = request; | ||
| 232 | break; | ||
| 233 | case 2: | ||
| 234 | ret = -EBUSY; | ||
| 235 | break; | ||
| 236 | default: | ||
| 237 | ret = -ENODEV; | ||
| 238 | } | ||
| 239 | spin_unlock(sch->lock); | ||
| 240 | CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n", | ||
| 241 | sch->schid.ssid, sch->schid.sch_no, cc); | ||
| 242 | if (ret == -EINPROGRESS) | ||
| 243 | return -EINPROGRESS; | ||
| 244 | put_device(&sch->dev); | ||
| 245 | if (ret == 0) | ||
| 246 | return 0; | ||
| 247 | } | ||
| 248 | return ret; | ||
| 249 | } | ||
| 250 | |||
| 251 | static void chsc_log_command(struct chsc_async_area *chsc_area) | ||
| 252 | { | ||
| 253 | char dbf[10]; | ||
| 254 | |||
| 255 | sprintf(dbf, "CHSC:%x", chsc_area->header.code); | ||
| 256 | CHSC_LOG(0, dbf); | ||
| 257 | CHSC_LOG_HEX(0, chsc_area, 32); | ||
| 258 | } | ||
| 259 | |||
| 260 | static int chsc_examine_irb(struct chsc_request *request) | ||
| 261 | { | ||
| 262 | int backed_up; | ||
| 263 | |||
| 264 | if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND)) | ||
| 265 | return -EIO; | ||
| 266 | backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK; | ||
| 267 | request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK; | ||
| 268 | if (scsw_cstat(&request->irb.scsw) == 0) | ||
| 269 | return 0; | ||
| 270 | if (!backed_up) | ||
| 271 | return 0; | ||
| 272 | if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK) | ||
| 273 | return -EIO; | ||
| 274 | if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK) | ||
| 275 | return -EPERM; | ||
| 276 | if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK) | ||
| 277 | return -EAGAIN; | ||
| 278 | if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK) | ||
| 279 | return -EAGAIN; | ||
| 280 | return -EIO; | ||
| 281 | } | ||
| 282 | |||
| 283 | static int chsc_ioctl_start(void __user *user_area) | ||
| 284 | { | ||
| 285 | struct chsc_request *request; | ||
| 286 | struct chsc_async_area *chsc_area; | ||
| 287 | int ret; | ||
| 288 | char dbf[10]; | ||
| 289 | |||
| 290 | if (!css_general_characteristics.dynio) | ||
| 291 | /* It makes no sense to try. */ | ||
| 292 | return -EOPNOTSUPP; | ||
| 293 | chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL); | ||
| 294 | if (!chsc_area) | ||
| 295 | return -ENOMEM; | ||
| 296 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
| 297 | if (!request) { | ||
| 298 | ret = -ENOMEM; | ||
| 299 | goto out_free; | ||
| 300 | } | ||
| 301 | init_completion(&request->completion); | ||
| 302 | if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) { | ||
| 303 | ret = -EFAULT; | ||
| 304 | goto out_free; | ||
| 305 | } | ||
| 306 | chsc_log_command(chsc_area); | ||
| 307 | spin_lock_irq(&chsc_lock); | ||
| 308 | ret = chsc_async(chsc_area, request); | ||
| 309 | spin_unlock_irq(&chsc_lock); | ||
| 310 | if (ret == -EINPROGRESS) { | ||
| 311 | wait_for_completion(&request->completion); | ||
| 312 | ret = chsc_examine_irb(request); | ||
| 313 | } | ||
| 314 | /* copy area back to user */ | ||
| 315 | if (!ret) | ||
| 316 | if (copy_to_user(user_area, chsc_area, PAGE_SIZE)) | ||
| 317 | ret = -EFAULT; | ||
| 318 | out_free: | ||
| 319 | sprintf(dbf, "ret:%d", ret); | ||
| 320 | CHSC_LOG(0, dbf); | ||
| 321 | kfree(request); | ||
| 322 | free_page((unsigned long)chsc_area); | ||
| 323 | return ret; | ||
| 324 | } | ||
| 325 | |||
| 326 | static int chsc_ioctl_info_channel_path(void __user *user_cd) | ||
| 327 | { | ||
| 328 | struct chsc_chp_cd *cd; | ||
| 329 | int ret, ccode; | ||
| 330 | struct { | ||
| 331 | struct chsc_header request; | ||
| 332 | u32 : 2; | ||
| 333 | u32 m : 1; | ||
| 334 | u32 : 1; | ||
| 335 | u32 fmt1 : 4; | ||
| 336 | u32 cssid : 8; | ||
| 337 | u32 : 8; | ||
| 338 | u32 first_chpid : 8; | ||
| 339 | u32 : 24; | ||
| 340 | u32 last_chpid : 8; | ||
| 341 | u32 : 32; | ||
| 342 | struct chsc_header response; | ||
| 343 | u8 data[PAGE_SIZE - 20]; | ||
| 344 | } __attribute__ ((packed)) *scpcd_area; | ||
| 345 | |||
| 346 | scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
| 347 | if (!scpcd_area) | ||
| 348 | return -ENOMEM; | ||
| 349 | cd = kzalloc(sizeof(*cd), GFP_KERNEL); | ||
| 350 | if (!cd) { | ||
| 351 | ret = -ENOMEM; | ||
| 352 | goto out_free; | ||
| 353 | } | ||
| 354 | if (copy_from_user(cd, user_cd, sizeof(*cd))) { | ||
| 355 | ret = -EFAULT; | ||
| 356 | goto out_free; | ||
| 357 | } | ||
| 358 | scpcd_area->request.length = 0x0010; | ||
| 359 | scpcd_area->request.code = 0x0028; | ||
| 360 | scpcd_area->m = cd->m; | ||
| 361 | scpcd_area->fmt1 = cd->fmt; | ||
| 362 | scpcd_area->cssid = cd->chpid.cssid; | ||
| 363 | scpcd_area->first_chpid = cd->chpid.id; | ||
| 364 | scpcd_area->last_chpid = cd->chpid.id; | ||
| 365 | |||
| 366 | ccode = chsc(scpcd_area); | ||
| 367 | if (ccode != 0) { | ||
| 368 | ret = -EIO; | ||
| 369 | goto out_free; | ||
| 370 | } | ||
| 371 | if (scpcd_area->response.code != 0x0001) { | ||
| 372 | ret = -EIO; | ||
| 373 | CHSC_MSG(0, "scpcd: response code=%x\n", | ||
| 374 | scpcd_area->response.code); | ||
| 375 | goto out_free; | ||
| 376 | } | ||
| 377 | memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length); | ||
| 378 | if (copy_to_user(user_cd, cd, sizeof(*cd))) | ||
| 379 | ret = -EFAULT; | ||
| 380 | else | ||
| 381 | ret = 0; | ||
| 382 | out_free: | ||
| 383 | kfree(cd); | ||
| 384 | free_page((unsigned long)scpcd_area); | ||
| 385 | return ret; | ||
| 386 | } | ||
| 387 | |||
| 388 | static int chsc_ioctl_info_cu(void __user *user_cd) | ||
| 389 | { | ||
| 390 | struct chsc_cu_cd *cd; | ||
| 391 | int ret, ccode; | ||
| 392 | struct { | ||
| 393 | struct chsc_header request; | ||
| 394 | u32 : 2; | ||
| 395 | u32 m : 1; | ||
| 396 | u32 : 1; | ||
| 397 | u32 fmt1 : 4; | ||
| 398 | u32 cssid : 8; | ||
| 399 | u32 : 8; | ||
| 400 | u32 first_cun : 8; | ||
| 401 | u32 : 24; | ||
| 402 | u32 last_cun : 8; | ||
| 403 | u32 : 32; | ||
| 404 | struct chsc_header response; | ||
| 405 | u8 data[PAGE_SIZE - 20]; | ||
| 406 | } __attribute__ ((packed)) *scucd_area; | ||
| 407 | |||
| 408 | scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
| 409 | if (!scucd_area) | ||
| 410 | return -ENOMEM; | ||
| 411 | cd = kzalloc(sizeof(*cd), GFP_KERNEL); | ||
| 412 | if (!cd) { | ||
| 413 | ret = -ENOMEM; | ||
| 414 | goto out_free; | ||
| 415 | } | ||
| 416 | if (copy_from_user(cd, user_cd, sizeof(*cd))) { | ||
| 417 | ret = -EFAULT; | ||
| 418 | goto out_free; | ||
| 419 | } | ||
| 420 | scucd_area->request.length = 0x0010; | ||
| 421 | scucd_area->request.code = 0x0028; | ||
| 422 | scucd_area->m = cd->m; | ||
| 423 | scucd_area->fmt1 = cd->fmt; | ||
| 424 | scucd_area->cssid = cd->cssid; | ||
| 425 | scucd_area->first_cun = cd->cun; | ||
| 426 | scucd_area->last_cun = cd->cun; | ||
| 427 | |||
| 428 | ccode = chsc(scucd_area); | ||
| 429 | if (ccode != 0) { | ||
| 430 | ret = -EIO; | ||
| 431 | goto out_free; | ||
| 432 | } | ||
| 433 | if (scucd_area->response.code != 0x0001) { | ||
| 434 | ret = -EIO; | ||
| 435 | CHSC_MSG(0, "scucd: response code=%x\n", | ||
| 436 | scucd_area->response.code); | ||
| 437 | goto out_free; | ||
| 438 | } | ||
| 439 | memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length); | ||
| 440 | if (copy_to_user(user_cd, cd, sizeof(*cd))) | ||
| 441 | ret = -EFAULT; | ||
| 442 | else | ||
| 443 | ret = 0; | ||
| 444 | out_free: | ||
| 445 | kfree(cd); | ||
| 446 | free_page((unsigned long)scucd_area); | ||
| 447 | return ret; | ||
| 448 | } | ||
| 449 | |||
| 450 | static int chsc_ioctl_info_sch_cu(void __user *user_cud) | ||
| 451 | { | ||
| 452 | struct chsc_sch_cud *cud; | ||
| 453 | int ret, ccode; | ||
| 454 | struct { | ||
| 455 | struct chsc_header request; | ||
| 456 | u32 : 2; | ||
| 457 | u32 m : 1; | ||
| 458 | u32 : 5; | ||
| 459 | u32 fmt1 : 4; | ||
| 460 | u32 : 2; | ||
| 461 | u32 ssid : 2; | ||
| 462 | u32 first_sch : 16; | ||
| 463 | u32 : 8; | ||
| 464 | u32 cssid : 8; | ||
| 465 | u32 last_sch : 16; | ||
| 466 | u32 : 32; | ||
| 467 | struct chsc_header response; | ||
| 468 | u8 data[PAGE_SIZE - 20]; | ||
| 469 | } __attribute__ ((packed)) *sscud_area; | ||
| 470 | |||
| 471 | sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
| 472 | if (!sscud_area) | ||
| 473 | return -ENOMEM; | ||
| 474 | cud = kzalloc(sizeof(*cud), GFP_KERNEL); | ||
| 475 | if (!cud) { | ||
| 476 | ret = -ENOMEM; | ||
| 477 | goto out_free; | ||
| 478 | } | ||
| 479 | if (copy_from_user(cud, user_cud, sizeof(*cud))) { | ||
| 480 | ret = -EFAULT; | ||
| 481 | goto out_free; | ||
| 482 | } | ||
| 483 | sscud_area->request.length = 0x0010; | ||
| 484 | sscud_area->request.code = 0x0006; | ||
| 485 | sscud_area->m = cud->schid.m; | ||
| 486 | sscud_area->fmt1 = cud->fmt; | ||
| 487 | sscud_area->ssid = cud->schid.ssid; | ||
| 488 | sscud_area->first_sch = cud->schid.sch_no; | ||
| 489 | sscud_area->cssid = cud->schid.cssid; | ||
| 490 | sscud_area->last_sch = cud->schid.sch_no; | ||
| 491 | |||
| 492 | ccode = chsc(sscud_area); | ||
| 493 | if (ccode != 0) { | ||
| 494 | ret = -EIO; | ||
| 495 | goto out_free; | ||
| 496 | } | ||
| 497 | if (sscud_area->response.code != 0x0001) { | ||
| 498 | ret = -EIO; | ||
| 499 | CHSC_MSG(0, "sscud: response code=%x\n", | ||
| 500 | sscud_area->response.code); | ||
| 501 | goto out_free; | ||
| 502 | } | ||
| 503 | memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length); | ||
| 504 | if (copy_to_user(user_cud, cud, sizeof(*cud))) | ||
| 505 | ret = -EFAULT; | ||
| 506 | else | ||
| 507 | ret = 0; | ||
| 508 | out_free: | ||
| 509 | kfree(cud); | ||
| 510 | free_page((unsigned long)sscud_area); | ||
| 511 | return ret; | ||
| 512 | } | ||
| 513 | |||
| 514 | static int chsc_ioctl_conf_info(void __user *user_ci) | ||
| 515 | { | ||
| 516 | struct chsc_conf_info *ci; | ||
| 517 | int ret, ccode; | ||
| 518 | struct { | ||
| 519 | struct chsc_header request; | ||
| 520 | u32 : 2; | ||
| 521 | u32 m : 1; | ||
| 522 | u32 : 1; | ||
| 523 | u32 fmt1 : 4; | ||
| 524 | u32 cssid : 8; | ||
| 525 | u32 : 6; | ||
| 526 | u32 ssid : 2; | ||
| 527 | u32 : 8; | ||
| 528 | u64 : 64; | ||
| 529 | struct chsc_header response; | ||
| 530 | u8 data[PAGE_SIZE - 20]; | ||
| 531 | } __attribute__ ((packed)) *sci_area; | ||
| 532 | |||
| 533 | sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
| 534 | if (!sci_area) | ||
| 535 | return -ENOMEM; | ||
| 536 | ci = kzalloc(sizeof(*ci), GFP_KERNEL); | ||
| 537 | if (!ci) { | ||
| 538 | ret = -ENOMEM; | ||
| 539 | goto out_free; | ||
| 540 | } | ||
| 541 | if (copy_from_user(ci, user_ci, sizeof(*ci))) { | ||
| 542 | ret = -EFAULT; | ||
| 543 | goto out_free; | ||
| 544 | } | ||
| 545 | sci_area->request.length = 0x0010; | ||
| 546 | sci_area->request.code = 0x0012; | ||
| 547 | sci_area->m = ci->id.m; | ||
| 548 | sci_area->fmt1 = ci->fmt; | ||
| 549 | sci_area->cssid = ci->id.cssid; | ||
| 550 | sci_area->ssid = ci->id.ssid; | ||
| 551 | |||
| 552 | ccode = chsc(sci_area); | ||
| 553 | if (ccode != 0) { | ||
| 554 | ret = -EIO; | ||
| 555 | goto out_free; | ||
| 556 | } | ||
| 557 | if (sci_area->response.code != 0x0001) { | ||
| 558 | ret = -EIO; | ||
| 559 | CHSC_MSG(0, "sci: response code=%x\n", | ||
| 560 | sci_area->response.code); | ||
| 561 | goto out_free; | ||
| 562 | } | ||
| 563 | memcpy(&ci->scid, &sci_area->response, sci_area->response.length); | ||
| 564 | if (copy_to_user(user_ci, ci, sizeof(*ci))) | ||
| 565 | ret = -EFAULT; | ||
| 566 | else | ||
| 567 | ret = 0; | ||
| 568 | out_free: | ||
| 569 | kfree(ci); | ||
| 570 | free_page((unsigned long)sci_area); | ||
| 571 | return ret; | ||
| 572 | } | ||
| 573 | |||
| 574 | static int chsc_ioctl_conf_comp_list(void __user *user_ccl) | ||
| 575 | { | ||
| 576 | struct chsc_comp_list *ccl; | ||
| 577 | int ret, ccode; | ||
| 578 | struct { | ||
| 579 | struct chsc_header request; | ||
| 580 | u32 ctype : 8; | ||
| 581 | u32 : 4; | ||
| 582 | u32 fmt : 4; | ||
| 583 | u32 : 16; | ||
| 584 | u64 : 64; | ||
| 585 | u32 list_parm[2]; | ||
| 586 | u64 : 64; | ||
| 587 | struct chsc_header response; | ||
| 588 | u8 data[PAGE_SIZE - 36]; | ||
| 589 | } __attribute__ ((packed)) *sccl_area; | ||
| 590 | struct { | ||
| 591 | u32 m : 1; | ||
| 592 | u32 : 31; | ||
| 593 | u32 cssid : 8; | ||
| 594 | u32 : 16; | ||
| 595 | u32 chpid : 8; | ||
| 596 | } __attribute__ ((packed)) *chpid_parm; | ||
| 597 | struct { | ||
| 598 | u32 f_cssid : 8; | ||
| 599 | u32 l_cssid : 8; | ||
| 600 | u32 : 16; | ||
| 601 | u32 res; | ||
| 602 | } __attribute__ ((packed)) *cssids_parm; | ||
| 603 | |||
| 604 | sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
| 605 | if (!sccl_area) | ||
| 606 | return -ENOMEM; | ||
| 607 | ccl = kzalloc(sizeof(*ccl), GFP_KERNEL); | ||
| 608 | if (!ccl) { | ||
| 609 | ret = -ENOMEM; | ||
| 610 | goto out_free; | ||
| 611 | } | ||
| 612 | if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) { | ||
| 613 | ret = -EFAULT; | ||
| 614 | goto out_free; | ||
| 615 | } | ||
| 616 | sccl_area->request.length = 0x0020; | ||
| 617 | sccl_area->request.code = 0x0030; | ||
| 618 | sccl_area->fmt = ccl->req.fmt; | ||
| 619 | sccl_area->ctype = ccl->req.ctype; | ||
| 620 | switch (sccl_area->ctype) { | ||
| 621 | case CCL_CU_ON_CHP: | ||
| 622 | case CCL_IOP_CHP: | ||
| 623 | chpid_parm = (void *)&sccl_area->list_parm; | ||
| 624 | chpid_parm->m = ccl->req.chpid.m; | ||
| 625 | chpid_parm->cssid = ccl->req.chpid.chp.cssid; | ||
| 626 | chpid_parm->chpid = ccl->req.chpid.chp.id; | ||
| 627 | break; | ||
| 628 | case CCL_CSS_IMG: | ||
| 629 | case CCL_CSS_IMG_CONF_CHAR: | ||
| 630 | cssids_parm = (void *)&sccl_area->list_parm; | ||
| 631 | cssids_parm->f_cssid = ccl->req.cssids.f_cssid; | ||
| 632 | cssids_parm->l_cssid = ccl->req.cssids.l_cssid; | ||
| 633 | break; | ||
| 634 | } | ||
| 635 | ccode = chsc(sccl_area); | ||
| 636 | if (ccode != 0) { | ||
| 637 | ret = -EIO; | ||
| 638 | goto out_free; | ||
| 639 | } | ||
| 640 | if (sccl_area->response.code != 0x0001) { | ||
| 641 | ret = -EIO; | ||
| 642 | CHSC_MSG(0, "sccl: response code=%x\n", | ||
| 643 | sccl_area->response.code); | ||
| 644 | goto out_free; | ||
| 645 | } | ||
| 646 | memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length); | ||
| 647 | if (copy_to_user(user_ccl, ccl, sizeof(*ccl))) | ||
| 648 | ret = -EFAULT; | ||
| 649 | else | ||
| 650 | ret = 0; | ||
| 651 | out_free: | ||
| 652 | kfree(ccl); | ||
| 653 | free_page((unsigned long)sccl_area); | ||
| 654 | return ret; | ||
| 655 | } | ||
| 656 | |||
| 657 | static int chsc_ioctl_chpd(void __user *user_chpd) | ||
| 658 | { | ||
| 659 | struct chsc_cpd_info *chpd; | ||
| 660 | int ret; | ||
| 661 | |||
| 662 | chpd = kzalloc(sizeof(*chpd), GFP_KERNEL); | ||
| 663 | if (!chpd) | ||
| 664 | return -ENOMEM; | ||
| 665 | if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) { | ||
| 666 | ret = -EFAULT; | ||
| 667 | goto out_free; | ||
| 668 | } | ||
| 669 | ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt, | ||
| 670 | chpd->rfmt, chpd->c, chpd->m, | ||
| 671 | &chpd->chpdb); | ||
| 672 | if (ret) | ||
| 673 | goto out_free; | ||
| 674 | if (copy_to_user(user_chpd, chpd, sizeof(*chpd))) | ||
| 675 | ret = -EFAULT; | ||
| 676 | out_free: | ||
| 677 | kfree(chpd); | ||
| 678 | return ret; | ||
| 679 | } | ||
| 680 | |||
| 681 | static int chsc_ioctl_dcal(void __user *user_dcal) | ||
| 682 | { | ||
| 683 | struct chsc_dcal *dcal; | ||
| 684 | int ret, ccode; | ||
| 685 | struct { | ||
| 686 | struct chsc_header request; | ||
| 687 | u32 atype : 8; | ||
| 688 | u32 : 4; | ||
| 689 | u32 fmt : 4; | ||
| 690 | u32 : 16; | ||
| 691 | u32 res0[2]; | ||
| 692 | u32 list_parm[2]; | ||
| 693 | u32 res1[2]; | ||
| 694 | struct chsc_header response; | ||
| 695 | u8 data[PAGE_SIZE - 36]; | ||
| 696 | } __attribute__ ((packed)) *sdcal_area; | ||
| 697 | |||
| 698 | sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
| 699 | if (!sdcal_area) | ||
| 700 | return -ENOMEM; | ||
| 701 | dcal = kzalloc(sizeof(*dcal), GFP_KERNEL); | ||
| 702 | if (!dcal) { | ||
| 703 | ret = -ENOMEM; | ||
| 704 | goto out_free; | ||
| 705 | } | ||
| 706 | if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) { | ||
| 707 | ret = -EFAULT; | ||
| 708 | goto out_free; | ||
| 709 | } | ||
| 710 | sdcal_area->request.length = 0x0020; | ||
| 711 | sdcal_area->request.code = 0x0034; | ||
| 712 | sdcal_area->atype = dcal->req.atype; | ||
| 713 | sdcal_area->fmt = dcal->req.fmt; | ||
| 714 | memcpy(&sdcal_area->list_parm, &dcal->req.list_parm, | ||
| 715 | sizeof(sdcal_area->list_parm)); | ||
| 716 | |||
| 717 | ccode = chsc(sdcal_area); | ||
| 718 | if (ccode != 0) { | ||
| 719 | ret = -EIO; | ||
| 720 | goto out_free; | ||
| 721 | } | ||
| 722 | if (sdcal_area->response.code != 0x0001) { | ||
| 723 | ret = -EIO; | ||
| 724 | CHSC_MSG(0, "sdcal: response code=%x\n", | ||
| 725 | sdcal_area->response.code); | ||
| 726 | goto out_free; | ||
| 727 | } | ||
| 728 | memcpy(&dcal->sdcal, &sdcal_area->response, | ||
| 729 | sdcal_area->response.length); | ||
| 730 | if (copy_to_user(user_dcal, dcal, sizeof(*dcal))) | ||
| 731 | ret = -EFAULT; | ||
| 732 | else | ||
| 733 | ret = 0; | ||
| 734 | out_free: | ||
| 735 | kfree(dcal); | ||
| 736 | free_page((unsigned long)sdcal_area); | ||
| 737 | return ret; | ||
| 738 | } | ||
| 739 | |||
| 740 | static long chsc_ioctl(struct file *filp, unsigned int cmd, | ||
| 741 | unsigned long arg) | ||
| 742 | { | ||
| 743 | CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd); | ||
| 744 | switch (cmd) { | ||
| 745 | case CHSC_START: | ||
| 746 | return chsc_ioctl_start((void __user *)arg); | ||
| 747 | case CHSC_INFO_CHANNEL_PATH: | ||
| 748 | return chsc_ioctl_info_channel_path((void __user *)arg); | ||
| 749 | case CHSC_INFO_CU: | ||
| 750 | return chsc_ioctl_info_cu((void __user *)arg); | ||
| 751 | case CHSC_INFO_SCH_CU: | ||
| 752 | return chsc_ioctl_info_sch_cu((void __user *)arg); | ||
| 753 | case CHSC_INFO_CI: | ||
| 754 | return chsc_ioctl_conf_info((void __user *)arg); | ||
| 755 | case CHSC_INFO_CCL: | ||
| 756 | return chsc_ioctl_conf_comp_list((void __user *)arg); | ||
| 757 | case CHSC_INFO_CPD: | ||
| 758 | return chsc_ioctl_chpd((void __user *)arg); | ||
| 759 | case CHSC_INFO_DCAL: | ||
| 760 | return chsc_ioctl_dcal((void __user *)arg); | ||
| 761 | default: /* unknown ioctl number */ | ||
| 762 | return -ENOIOCTLCMD; | ||
| 763 | } | ||
| 764 | } | ||
| 765 | |||
| 766 | static const struct file_operations chsc_fops = { | ||
| 767 | .owner = THIS_MODULE, | ||
| 768 | .unlocked_ioctl = chsc_ioctl, | ||
| 769 | .compat_ioctl = chsc_ioctl, | ||
| 770 | }; | ||
| 771 | |||
| 772 | static struct miscdevice chsc_misc_device = { | ||
| 773 | .minor = MISC_DYNAMIC_MINOR, | ||
| 774 | .name = "chsc", | ||
| 775 | .fops = &chsc_fops, | ||
| 776 | }; | ||
| 777 | |||
| 778 | static int __init chsc_misc_init(void) | ||
| 779 | { | ||
| 780 | return misc_register(&chsc_misc_device); | ||
| 781 | } | ||
| 782 | |||
| 783 | static void chsc_misc_cleanup(void) | ||
| 784 | { | ||
| 785 | misc_deregister(&chsc_misc_device); | ||
| 786 | } | ||
| 787 | |||
| 788 | static int __init chsc_sch_init(void) | ||
| 789 | { | ||
| 790 | int ret; | ||
| 791 | |||
| 792 | ret = chsc_init_dbfs(); | ||
| 793 | if (ret) | ||
| 794 | return ret; | ||
| 795 | isc_register(CHSC_SCH_ISC); | ||
| 796 | ret = chsc_init_sch_driver(); | ||
| 797 | if (ret) | ||
| 798 | goto out_dbf; | ||
| 799 | ret = chsc_misc_init(); | ||
| 800 | if (ret) | ||
| 801 | goto out_driver; | ||
| 802 | return ret; | ||
| 803 | out_driver: | ||
| 804 | chsc_cleanup_sch_driver(); | ||
| 805 | out_dbf: | ||
| 806 | isc_unregister(CHSC_SCH_ISC); | ||
| 807 | chsc_remove_dbfs(); | ||
| 808 | return ret; | ||
| 809 | } | ||
| 810 | |||
| 811 | static void __exit chsc_sch_exit(void) | ||
| 812 | { | ||
| 813 | chsc_misc_cleanup(); | ||
| 814 | chsc_cleanup_sch_driver(); | ||
| 815 | isc_unregister(CHSC_SCH_ISC); | ||
| 816 | chsc_remove_dbfs(); | ||
| 817 | } | ||
| 818 | |||
| 819 | module_init(chsc_sch_init); | ||
| 820 | module_exit(chsc_sch_exit); | ||
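The new driver also registers a /dev/chsc misc device whose ioctls map onto the handlers above. A hedged user-space sketch of issuing CHSC_START is shown below; it assumes struct chsc_async_area and the CHSC_* ioctl numbers reach user space via <asm/chsc.h> (added elsewhere in this series), that the structure spans a full page as the kernel's copy_from_user(…, PAGE_SIZE) expects, and it leaves the actual CHSC command fields for the caller to fill in.

	/* Sketch only: open the chsc misc device and submit one asynchronous
	 * CHSC request.  The command contents are left to the caller. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <asm/chsc.h>		/* CHSC_START, struct chsc_async_area */

	int main(void)
	{
		struct chsc_async_area area;	/* assumed to span a full page */
		int fd, rc;

		fd = open("/dev/chsc", O_RDWR);
		if (fd < 0) {
			perror("open /dev/chsc");
			return 1;
		}
		memset(&area, 0, sizeof(area));
		/* Fill in area.header and the command-dependent operand fields
		 * here; the kernel copies the page in, runs it on a free chsc
		 * subchannel and copies the result back on success. */
		rc = ioctl(fd, CHSC_START, &area);
		if (rc)
			perror("CHSC_START ioctl");
		close(fd);
		return rc ? 1 : 0;
	}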
diff --git a/drivers/s390/cio/chsc_sch.h b/drivers/s390/cio/chsc_sch.h new file mode 100644 index 000000000000..589ebfad6aad --- /dev/null +++ b/drivers/s390/cio/chsc_sch.h | |||
| @@ -0,0 +1,13 @@ | |||
| 1 | #ifndef _CHSC_SCH_H | ||
| 2 | #define _CHSC_SCH_H | ||
| 3 | |||
| 4 | struct chsc_request { | ||
| 5 | struct completion completion; | ||
| 6 | struct irb irb; | ||
| 7 | }; | ||
| 8 | |||
| 9 | struct chsc_private { | ||
| 10 | struct chsc_request *request; | ||
| 11 | }; | ||
| 12 | |||
| 13 | #endif | ||
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index b32d7eb3d81a..33bff8fec7d1 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | * drivers/s390/cio/cio.c | 2 | * drivers/s390/cio/cio.c |
| 3 | * S/390 common I/O routines -- low level i/o calls | 3 | * S/390 common I/O routines -- low level i/o calls |
| 4 | * | 4 | * |
| 5 | * Copyright (C) IBM Corp. 1999,2006 | 5 | * Copyright IBM Corp. 1999,2008 |
| 6 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | 6 | * Author(s): Ingo Adlung (adlung@de.ibm.com) |
| 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
| 8 | * Arnd Bergmann (arndb@de.ibm.com) | 8 | * Arnd Bergmann (arndb@de.ibm.com) |
| @@ -24,7 +24,9 @@ | |||
| 24 | #include <asm/ipl.h> | 24 | #include <asm/ipl.h> |
| 25 | #include <asm/chpid.h> | 25 | #include <asm/chpid.h> |
| 26 | #include <asm/airq.h> | 26 | #include <asm/airq.h> |
| 27 | #include <asm/isc.h> | ||
| 27 | #include <asm/cpu.h> | 28 | #include <asm/cpu.h> |
| 29 | #include <asm/fcx.h> | ||
| 28 | #include "cio.h" | 30 | #include "cio.h" |
| 29 | #include "css.h" | 31 | #include "css.h" |
| 30 | #include "chsc.h" | 32 | #include "chsc.h" |
| @@ -72,7 +74,6 @@ out_unregister: | |||
| 72 | debug_unregister(cio_debug_trace_id); | 74 | debug_unregister(cio_debug_trace_id); |
| 73 | if (cio_debug_crw_id) | 75 | if (cio_debug_crw_id) |
| 74 | debug_unregister(cio_debug_crw_id); | 76 | debug_unregister(cio_debug_crw_id); |
| 75 | printk(KERN_WARNING"cio: could not initialize debugging\n"); | ||
| 76 | return -1; | 77 | return -1; |
| 77 | } | 78 | } |
| 78 | 79 | ||
| @@ -128,7 +129,7 @@ cio_tpi(void) | |||
| 128 | local_bh_disable(); | 129 | local_bh_disable(); |
| 129 | irq_enter (); | 130 | irq_enter (); |
| 130 | spin_lock(sch->lock); | 131 | spin_lock(sch->lock); |
| 131 | memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); | 132 | memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); |
| 132 | if (sch->driver && sch->driver->irq) | 133 | if (sch->driver && sch->driver->irq) |
| 133 | sch->driver->irq(sch); | 134 | sch->driver->irq(sch); |
| 134 | spin_unlock(sch->lock); | 135 | spin_unlock(sch->lock); |
| @@ -167,30 +168,30 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ | |||
| 167 | { | 168 | { |
| 168 | char dbf_txt[15]; | 169 | char dbf_txt[15]; |
| 169 | int ccode; | 170 | int ccode; |
| 170 | struct orb *orb; | 171 | union orb *orb; |
| 171 | 172 | ||
| 172 | CIO_TRACE_EVENT(4, "stIO"); | 173 | CIO_TRACE_EVENT(4, "stIO"); |
| 173 | CIO_TRACE_EVENT(4, sch->dev.bus_id); | 174 | CIO_TRACE_EVENT(4, sch->dev.bus_id); |
| 174 | 175 | ||
| 175 | orb = &to_io_private(sch)->orb; | 176 | orb = &to_io_private(sch)->orb; |
| 176 | /* sch is always under 2G. */ | 177 | /* sch is always under 2G. */ |
| 177 | orb->intparm = (u32)(addr_t)sch; | 178 | orb->cmd.intparm = (u32)(addr_t)sch; |
| 178 | orb->fmt = 1; | 179 | orb->cmd.fmt = 1; |
| 179 | 180 | ||
| 180 | orb->pfch = sch->options.prefetch == 0; | 181 | orb->cmd.pfch = sch->options.prefetch == 0; |
| 181 | orb->spnd = sch->options.suspend; | 182 | orb->cmd.spnd = sch->options.suspend; |
| 182 | orb->ssic = sch->options.suspend && sch->options.inter; | 183 | orb->cmd.ssic = sch->options.suspend && sch->options.inter; |
| 183 | orb->lpm = (lpm != 0) ? lpm : sch->lpm; | 184 | orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm; |
| 184 | #ifdef CONFIG_64BIT | 185 | #ifdef CONFIG_64BIT |
| 185 | /* | 186 | /* |
| 186 | * for 64 bit we always support 64 bit IDAWs with 4k page size only | 187 | * for 64 bit we always support 64 bit IDAWs with 4k page size only |
| 187 | */ | 188 | */ |
| 188 | orb->c64 = 1; | 189 | orb->cmd.c64 = 1; |
| 189 | orb->i2k = 0; | 190 | orb->cmd.i2k = 0; |
| 190 | #endif | 191 | #endif |
| 191 | orb->key = key >> 4; | 192 | orb->cmd.key = key >> 4; |
| 192 | /* issue "Start Subchannel" */ | 193 | /* issue "Start Subchannel" */ |
| 193 | orb->cpa = (__u32) __pa(cpa); | 194 | orb->cmd.cpa = (__u32) __pa(cpa); |
| 194 | ccode = ssch(sch->schid, orb); | 195 | ccode = ssch(sch->schid, orb); |
| 195 | 196 | ||
| 196 | /* process condition code */ | 197 | /* process condition code */ |
| @@ -202,7 +203,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ | |||
| 202 | /* | 203 | /* |
| 203 | * initialize device status information | 204 | * initialize device status information |
| 204 | */ | 205 | */ |
| 205 | sch->schib.scsw.actl |= SCSW_ACTL_START_PEND; | 206 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; |
| 206 | return 0; | 207 | return 0; |
| 207 | case 1: /* status pending */ | 208 | case 1: /* status pending */ |
| 208 | case 2: /* busy */ | 209 | case 2: /* busy */ |
| @@ -237,7 +238,7 @@ cio_resume (struct subchannel *sch) | |||
| 237 | 238 | ||
| 238 | switch (ccode) { | 239 | switch (ccode) { |
| 239 | case 0: | 240 | case 0: |
| 240 | sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND; | 241 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND; |
| 241 | return 0; | 242 | return 0; |
| 242 | case 1: | 243 | case 1: |
| 243 | return -EBUSY; | 244 | return -EBUSY; |
| @@ -277,7 +278,7 @@ cio_halt(struct subchannel *sch) | |||
| 277 | 278 | ||
| 278 | switch (ccode) { | 279 | switch (ccode) { |
| 279 | case 0: | 280 | case 0: |
| 280 | sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND; | 281 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND; |
| 281 | return 0; | 282 | return 0; |
| 282 | case 1: /* status pending */ | 283 | case 1: /* status pending */ |
| 283 | case 2: /* busy */ | 284 | case 2: /* busy */ |
| @@ -312,7 +313,7 @@ cio_clear(struct subchannel *sch) | |||
| 312 | 313 | ||
| 313 | switch (ccode) { | 314 | switch (ccode) { |
| 314 | case 0: | 315 | case 0: |
| 315 | sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND; | 316 | sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND; |
| 316 | return 0; | 317 | return 0; |
| 317 | default: /* device not operational */ | 318 | default: /* device not operational */ |
| 318 | return -ENODEV; | 319 | return -ENODEV; |
| @@ -387,8 +388,10 @@ cio_modify (struct subchannel *sch) | |||
| 387 | return ret; | 388 | return ret; |
| 388 | } | 389 | } |
| 389 | 390 | ||
| 390 | /* | 391 | /** |
| 391 | * Enable subchannel. | 392 | * cio_enable_subchannel - enable a subchannel. |
| 393 | * @sch: subchannel to be enabled | ||
| 394 | * @intparm: interruption parameter to set | ||
| 392 | */ | 395 | */ |
| 393 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) | 396 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) |
| 394 | { | 397 | { |
| @@ -434,12 +437,13 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm) | |||
| 434 | CIO_TRACE_EVENT (2, dbf_txt); | 437 | CIO_TRACE_EVENT (2, dbf_txt); |
| 435 | return ret; | 438 | return ret; |
| 436 | } | 439 | } |
| 440 | EXPORT_SYMBOL_GPL(cio_enable_subchannel); | ||
| 437 | 441 | ||
| 438 | /* | 442 | /** |
| 439 | * Disable subchannel. | 443 | * cio_disable_subchannel - disable a subchannel. |
| 444 | * @sch: subchannel to disable | ||
| 440 | */ | 445 | */ |
| 441 | int | 446 | int cio_disable_subchannel(struct subchannel *sch) |
| 442 | cio_disable_subchannel (struct subchannel *sch) | ||
| 443 | { | 447 | { |
| 444 | char dbf_txt[15]; | 448 | char dbf_txt[15]; |
| 445 | int ccode; | 449 | int ccode; |
| @@ -455,7 +459,7 @@ cio_disable_subchannel (struct subchannel *sch) | |||
| 455 | if (ccode == 3) /* Not operational. */ | 459 | if (ccode == 3) /* Not operational. */ |
| 456 | return -ENODEV; | 460 | return -ENODEV; |
| 457 | 461 | ||
| 458 | if (sch->schib.scsw.actl != 0) | 462 | if (scsw_actl(&sch->schib.scsw) != 0) |
| 459 | /* | 463 | /* |
| 460 | * the disable function must not be called while there are | 464 | * the disable function must not be called while there are |
| 461 | * requests pending for completion ! | 465 | * requests pending for completion ! |
| @@ -484,6 +488,7 @@ cio_disable_subchannel (struct subchannel *sch) | |||
| 484 | CIO_TRACE_EVENT (2, dbf_txt); | 488 | CIO_TRACE_EVENT (2, dbf_txt); |
| 485 | return ret; | 489 | return ret; |
| 486 | } | 490 | } |
| 491 | EXPORT_SYMBOL_GPL(cio_disable_subchannel); | ||
| 487 | 492 | ||
| 488 | int cio_create_sch_lock(struct subchannel *sch) | 493 | int cio_create_sch_lock(struct subchannel *sch) |
| 489 | { | 494 | { |
| @@ -494,27 +499,61 @@ int cio_create_sch_lock(struct subchannel *sch) | |||
| 494 | return 0; | 499 | return 0; |
| 495 | } | 500 | } |
| 496 | 501 | ||
| 497 | /* | 502 | static int cio_check_devno_blacklisted(struct subchannel *sch) |
| 498 | * cio_validate_subchannel() | 503 | { |
| 504 | if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { | ||
| 505 | /* | ||
| 506 | * This device must not be known to Linux. So we simply | ||
| 507 | * say that there is no device and return ENODEV. | ||
| 508 | */ | ||
| 509 | CIO_MSG_EVENT(6, "Blacklisted device detected " | ||
| 510 | "at devno %04X, subchannel set %x\n", | ||
| 511 | sch->schib.pmcw.dev, sch->schid.ssid); | ||
| 512 | return -ENODEV; | ||
| 513 | } | ||
| 514 | return 0; | ||
| 515 | } | ||
| 516 | |||
| 517 | static int cio_validate_io_subchannel(struct subchannel *sch) | ||
| 518 | { | ||
| 519 | /* Initialization for io subchannels. */ | ||
| 520 | if (!css_sch_is_valid(&sch->schib)) | ||
| 521 | return -ENODEV; | ||
| 522 | |||
| 523 | /* Devno is valid. */ | ||
| 524 | return cio_check_devno_blacklisted(sch); | ||
| 525 | } | ||
| 526 | |||
| 527 | static int cio_validate_msg_subchannel(struct subchannel *sch) | ||
| 528 | { | ||
| 529 | /* Initialization for message subchannels. */ | ||
| 530 | if (!css_sch_is_valid(&sch->schib)) | ||
| 531 | return -ENODEV; | ||
| 532 | |||
| 533 | /* Devno is valid. */ | ||
| 534 | return cio_check_devno_blacklisted(sch); | ||
| 535 | } | ||
| 536 | |||
| 537 | /** | ||
| 538 | * cio_validate_subchannel - basic validation of subchannel | ||
| 539 | * @sch: subchannel structure to be filled out | ||
| 540 | * @schid: subchannel id | ||
| 499 | * | 541 | * |
| 500 | * Find out subchannel type and initialize struct subchannel. | 542 | * Find out subchannel type and initialize struct subchannel. |
| 501 | * Return codes: | 543 | * Return codes: |
| 502 | * SUBCHANNEL_TYPE_IO for a normal io subchannel | 544 | * 0 on success |
| 503 | * SUBCHANNEL_TYPE_CHSC for a chsc subchannel | ||
| 504 | * SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel | ||
| 505 | * SUBCHANNEL_TYPE_ADM for a adm(?) subchannel | ||
| 506 | * -ENXIO for non-defined subchannels | 545 | * -ENXIO for non-defined subchannels |
| 507 | * -ENODEV for subchannels with invalid device number or blacklisted devices | 546 | * -ENODEV for invalid subchannels or blacklisted devices |
| 547 | * -EIO for subchannels in an invalid subchannel set | ||
| 508 | */ | 548 | */ |
| 509 | int | 549 | int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) |
| 510 | cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | ||
| 511 | { | 550 | { |
| 512 | char dbf_txt[15]; | 551 | char dbf_txt[15]; |
| 513 | int ccode; | 552 | int ccode; |
| 514 | int err; | 553 | int err; |
| 515 | 554 | ||
| 516 | sprintf (dbf_txt, "valsch%x", schid.sch_no); | 555 | sprintf(dbf_txt, "valsch%x", schid.sch_no); |
| 517 | CIO_TRACE_EVENT (4, dbf_txt); | 556 | CIO_TRACE_EVENT(4, dbf_txt); |
| 518 | 557 | ||
| 519 | /* Nuke all fields. */ | 558 | /* Nuke all fields. */ |
| 520 | memset(sch, 0, sizeof(struct subchannel)); | 559 | memset(sch, 0, sizeof(struct subchannel)); |
| @@ -546,67 +585,21 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | |||
| 546 | /* Copy subchannel type from path management control word. */ | 585 | /* Copy subchannel type from path management control word. */ |
| 547 | sch->st = sch->schib.pmcw.st; | 586 | sch->st = sch->schib.pmcw.st; |
| 548 | 587 | ||
| 549 | /* | 588 | switch (sch->st) { |
| 550 | * ... just being curious we check for non I/O subchannels | 589 | case SUBCHANNEL_TYPE_IO: |
| 551 | */ | 590 | err = cio_validate_io_subchannel(sch); |
| 552 | if (sch->st != 0) { | 591 | break; |
| 553 | CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports " | 592 | case SUBCHANNEL_TYPE_MSG: |
| 554 | "non-I/O subchannel type %04X\n", | 593 | err = cio_validate_msg_subchannel(sch); |
| 555 | sch->schid.ssid, sch->schid.sch_no, sch->st); | 594 | break; |
| 556 | /* We stop here for non-io subchannels. */ | 595 | default: |
| 557 | err = sch->st; | 596 | err = 0; |
| 558 | goto out; | ||
| 559 | } | ||
| 560 | |||
| 561 | /* Initialization for io subchannels. */ | ||
| 562 | if (!css_sch_is_valid(&sch->schib)) { | ||
| 563 | err = -ENODEV; | ||
| 564 | goto out; | ||
| 565 | } | 597 | } |
| 566 | 598 | if (err) | |
| 567 | /* Devno is valid. */ | ||
| 568 | if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) { | ||
| 569 | /* | ||
| 570 | * This device must not be known to Linux. So we simply | ||
| 571 | * say that there is no device and return ENODEV. | ||
| 572 | */ | ||
| 573 | CIO_MSG_EVENT(6, "Blacklisted device detected " | ||
| 574 | "at devno %04X, subchannel set %x\n", | ||
| 575 | sch->schib.pmcw.dev, sch->schid.ssid); | ||
| 576 | err = -ENODEV; | ||
| 577 | goto out; | 599 | goto out; |
| 578 | } | ||
| 579 | if (cio_is_console(sch->schid)) { | ||
| 580 | sch->opm = 0xff; | ||
| 581 | sch->isc = 1; | ||
| 582 | } else { | ||
| 583 | sch->opm = chp_get_sch_opm(sch); | ||
| 584 | sch->isc = 3; | ||
| 585 | } | ||
| 586 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | ||
| 587 | |||
| 588 | CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X " | ||
| 589 | "- PIM = %02X, PAM = %02X, POM = %02X\n", | ||
| 590 | sch->schib.pmcw.dev, sch->schid.ssid, | ||
| 591 | sch->schid.sch_no, sch->schib.pmcw.pim, | ||
| 592 | sch->schib.pmcw.pam, sch->schib.pmcw.pom); | ||
| 593 | 600 | ||
| 594 | /* | 601 | CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", |
| 595 | * We now have to initially ... | 602 | sch->schid.ssid, sch->schid.sch_no, sch->st); |
| 596 | * ... enable "concurrent sense" | ||
| 597 | * ... enable "multipath mode" if more than one | ||
| 598 | * CHPID is available. This is done regardless | ||
| 599 | * whether multiple paths are available for us. | ||
| 600 | */ | ||
| 601 | sch->schib.pmcw.csense = 1; /* concurrent sense */ | ||
| 602 | sch->schib.pmcw.ena = 0; | ||
| 603 | if ((sch->lpm & (sch->lpm - 1)) != 0) | ||
| 604 | sch->schib.pmcw.mp = 1; /* multipath mode */ | ||
| 605 | /* clean up possible residual cmf stuff */ | ||
| 606 | sch->schib.pmcw.mme = 0; | ||
| 607 | sch->schib.pmcw.mbfc = 0; | ||
| 608 | sch->schib.pmcw.mbi = 0; | ||
| 609 | sch->schib.mba = 0; | ||
| 610 | return 0; | 603 | return 0; |
| 611 | out: | 604 | out: |
| 612 | if (!cio_is_console(schid)) | 605 | if (!cio_is_console(schid)) |
| @@ -647,7 +640,7 @@ do_IRQ (struct pt_regs *regs) | |||
| 647 | */ | 640 | */ |
| 648 | if (tpi_info->adapter_IO == 1 && | 641 | if (tpi_info->adapter_IO == 1 && |
| 649 | tpi_info->int_type == IO_INTERRUPT_TYPE) { | 642 | tpi_info->int_type == IO_INTERRUPT_TYPE) { |
| 650 | do_adapter_IO(); | 643 | do_adapter_IO(tpi_info->isc); |
| 651 | continue; | 644 | continue; |
| 652 | } | 645 | } |
| 653 | sch = (struct subchannel *)(unsigned long)tpi_info->intparm; | 646 | sch = (struct subchannel *)(unsigned long)tpi_info->intparm; |
| @@ -706,9 +699,9 @@ void wait_cons_dev(void) | |||
| 706 | if (!console_subchannel_in_use) | 699 | if (!console_subchannel_in_use) |
| 707 | return; | 700 | return; |
| 708 | 701 | ||
| 709 | /* disable all but isc 1 (console device) */ | 702 | /* disable all but the console isc */ |
| 710 | __ctl_store (save_cr6, 6, 6); | 703 | __ctl_store (save_cr6, 6, 6); |
| 711 | cr6 = 0x40000000; | 704 | cr6 = 1UL << (31 - CONSOLE_ISC); |
| 712 | __ctl_load (cr6, 6, 6); | 705 | __ctl_load (cr6, 6, 6); |
| 713 | 706 | ||
| 714 | do { | 707 | do { |
| @@ -716,7 +709,7 @@ void wait_cons_dev(void) | |||
| 716 | if (!cio_tpi()) | 709 | if (!cio_tpi()) |
| 717 | cpu_relax(); | 710 | cpu_relax(); |
| 718 | spin_lock(console_subchannel.lock); | 711 | spin_lock(console_subchannel.lock); |
| 719 | } while (console_subchannel.schib.scsw.actl != 0); | 712 | } while (console_subchannel.schib.scsw.cmd.actl != 0); |
| 720 | /* | 713 | /* |
| 721 | * restore previous isc value | 714 | * restore previous isc value |
| 722 | */ | 715 | */ |
| @@ -761,7 +754,6 @@ cio_get_console_sch_no(void) | |||
| 761 | /* unlike in 2.4, we cannot autoprobe here, since | 754 | /* unlike in 2.4, we cannot autoprobe here, since |
| 762 | * the channel subsystem is not fully initialized. | 755 | * the channel subsystem is not fully initialized. |
| 763 | * With some luck, the HWC console can take over */ | 756 | * With some luck, the HWC console can take over */ |
| 764 | printk(KERN_WARNING "cio: No ccw console found!\n"); | ||
| 765 | return -1; | 757 | return -1; |
| 766 | } | 758 | } |
| 767 | return console_irq; | 759 | return console_irq; |
| @@ -778,6 +770,7 @@ cio_probe_console(void) | |||
| 778 | sch_no = cio_get_console_sch_no(); | 770 | sch_no = cio_get_console_sch_no(); |
| 779 | if (sch_no == -1) { | 771 | if (sch_no == -1) { |
| 780 | console_subchannel_in_use = 0; | 772 | console_subchannel_in_use = 0; |
| 773 | printk(KERN_WARNING "cio: No ccw console found!\n"); | ||
| 781 | return ERR_PTR(-ENODEV); | 774 | return ERR_PTR(-ENODEV); |
| 782 | } | 775 | } |
| 783 | memset(&console_subchannel, 0, sizeof(struct subchannel)); | 776 | memset(&console_subchannel, 0, sizeof(struct subchannel)); |
| @@ -790,15 +783,15 @@ cio_probe_console(void) | |||
| 790 | } | 783 | } |
| 791 | 784 | ||
| 792 | /* | 785 | /* |
| 793 | * enable console I/O-interrupt subclass 1 | 786 | * enable console I/O-interrupt subclass |
| 794 | */ | 787 | */ |
| 795 | ctl_set_bit(6, 30); | 788 | isc_register(CONSOLE_ISC); |
| 796 | console_subchannel.isc = 1; | 789 | console_subchannel.schib.pmcw.isc = CONSOLE_ISC; |
| 797 | console_subchannel.schib.pmcw.isc = 1; | ||
| 798 | console_subchannel.schib.pmcw.intparm = | 790 | console_subchannel.schib.pmcw.intparm = |
| 799 | (u32)(addr_t)&console_subchannel; | 791 | (u32)(addr_t)&console_subchannel; |
| 800 | ret = cio_modify(&console_subchannel); | 792 | ret = cio_modify(&console_subchannel); |
| 801 | if (ret) { | 793 | if (ret) { |
| 794 | isc_unregister(CONSOLE_ISC); | ||
| 802 | console_subchannel_in_use = 0; | 795 | console_subchannel_in_use = 0; |
| 803 | return ERR_PTR(ret); | 796 | return ERR_PTR(ret); |
| 804 | } | 797 | } |
| @@ -810,7 +803,7 @@ cio_release_console(void) | |||
| 810 | { | 803 | { |
| 811 | console_subchannel.schib.pmcw.intparm = 0; | 804 | console_subchannel.schib.pmcw.intparm = 0; |
| 812 | cio_modify(&console_subchannel); | 805 | cio_modify(&console_subchannel); |
| 813 | ctl_clear_bit(6, 24); | 806 | isc_unregister(CONSOLE_ISC); |
| 814 | console_subchannel_in_use = 0; | 807 | console_subchannel_in_use = 0; |
| 815 | } | 808 | } |
| 816 | 809 | ||
| @@ -864,7 +857,7 @@ static void udelay_reset(unsigned long usecs) | |||
| 864 | } | 857 | } |
| 865 | 858 | ||
| 866 | static int | 859 | static int |
| 867 | __clear_subchannel_easy(struct subchannel_id schid) | 860 | __clear_io_subchannel_easy(struct subchannel_id schid) |
| 868 | { | 861 | { |
| 869 | int retry; | 862 | int retry; |
| 870 | 863 | ||
| @@ -883,6 +876,12 @@ __clear_subchannel_easy(struct subchannel_id schid) | |||
| 883 | return -EBUSY; | 876 | return -EBUSY; |
| 884 | } | 877 | } |
| 885 | 878 | ||
| 879 | static void __clear_chsc_subchannel_easy(void) | ||
| 880 | { | ||
| 881 | /* It seems we can only wait for a bit here :/ */ | ||
| 882 | udelay_reset(100); | ||
| 883 | } | ||
| 884 | |||
| 886 | static int pgm_check_occured; | 885 | static int pgm_check_occured; |
| 887 | 886 | ||
| 888 | static void cio_reset_pgm_check_handler(void) | 887 | static void cio_reset_pgm_check_handler(void) |
| @@ -921,11 +920,22 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) | |||
| 921 | case -ENODEV: | 920 | case -ENODEV: |
| 922 | break; | 921 | break; |
| 923 | default: /* -EBUSY */ | 922 | default: /* -EBUSY */ |
| 924 | if (__clear_subchannel_easy(schid)) | 923 | switch (schib.pmcw.st) { |
| 925 | break; /* give up... */ | 924 | case SUBCHANNEL_TYPE_IO: |
| 925 | if (__clear_io_subchannel_easy(schid)) | ||
| 926 | goto out; /* give up... */ | ||
| 927 | break; | ||
| 928 | case SUBCHANNEL_TYPE_CHSC: | ||
| 929 | __clear_chsc_subchannel_easy(); | ||
| 930 | break; | ||
| 931 | default: | ||
| 932 | /* No default clear strategy */ | ||
| 933 | break; | ||
| 934 | } | ||
| 926 | stsch(schid, &schib); | 935 | stsch(schid, &schib); |
| 927 | __disable_subchannel_easy(schid, &schib); | 936 | __disable_subchannel_easy(schid, &schib); |
| 928 | } | 937 | } |
| 938 | out: | ||
| 929 | return 0; | 939 | return 0; |
| 930 | } | 940 | } |
| 931 | 941 | ||
| @@ -1068,3 +1078,61 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) | |||
| 1068 | iplinfo->is_qdio = schib.pmcw.qf; | 1078 | iplinfo->is_qdio = schib.pmcw.qf; |
| 1069 | return 0; | 1079 | return 0; |
| 1070 | } | 1080 | } |
| 1081 | |||
| 1082 | /** | ||
| 1083 | * cio_tm_start_key - perform start function | ||
| 1084 | * @sch: subchannel on which to perform the start function | ||
| 1085 | * @tcw: transport-command word to be started | ||
| 1086 | * @lpm: mask of paths to use | ||
| 1087 | * @key: storage key to use for storage access | ||
| 1088 | * | ||
| 1089 | * Start the tcw on the given subchannel. Return zero on success, non-zero | ||
| 1090 | * otherwise. | ||
| 1091 | */ | ||
| 1092 | int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key) | ||
| 1093 | { | ||
| 1094 | int cc; | ||
| 1095 | union orb *orb = &to_io_private(sch)->orb; | ||
| 1096 | |||
| 1097 | memset(orb, 0, sizeof(union orb)); | ||
| 1098 | orb->tm.intparm = (u32) (addr_t) sch; | ||
| 1099 | orb->tm.key = key >> 4; | ||
| 1100 | orb->tm.b = 1; | ||
| 1101 | orb->tm.lpm = lpm ? lpm : sch->lpm; | ||
| 1102 | orb->tm.tcw = (u32) (addr_t) tcw; | ||
| 1103 | cc = ssch(sch->schid, orb); | ||
| 1104 | switch (cc) { | ||
| 1105 | case 0: | ||
| 1106 | return 0; | ||
| 1107 | case 1: | ||
| 1108 | case 2: | ||
| 1109 | return -EBUSY; | ||
| 1110 | default: | ||
| 1111 | return cio_start_handle_notoper(sch, lpm); | ||
| 1112 | } | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | /** | ||
| 1116 | * cio_tm_intrg - perform interrogate function | ||
| 1117 | * @sch: subchannel on which to perform the interrogate function | ||
| 1118 | * | ||
| 1119 | * If the specified subchannel is running in transport-mode, perform the | ||
| 1120 | * interrogate function. Return zero on success, non-zero otherwise. | ||
| 1121 | */ | ||
| 1122 | int cio_tm_intrg(struct subchannel *sch) | ||
| 1123 | { | ||
| 1124 | int cc; | ||
| 1125 | |||
| 1126 | if (!to_io_private(sch)->orb.tm.b) | ||
| 1127 | return -EINVAL; | ||
| 1128 | cc = xsch(sch->schid); | ||
| 1129 | switch (cc) { | ||
| 1130 | case 0: | ||
| 1131 | case 2: | ||
| 1132 | return 0; | ||
| 1133 | case 1: | ||
| 1134 | return -EBUSY; | ||
| 1135 | default: | ||
| 1136 | return -ENODEV; | ||
| 1137 | } | ||
| 1138 | } | ||
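cio_tm_start_key() fills the transport-mode half of the ORB union the same way cio_start_key() fills the command-mode half, so a caller only needs a prepared TCW and the usual subchannel locking. A hedged sketch of such a caller (TCW construction is left out since it depends on the fcx helpers; start_tcw() is an illustrative name, not part of this patch):

/* Sketch: start a prepared transport-command word on a subchannel. */
static int start_tcw(struct subchannel *sch, struct tcw *tcw)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(sch->lock, flags);
	/* Use all currently available paths and storage key 0. */
	ret = cio_tm_start_key(sch, tcw, sch->lpm, 0);
	spin_unlock_irqrestore(sch->lock, flags);
	/* 0: started, -EBUSY: busy or status pending, otherwise the
	 * not-operational path was taken. */
	return ret;
}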
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 6e933aebe013..3b236d20e835 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h | |||
| @@ -3,9 +3,12 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/mutex.h> | 4 | #include <linux/mutex.h> |
| 5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
| 6 | #include <linux/mod_devicetable.h> | ||
| 6 | #include <asm/chpid.h> | 7 | #include <asm/chpid.h> |
| 8 | #include <asm/cio.h> | ||
| 9 | #include <asm/fcx.h> | ||
| 10 | #include <asm/schid.h> | ||
| 7 | #include "chsc.h" | 11 | #include "chsc.h" |
| 8 | #include "schid.h" | ||
| 9 | 12 | ||
| 10 | /* | 13 | /* |
| 11 | * path management control word | 14 | * path management control word |
| @@ -13,7 +16,7 @@ | |||
| 13 | struct pmcw { | 16 | struct pmcw { |
| 14 | u32 intparm; /* interruption parameter */ | 17 | u32 intparm; /* interruption parameter */ |
| 15 | u32 qf : 1; /* qdio facility */ | 18 | u32 qf : 1; /* qdio facility */ |
| 16 | u32 res0 : 1; /* reserved zeros */ | 19 | u32 w : 1; |
| 17 | u32 isc : 3; /* interruption subclass */ | 20 | u32 isc : 3; /* interruption subclass */ |
| 18 | u32 res5 : 3; /* reserved zeros */ | 21 | u32 res5 : 3; /* reserved zeros */ |
| 19 | u32 ena : 1; /* enabled */ | 22 | u32 ena : 1; /* enabled */ |
| @@ -47,7 +50,7 @@ struct pmcw { | |||
| 47 | */ | 50 | */ |
| 48 | struct schib { | 51 | struct schib { |
| 49 | struct pmcw pmcw; /* path management control word */ | 52 | struct pmcw pmcw; /* path management control word */ |
| 50 | struct scsw scsw; /* subchannel status word */ | 53 | union scsw scsw; /* subchannel status word */ |
| 51 | __u64 mba; /* measurement block address */ | 54 | __u64 mba; /* measurement block address */ |
| 52 | __u8 mda[4]; /* model dependent area */ | 55 | __u8 mda[4]; /* model dependent area */ |
| 53 | } __attribute__ ((packed,aligned(4))); | 56 | } __attribute__ ((packed,aligned(4))); |
| @@ -99,8 +102,11 @@ extern int cio_set_options (struct subchannel *, int); | |||
| 99 | extern int cio_get_options (struct subchannel *); | 102 | extern int cio_get_options (struct subchannel *); |
| 100 | extern int cio_modify (struct subchannel *); | 103 | extern int cio_modify (struct subchannel *); |
| 101 | 104 | ||
| 105 | int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); | ||
| 106 | int cio_tm_intrg(struct subchannel *sch); | ||
| 107 | |||
| 102 | int cio_create_sch_lock(struct subchannel *); | 108 | int cio_create_sch_lock(struct subchannel *); |
| 103 | void do_adapter_IO(void); | 109 | void do_adapter_IO(u8 isc); |
| 104 | void do_IRQ(struct pt_regs *); | 110 | void do_IRQ(struct pt_regs *); |
| 105 | 111 | ||
| 106 | /* Use with care. */ | 112 | /* Use with care. */ |
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 2808b6833b9e..a90b28c0be57 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c | |||
| @@ -341,12 +341,12 @@ static int cmf_copy_block(struct ccw_device *cdev) | |||
| 341 | if (stsch(sch->schid, &sch->schib)) | 341 | if (stsch(sch->schid, &sch->schib)) |
| 342 | return -ENODEV; | 342 | return -ENODEV; |
| 343 | 343 | ||
| 344 | if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) { | 344 | if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) { |
| 345 | /* Don't copy if a start function is in progress. */ | 345 | /* Don't copy if a start function is in progress. */ |
| 346 | if ((!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) && | 346 | if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) && |
| 347 | (sch->schib.scsw.actl & | 347 | (scsw_actl(&sch->schib.scsw) & |
| 348 | (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) && | 348 | (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) && |
| 349 | (!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS))) | 349 | (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS))) |
| 350 | return -EBUSY; | 350 | return -EBUSY; |
| 351 | } | 351 | } |
| 352 | cmb_data = cdev->private->cmb; | 352 | cmb_data = cdev->private->cmb; |
| @@ -612,9 +612,6 @@ static int alloc_cmb(struct ccw_device *cdev) | |||
| 612 | free_pages((unsigned long)mem, get_order(size)); | 612 | free_pages((unsigned long)mem, get_order(size)); |
| 613 | } else if (!mem) { | 613 | } else if (!mem) { |
| 614 | /* no luck */ | 614 | /* no luck */ |
| 615 | printk(KERN_WARNING "cio: failed to allocate area " | ||
| 616 | "for measuring %d subchannels\n", | ||
| 617 | cmb_area.num_channels); | ||
| 618 | ret = -ENOMEM; | 615 | ret = -ENOMEM; |
| 619 | goto out; | 616 | goto out; |
| 620 | } else { | 617 | } else { |
| @@ -1230,13 +1227,9 @@ static ssize_t cmb_enable_store(struct device *dev, | |||
| 1230 | switch (val) { | 1227 | switch (val) { |
| 1231 | case 0: | 1228 | case 0: |
| 1232 | ret = disable_cmf(cdev); | 1229 | ret = disable_cmf(cdev); |
| 1233 | if (ret) | ||
| 1234 | dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret); | ||
| 1235 | break; | 1230 | break; |
| 1236 | case 1: | 1231 | case 1: |
| 1237 | ret = enable_cmf(cdev); | 1232 | ret = enable_cmf(cdev); |
| 1238 | if (ret && ret != -EBUSY) | ||
| 1239 | dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret); | ||
| 1240 | break; | 1233 | break; |
| 1241 | } | 1234 | } |
| 1242 | 1235 | ||
| @@ -1344,8 +1337,7 @@ static int __init init_cmf(void) | |||
| 1344 | * to basic mode. | 1337 | * to basic mode. |
| 1345 | */ | 1338 | */ |
| 1346 | if (format == CMF_AUTODETECT) { | 1339 | if (format == CMF_AUTODETECT) { |
| 1347 | if (!css_characteristics_avail || | 1340 | if (!css_general_characteristics.ext_mb) { |
| 1348 | !css_general_characteristics.ext_mb) { | ||
| 1349 | format = CMF_BASIC; | 1341 | format = CMF_BASIC; |
| 1350 | } else { | 1342 | } else { |
| 1351 | format = CMF_EXTENDED; | 1343 | format = CMF_EXTENDED; |
| @@ -1365,8 +1357,6 @@ static int __init init_cmf(void) | |||
| 1365 | cmbops = &cmbops_extended; | 1357 | cmbops = &cmbops_extended; |
| 1366 | break; | 1358 | break; |
| 1367 | default: | 1359 | default: |
| 1368 | printk(KERN_ERR "cio: Invalid format %d for channel " | ||
| 1369 | "measurement facility\n", format); | ||
| 1370 | return 1; | 1360 | return 1; |
| 1371 | } | 1361 | } |
| 1372 | 1362 | ||
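cmf_copy_block() switches from reading scsw.fctl and friends directly to the scsw_fctl()/scsw_actl()/scsw_stctl() helpers because schib.scsw is now a union that may carry either the command-mode or the transport-mode layout. A rough sketch of how such an accessor can dispatch on the format, assuming a scsw_is_tm() format check like the one the reworked headers provide (the real helpers live there; this is illustration only):

/* Sketch: return the function-control bits for either scsw layout. */
static inline u32 scsw_fctl_sketch(union scsw *scsw)
{
	if (scsw_is_tm(scsw))		/* transport-mode status word */
		return scsw->tm.fctl;
	return scsw->cmd.fctl;		/* command-mode status word */
}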
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index a76956512b2d..46c021d880dc 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
| @@ -2,8 +2,7 @@ | |||
| 2 | * drivers/s390/cio/css.c | 2 | * drivers/s390/cio/css.c |
| 3 | * driver for channel subsystem | 3 | * driver for channel subsystem |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | 5 | * Copyright IBM Corp. 2002,2008 |
| 6 | * IBM Corporation | ||
| 7 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | 6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) |
| 8 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
| 9 | */ | 8 | */ |
| @@ -14,7 +13,9 @@ | |||
| 14 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
| 15 | #include <linux/list.h> | 14 | #include <linux/list.h> |
| 16 | #include <linux/reboot.h> | 15 | #include <linux/reboot.h> |
| 16 | #include <asm/isc.h> | ||
| 17 | 17 | ||
| 18 | #include "../s390mach.h" | ||
| 18 | #include "css.h" | 19 | #include "css.h" |
| 19 | #include "cio.h" | 20 | #include "cio.h" |
| 20 | #include "cio_debug.h" | 21 | #include "cio_debug.h" |
| @@ -30,8 +31,6 @@ static int max_ssid = 0; | |||
| 30 | 31 | ||
| 31 | struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; | 32 | struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; |
| 32 | 33 | ||
| 33 | int css_characteristics_avail = 0; | ||
| 34 | |||
| 35 | int | 34 | int |
| 36 | for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) | 35 | for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) |
| 37 | { | 36 | { |
| @@ -121,25 +120,6 @@ css_alloc_subchannel(struct subchannel_id schid) | |||
| 121 | kfree(sch); | 120 | kfree(sch); |
| 122 | return ERR_PTR(ret); | 121 | return ERR_PTR(ret); |
| 123 | } | 122 | } |
| 124 | |||
| 125 | if (sch->st != SUBCHANNEL_TYPE_IO) { | ||
| 126 | /* For now we ignore all non-io subchannels. */ | ||
| 127 | kfree(sch); | ||
| 128 | return ERR_PTR(-EINVAL); | ||
| 129 | } | ||
| 130 | |||
| 131 | /* | ||
| 132 | * Set intparm to subchannel address. | ||
| 133 | * This is fine even on 64bit since the subchannel is always located | ||
| 134 | * under 2G. | ||
| 135 | */ | ||
| 136 | sch->schib.pmcw.intparm = (u32)(addr_t)sch; | ||
| 137 | ret = cio_modify(sch); | ||
| 138 | if (ret) { | ||
| 139 | kfree(sch->lock); | ||
| 140 | kfree(sch); | ||
| 141 | return ERR_PTR(ret); | ||
| 142 | } | ||
| 143 | return sch; | 123 | return sch; |
| 144 | } | 124 | } |
| 145 | 125 | ||
| @@ -177,12 +157,18 @@ static int css_sch_device_register(struct subchannel *sch) | |||
| 177 | return ret; | 157 | return ret; |
| 178 | } | 158 | } |
| 179 | 159 | ||
| 160 | /** | ||
| 161 | * css_sch_device_unregister - unregister a subchannel | ||
| 162 | * @sch: subchannel to be unregistered | ||
| 163 | */ | ||
| 180 | void css_sch_device_unregister(struct subchannel *sch) | 164 | void css_sch_device_unregister(struct subchannel *sch) |
| 181 | { | 165 | { |
| 182 | mutex_lock(&sch->reg_mutex); | 166 | mutex_lock(&sch->reg_mutex); |
| 183 | device_unregister(&sch->dev); | 167 | if (device_is_registered(&sch->dev)) |
| 168 | device_unregister(&sch->dev); | ||
| 184 | mutex_unlock(&sch->reg_mutex); | 169 | mutex_unlock(&sch->reg_mutex); |
| 185 | } | 170 | } |
| 171 | EXPORT_SYMBOL_GPL(css_sch_device_unregister); | ||
| 186 | 172 | ||
| 187 | static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) | 173 | static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) |
| 188 | { | 174 | { |
| @@ -229,6 +215,41 @@ void css_update_ssd_info(struct subchannel *sch) | |||
| 229 | } | 215 | } |
| 230 | } | 216 | } |
| 231 | 217 | ||
| 218 | static ssize_t type_show(struct device *dev, struct device_attribute *attr, | ||
| 219 | char *buf) | ||
| 220 | { | ||
| 221 | struct subchannel *sch = to_subchannel(dev); | ||
| 222 | |||
| 223 | return sprintf(buf, "%01x\n", sch->st); | ||
| 224 | } | ||
| 225 | |||
| 226 | static DEVICE_ATTR(type, 0444, type_show, NULL); | ||
| 227 | |||
| 228 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | ||
| 229 | char *buf) | ||
| 230 | { | ||
| 231 | struct subchannel *sch = to_subchannel(dev); | ||
| 232 | |||
| 233 | return sprintf(buf, "css:t%01X\n", sch->st); | ||
| 234 | } | ||
| 235 | |||
| 236 | static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); | ||
| 237 | |||
| 238 | static struct attribute *subch_attrs[] = { | ||
| 239 | &dev_attr_type.attr, | ||
| 240 | &dev_attr_modalias.attr, | ||
| 241 | NULL, | ||
| 242 | }; | ||
| 243 | |||
| 244 | static struct attribute_group subch_attr_group = { | ||
| 245 | .attrs = subch_attrs, | ||
| 246 | }; | ||
| 247 | |||
| 248 | static struct attribute_group *default_subch_attr_groups[] = { | ||
| 249 | &subch_attr_group, | ||
| 250 | NULL, | ||
| 251 | }; | ||
| 252 | |||
| 232 | static int css_register_subchannel(struct subchannel *sch) | 253 | static int css_register_subchannel(struct subchannel *sch) |
| 233 | { | 254 | { |
| 234 | int ret; | 255 | int ret; |
| @@ -237,16 +258,17 @@ static int css_register_subchannel(struct subchannel *sch) | |||
| 237 | sch->dev.parent = &channel_subsystems[0]->device; | 258 | sch->dev.parent = &channel_subsystems[0]->device; |
| 238 | sch->dev.bus = &css_bus_type; | 259 | sch->dev.bus = &css_bus_type; |
| 239 | sch->dev.release = &css_subchannel_release; | 260 | sch->dev.release = &css_subchannel_release; |
| 240 | sch->dev.groups = subch_attr_groups; | 261 | sch->dev.groups = default_subch_attr_groups; |
| 241 | /* | 262 | /* |
| 242 | * We don't want to generate uevents for I/O subchannels that don't | 263 | * We don't want to generate uevents for I/O subchannels that don't |
| 243 | * have a working ccw device behind them since they will be | 264 | * have a working ccw device behind them since they will be |
| 244 | * unregistered before they can be used anyway, so we delay the add | 265 | * unregistered before they can be used anyway, so we delay the add |
| 245 | * uevent until after device recognition was successful. | 266 | * uevent until after device recognition was successful. |
| 267 | * Note that we suppress the uevent for all subchannel types; | ||
| 268 | * the subchannel driver can decide itself when it wants to inform | ||
| 269 | * userspace of its existence. | ||
| 246 | */ | 270 | */ |
| 247 | if (!cio_is_console(sch->schid)) | 271 | sch->dev.uevent_suppress = 1; |
| 248 | /* Console is special, no need to suppress. */ | ||
| 249 | sch->dev.uevent_suppress = 1; | ||
| 250 | css_update_ssd_info(sch); | 272 | css_update_ssd_info(sch); |
| 251 | /* make it known to the system */ | 273 | /* make it known to the system */ |
| 252 | ret = css_sch_device_register(sch); | 274 | ret = css_sch_device_register(sch); |
| @@ -255,10 +277,19 @@ static int css_register_subchannel(struct subchannel *sch) | |||
| 255 | sch->schid.ssid, sch->schid.sch_no, ret); | 277 | sch->schid.ssid, sch->schid.sch_no, ret); |
| 256 | return ret; | 278 | return ret; |
| 257 | } | 279 | } |
| 280 | if (!sch->driver) { | ||
| 281 | /* | ||
| 282 | * No driver matched. Generate the uevent now so that | ||
| 283 | * a fitting driver module may be loaded based on the | ||
| 284 | * modalias. | ||
| 285 | */ | ||
| 286 | sch->dev.uevent_suppress = 0; | ||
| 287 | kobject_uevent(&sch->dev.kobj, KOBJ_ADD); | ||
| 288 | } | ||
| 258 | return ret; | 289 | return ret; |
| 259 | } | 290 | } |
| 260 | 291 | ||
| 261 | static int css_probe_device(struct subchannel_id schid) | 292 | int css_probe_device(struct subchannel_id schid) |
| 262 | { | 293 | { |
| 263 | int ret; | 294 | int ret; |
| 264 | struct subchannel *sch; | 295 | struct subchannel *sch; |
| @@ -301,116 +332,12 @@ int css_sch_is_valid(struct schib *schib) | |||
| 301 | { | 332 | { |
| 302 | if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) | 333 | if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) |
| 303 | return 0; | 334 | return 0; |
| 335 | if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w) | ||
| 336 | return 0; | ||
| 304 | return 1; | 337 | return 1; |
| 305 | } | 338 | } |
| 306 | EXPORT_SYMBOL_GPL(css_sch_is_valid); | 339 | EXPORT_SYMBOL_GPL(css_sch_is_valid); |
| 307 | 340 | ||
| 308 | static int css_get_subchannel_status(struct subchannel *sch) | ||
| 309 | { | ||
| 310 | struct schib schib; | ||
| 311 | |||
| 312 | if (stsch(sch->schid, &schib)) | ||
| 313 | return CIO_GONE; | ||
| 314 | if (!css_sch_is_valid(&schib)) | ||
| 315 | return CIO_GONE; | ||
| 316 | if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev)) | ||
| 317 | return CIO_REVALIDATE; | ||
| 318 | if (!sch->lpm) | ||
| 319 | return CIO_NO_PATH; | ||
| 320 | return CIO_OPER; | ||
| 321 | } | ||
| 322 | |||
| 323 | static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) | ||
| 324 | { | ||
| 325 | int event, ret, disc; | ||
| 326 | unsigned long flags; | ||
| 327 | enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action; | ||
| 328 | |||
| 329 | spin_lock_irqsave(sch->lock, flags); | ||
| 330 | disc = device_is_disconnected(sch); | ||
| 331 | if (disc && slow) { | ||
| 332 | /* Disconnected devices are evaluated directly only.*/ | ||
| 333 | spin_unlock_irqrestore(sch->lock, flags); | ||
| 334 | return 0; | ||
| 335 | } | ||
| 336 | /* No interrupt after machine check - kill pending timers. */ | ||
| 337 | device_kill_pending_timer(sch); | ||
| 338 | if (!disc && !slow) { | ||
| 339 | /* Non-disconnected devices are evaluated on the slow path. */ | ||
| 340 | spin_unlock_irqrestore(sch->lock, flags); | ||
| 341 | return -EAGAIN; | ||
| 342 | } | ||
| 343 | event = css_get_subchannel_status(sch); | ||
| 344 | CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", | ||
| 345 | sch->schid.ssid, sch->schid.sch_no, event, | ||
| 346 | disc ? "disconnected" : "normal", | ||
| 347 | slow ? "slow" : "fast"); | ||
| 348 | /* Analyze subchannel status. */ | ||
| 349 | action = NONE; | ||
| 350 | switch (event) { | ||
| 351 | case CIO_NO_PATH: | ||
| 352 | if (disc) { | ||
| 353 | /* Check if paths have become available. */ | ||
| 354 | action = REPROBE; | ||
| 355 | break; | ||
| 356 | } | ||
| 357 | /* fall through */ | ||
| 358 | case CIO_GONE: | ||
| 359 | /* Prevent unwanted effects when opening lock. */ | ||
| 360 | cio_disable_subchannel(sch); | ||
| 361 | device_set_disconnected(sch); | ||
| 362 | /* Ask driver what to do with device. */ | ||
| 363 | action = UNREGISTER; | ||
| 364 | if (sch->driver && sch->driver->notify) { | ||
| 365 | spin_unlock_irqrestore(sch->lock, flags); | ||
| 366 | ret = sch->driver->notify(sch, event); | ||
| 367 | spin_lock_irqsave(sch->lock, flags); | ||
| 368 | if (ret) | ||
| 369 | action = NONE; | ||
| 370 | } | ||
| 371 | break; | ||
| 372 | case CIO_REVALIDATE: | ||
| 373 | /* Device will be removed, so no notify necessary. */ | ||
| 374 | if (disc) | ||
| 375 | /* Reprobe because immediate unregister might block. */ | ||
| 376 | action = REPROBE; | ||
| 377 | else | ||
| 378 | action = UNREGISTER_PROBE; | ||
| 379 | break; | ||
| 380 | case CIO_OPER: | ||
| 381 | if (disc) | ||
| 382 | /* Get device operational again. */ | ||
| 383 | action = REPROBE; | ||
| 384 | break; | ||
| 385 | } | ||
| 386 | /* Perform action. */ | ||
| 387 | ret = 0; | ||
| 388 | switch (action) { | ||
| 389 | case UNREGISTER: | ||
| 390 | case UNREGISTER_PROBE: | ||
| 391 | /* Unregister device (will use subchannel lock). */ | ||
| 392 | spin_unlock_irqrestore(sch->lock, flags); | ||
| 393 | css_sch_device_unregister(sch); | ||
| 394 | spin_lock_irqsave(sch->lock, flags); | ||
| 395 | |||
| 396 | /* Reset intparm to zeroes. */ | ||
| 397 | sch->schib.pmcw.intparm = 0; | ||
| 398 | cio_modify(sch); | ||
| 399 | break; | ||
| 400 | case REPROBE: | ||
| 401 | device_trigger_reprobe(sch); | ||
| 402 | break; | ||
| 403 | default: | ||
| 404 | break; | ||
| 405 | } | ||
| 406 | spin_unlock_irqrestore(sch->lock, flags); | ||
| 407 | /* Probe if necessary. */ | ||
| 408 | if (action == UNREGISTER_PROBE) | ||
| 409 | ret = css_probe_device(sch->schid); | ||
| 410 | |||
| 411 | return ret; | ||
| 412 | } | ||
| 413 | |||
| 414 | static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) | 341 | static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) |
| 415 | { | 342 | { |
| 416 | struct schib schib; | 343 | struct schib schib; |
| @@ -429,6 +356,21 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) | |||
| 429 | return css_probe_device(schid); | 356 | return css_probe_device(schid); |
| 430 | } | 357 | } |
| 431 | 358 | ||
| 359 | static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) | ||
| 360 | { | ||
| 361 | int ret = 0; | ||
| 362 | |||
| 363 | if (sch->driver) { | ||
| 364 | if (sch->driver->sch_event) | ||
| 365 | ret = sch->driver->sch_event(sch, slow); | ||
| 366 | else | ||
| 367 | dev_dbg(&sch->dev, | ||
| 368 | "Got subchannel machine check but " | ||
| 369 | "no sch_event handler provided.\n"); | ||
| 370 | } | ||
| 371 | return ret; | ||
| 372 | } | ||
| 373 | |||
| 432 | static void css_evaluate_subchannel(struct subchannel_id schid, int slow) | 374 | static void css_evaluate_subchannel(struct subchannel_id schid, int slow) |
| 433 | { | 375 | { |
| 434 | struct subchannel *sch; | 376 | struct subchannel *sch; |
| @@ -596,18 +538,29 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe); | |||
| 596 | /* | 538 | /* |
| 597 | * Called from the machine check handler for subchannel report words. | 539 | * Called from the machine check handler for subchannel report words. |
| 598 | */ | 540 | */ |
| 599 | void css_process_crw(int rsid1, int rsid2) | 541 | static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) |
| 600 | { | 542 | { |
| 601 | struct subchannel_id mchk_schid; | 543 | struct subchannel_id mchk_schid; |
| 602 | 544 | ||
| 603 | CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", | 545 | if (overflow) { |
| 604 | rsid1, rsid2); | 546 | css_schedule_eval_all(); |
| 547 | return; | ||
| 548 | } | ||
| 549 | CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, " | ||
| 550 | "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", | ||
| 551 | crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, | ||
| 552 | crw0->erc, crw0->rsid); | ||
| 553 | if (crw1) | ||
| 554 | CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, " | ||
| 555 | "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", | ||
| 556 | crw1->slct, crw1->oflw, crw1->chn, crw1->rsc, | ||
| 557 | crw1->anc, crw1->erc, crw1->rsid); | ||
| 605 | init_subchannel_id(&mchk_schid); | 558 | init_subchannel_id(&mchk_schid); |
| 606 | mchk_schid.sch_no = rsid1; | 559 | mchk_schid.sch_no = crw0->rsid; |
| 607 | if (rsid2 != 0) | 560 | if (crw1) |
| 608 | mchk_schid.ssid = (rsid2 >> 8) & 3; | 561 | mchk_schid.ssid = (crw1->rsid >> 8) & 3; |
| 609 | 562 | ||
| 610 | /* | 563 | /* |
| 611 | * Since we are always presented with IPI in the CRW, we have to | 564 | * Since we are always presented with IPI in the CRW, we have to |
| 612 | * use stsch() to find out if the subchannel in question has come | 565 | * use stsch() to find out if the subchannel in question has come |
| 613 | * or gone. | 566 | * or gone. |
| @@ -658,7 +611,7 @@ __init_channel_subsystem(struct subchannel_id schid, void *data) | |||
| 658 | static void __init | 611 | static void __init |
| 659 | css_generate_pgid(struct channel_subsystem *css, u32 tod_high) | 612 | css_generate_pgid(struct channel_subsystem *css, u32 tod_high) |
| 660 | { | 613 | { |
| 661 | if (css_characteristics_avail && css_general_characteristics.mcss) { | 614 | if (css_general_characteristics.mcss) { |
| 662 | css->global_pgid.pgid_high.ext_cssid.version = 0x80; | 615 | css->global_pgid.pgid_high.ext_cssid.version = 0x80; |
| 663 | css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; | 616 | css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; |
| 664 | } else { | 617 | } else { |
| @@ -795,8 +748,6 @@ init_channel_subsystem (void) | |||
| 795 | ret = chsc_determine_css_characteristics(); | 748 | ret = chsc_determine_css_characteristics(); |
| 796 | if (ret == -ENOMEM) | 749 | if (ret == -ENOMEM) |
| 797 | goto out; /* No need to continue. */ | 750 | goto out; /* No need to continue. */ |
| 798 | if (ret == 0) | ||
| 799 | css_characteristics_avail = 1; | ||
| 800 | 751 | ||
| 801 | ret = chsc_alloc_sei_area(); | 752 | ret = chsc_alloc_sei_area(); |
| 802 | if (ret) | 753 | if (ret) |
| @@ -806,6 +757,10 @@ init_channel_subsystem (void) | |||
| 806 | if (ret) | 757 | if (ret) |
| 807 | goto out; | 758 | goto out; |
| 808 | 759 | ||
| 760 | ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw); | ||
| 761 | if (ret) | ||
| 762 | goto out; | ||
| 763 | |||
| 809 | if ((ret = bus_register(&css_bus_type))) | 764 | if ((ret = bus_register(&css_bus_type))) |
| 810 | goto out; | 765 | goto out; |
| 811 | 766 | ||
| @@ -836,8 +791,7 @@ init_channel_subsystem (void) | |||
| 836 | ret = device_register(&css->device); | 791 | ret = device_register(&css->device); |
| 837 | if (ret) | 792 | if (ret) |
| 838 | goto out_free_all; | 793 | goto out_free_all; |
| 839 | if (css_characteristics_avail && | 794 | if (css_chsc_characteristics.secm) { |
| 840 | css_chsc_characteristics.secm) { | ||
| 841 | ret = device_create_file(&css->device, | 795 | ret = device_create_file(&css->device, |
| 842 | &dev_attr_cm_enable); | 796 | &dev_attr_cm_enable); |
| 843 | if (ret) | 797 | if (ret) |
| @@ -852,7 +806,8 @@ init_channel_subsystem (void) | |||
| 852 | goto out_pseudo; | 806 | goto out_pseudo; |
| 853 | css_init_done = 1; | 807 | css_init_done = 1; |
| 854 | 808 | ||
| 855 | ctl_set_bit(6, 28); | 809 | /* Enable default isc for I/O subchannels. */ |
| 810 | isc_register(IO_SCH_ISC); | ||
| 856 | 811 | ||
| 857 | for_each_subchannel(__init_channel_subsystem, NULL); | 812 | for_each_subchannel(__init_channel_subsystem, NULL); |
| 858 | return 0; | 813 | return 0; |
| @@ -875,7 +830,7 @@ out_unregister: | |||
| 875 | i--; | 830 | i--; |
| 876 | css = channel_subsystems[i]; | 831 | css = channel_subsystems[i]; |
| 877 | device_unregister(&css->pseudo_subchannel->dev); | 832 | device_unregister(&css->pseudo_subchannel->dev); |
| 878 | if (css_characteristics_avail && css_chsc_characteristics.secm) | 833 | if (css_chsc_characteristics.secm) |
| 879 | device_remove_file(&css->device, | 834 | device_remove_file(&css->device, |
| 880 | &dev_attr_cm_enable); | 835 | &dev_attr_cm_enable); |
| 881 | device_unregister(&css->device); | 836 | device_unregister(&css->device); |
| @@ -883,6 +838,7 @@ out_unregister: | |||
| 883 | out_bus: | 838 | out_bus: |
| 884 | bus_unregister(&css_bus_type); | 839 | bus_unregister(&css_bus_type); |
| 885 | out: | 840 | out: |
| 841 | s390_unregister_crw_handler(CRW_RSC_CSS); | ||
| 886 | chsc_free_sei_area(); | 842 | chsc_free_sei_area(); |
| 887 | kfree(slow_subchannel_set); | 843 | kfree(slow_subchannel_set); |
| 888 | printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n", | 844 | printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n", |
| @@ -895,19 +851,16 @@ int sch_is_pseudo_sch(struct subchannel *sch) | |||
| 895 | return sch == to_css(sch->dev.parent)->pseudo_subchannel; | 851 | return sch == to_css(sch->dev.parent)->pseudo_subchannel; |
| 896 | } | 852 | } |
| 897 | 853 | ||
| 898 | /* | 854 | static int css_bus_match(struct device *dev, struct device_driver *drv) |
| 899 | * find a driver for a subchannel. They identify by the subchannel | ||
| 900 | * type with the exception that the console subchannel driver has its own | ||
| 901 | * subchannel type although the device is an i/o subchannel | ||
| 902 | */ | ||
| 903 | static int | ||
| 904 | css_bus_match (struct device *dev, struct device_driver *drv) | ||
| 905 | { | 855 | { |
| 906 | struct subchannel *sch = to_subchannel(dev); | 856 | struct subchannel *sch = to_subchannel(dev); |
| 907 | struct css_driver *driver = to_cssdriver(drv); | 857 | struct css_driver *driver = to_cssdriver(drv); |
| 858 | struct css_device_id *id; | ||
| 908 | 859 | ||
| 909 | if (sch->st == driver->subchannel_type) | 860 | for (id = driver->subchannel_type; id->match_flags; id++) { |
| 910 | return 1; | 861 | if (sch->st == id->type) |
| 862 | return 1; | ||
| 863 | } | ||
| 911 | 864 | ||
| 912 | return 0; | 865 | return 0; |
| 913 | } | 866 | } |
| @@ -945,12 +898,25 @@ static void css_shutdown(struct device *dev) | |||
| 945 | sch->driver->shutdown(sch); | 898 | sch->driver->shutdown(sch); |
| 946 | } | 899 | } |
| 947 | 900 | ||
| 901 | static int css_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
| 902 | { | ||
| 903 | struct subchannel *sch = to_subchannel(dev); | ||
| 904 | int ret; | ||
| 905 | |||
| 906 | ret = add_uevent_var(env, "ST=%01X", sch->st); | ||
| 907 | if (ret) | ||
| 908 | return ret; | ||
| 909 | ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st); | ||
| 910 | return ret; | ||
| 911 | } | ||
| 912 | |||
| 948 | struct bus_type css_bus_type = { | 913 | struct bus_type css_bus_type = { |
| 949 | .name = "css", | 914 | .name = "css", |
| 950 | .match = css_bus_match, | 915 | .match = css_bus_match, |
| 951 | .probe = css_probe, | 916 | .probe = css_probe, |
| 952 | .remove = css_remove, | 917 | .remove = css_remove, |
| 953 | .shutdown = css_shutdown, | 918 | .shutdown = css_shutdown, |
| 919 | .uevent = css_uevent, | ||
| 954 | }; | 920 | }; |
| 955 | 921 | ||
| 956 | /** | 922 | /** |
| @@ -985,4 +951,3 @@ subsys_initcall(init_channel_subsystem); | |||
| 985 | 951 | ||
| 986 | MODULE_LICENSE("GPL"); | 952 | MODULE_LICENSE("GPL"); |
| 987 | EXPORT_SYMBOL(css_bus_type); | 953 | EXPORT_SYMBOL(css_bus_type); |
| 988 | EXPORT_SYMBOL_GPL(css_characteristics_avail); | ||
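css_process_crw() is no longer called directly from the machine-check code; css.c now registers it for the subchannel report source via s390_register_crw_handler(CRW_RSC_SCH, ...), and the handler receives the raw CRW pair plus an overflow indication. Other report sources can hook in the same way; a hedged sketch of such a registration (the handler name and body are illustrative, and CRW_RSC_CPATH is assumed to be the channel-path report source defined alongside CRW_RSC_SCH in s390mach.h):

/* Sketch: register a CRW handler following the pattern css.c uses. */
static void example_chp_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	if (overflow) {
		/* CRWs were lost; fall back to a full re-evaluation. */
		css_schedule_eval_all();
		return;
	}
	/* crw0->rsid identifies the reporting channel path. */
}

static int __init example_crw_init(void)
{
	return s390_register_crw_handler(CRW_RSC_CPATH, example_chp_crw);
}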
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index e1913518f354..57ebf120f825 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h | |||
| @@ -9,8 +9,7 @@ | |||
| 9 | 9 | ||
| 10 | #include <asm/cio.h> | 10 | #include <asm/cio.h> |
| 11 | #include <asm/chpid.h> | 11 | #include <asm/chpid.h> |
| 12 | 12 | #include <asm/schid.h> | |
| 13 | #include "schid.h" | ||
| 14 | 13 | ||
| 15 | /* | 14 | /* |
| 16 | * path grouping stuff | 15 | * path grouping stuff |
| @@ -58,20 +57,28 @@ struct pgid { | |||
| 58 | __u32 tod_high; /* high word TOD clock */ | 57 | __u32 tod_high; /* high word TOD clock */ |
| 59 | } __attribute__ ((packed)); | 58 | } __attribute__ ((packed)); |
| 60 | 59 | ||
| 61 | /* | ||
| 62 | * A css driver handles all subchannels of one type. | ||
| 63 | * Currently, we only care about I/O subchannels (type 0), these | ||
| 64 | * have a ccw_device connected to them. | ||
| 65 | */ | ||
| 66 | struct subchannel; | 60 | struct subchannel; |
| 61 | struct chp_link; | ||
| 62 | /** | ||
| 63 | * struct css_driver - device driver for subchannels | ||
| 64 | * @owner: owning module | ||
| 65 | * @subchannel_type: subchannel type supported by this driver | ||
| 66 | * @drv: embedded device driver structure | ||
| 67 | * @irq: called on interrupts | ||
| 68 | * @chp_event: called for events affecting a channel path | ||
| 69 | * @sch_event: called for events affecting the subchannel | ||
| 70 | * @probe: function called on probe | ||
| 71 | * @remove: function called on remove | ||
| 72 | * @shutdown: called at device shutdown | ||
| 73 | * @name: name of the device driver | ||
| 74 | */ | ||
| 67 | struct css_driver { | 75 | struct css_driver { |
| 68 | struct module *owner; | 76 | struct module *owner; |
| 69 | unsigned int subchannel_type; | 77 | struct css_device_id *subchannel_type; |
| 70 | struct device_driver drv; | 78 | struct device_driver drv; |
| 71 | void (*irq)(struct subchannel *); | 79 | void (*irq)(struct subchannel *); |
| 72 | int (*notify)(struct subchannel *, int); | 80 | int (*chp_event)(struct subchannel *, struct chp_link *, int); |
| 73 | void (*verify)(struct subchannel *); | 81 | int (*sch_event)(struct subchannel *, int); |
| 74 | void (*termination)(struct subchannel *); | ||
| 75 | int (*probe)(struct subchannel *); | 82 | int (*probe)(struct subchannel *); |
| 76 | int (*remove)(struct subchannel *); | 83 | int (*remove)(struct subchannel *); |
| 77 | void (*shutdown)(struct subchannel *); | 84 | void (*shutdown)(struct subchannel *); |
| @@ -89,13 +96,13 @@ extern int css_driver_register(struct css_driver *); | |||
| 89 | extern void css_driver_unregister(struct css_driver *); | 96 | extern void css_driver_unregister(struct css_driver *); |
| 90 | 97 | ||
| 91 | extern void css_sch_device_unregister(struct subchannel *); | 98 | extern void css_sch_device_unregister(struct subchannel *); |
| 92 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); | 99 | extern int css_probe_device(struct subchannel_id); |
| 100 | extern struct subchannel *get_subchannel_by_schid(struct subchannel_id); | ||
| 93 | extern int css_init_done; | 101 | extern int css_init_done; |
| 94 | int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), | 102 | int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), |
| 95 | int (*fn_unknown)(struct subchannel_id, | 103 | int (*fn_unknown)(struct subchannel_id, |
| 96 | void *), void *data); | 104 | void *), void *data); |
| 97 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); | 105 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); |
| 98 | extern void css_process_crw(int, int); | ||
| 99 | extern void css_reiterate_subchannels(void); | 106 | extern void css_reiterate_subchannels(void); |
| 100 | void css_update_ssd_info(struct subchannel *sch); | 107 | void css_update_ssd_info(struct subchannel *sch); |
| 101 | 108 | ||
| @@ -121,20 +128,6 @@ struct channel_subsystem { | |||
| 121 | extern struct bus_type css_bus_type; | 128 | extern struct bus_type css_bus_type; |
| 122 | extern struct channel_subsystem *channel_subsystems[]; | 129 | extern struct channel_subsystem *channel_subsystems[]; |
| 123 | 130 | ||
| 124 | /* Some helper functions for disconnected state. */ | ||
| 125 | int device_is_disconnected(struct subchannel *); | ||
| 126 | void device_set_disconnected(struct subchannel *); | ||
| 127 | void device_trigger_reprobe(struct subchannel *); | ||
| 128 | |||
| 129 | /* Helper functions for vary on/off. */ | ||
| 130 | int device_is_online(struct subchannel *); | ||
| 131 | void device_kill_io(struct subchannel *); | ||
| 132 | void device_set_intretry(struct subchannel *sch); | ||
| 133 | int device_trigger_verify(struct subchannel *sch); | ||
| 134 | |||
| 135 | /* Machine check helper function. */ | ||
| 136 | void device_kill_pending_timer(struct subchannel *); | ||
| 137 | |||
| 138 | /* Helper functions to build lists for the slow path. */ | 131 | /* Helper functions to build lists for the slow path. */ |
| 139 | void css_schedule_eval(struct subchannel_id schid); | 132 | void css_schedule_eval(struct subchannel_id schid); |
| 140 | void css_schedule_eval_all(void); | 133 | void css_schedule_eval_all(void); |
| @@ -145,6 +138,4 @@ int css_sch_is_valid(struct schib *); | |||
| 145 | 138 | ||
| 146 | extern struct workqueue_struct *slow_path_wq; | 139 | extern struct workqueue_struct *slow_path_wq; |
| 147 | void css_wait_for_slow_path(void); | 140 | void css_wait_for_slow_path(void); |
| 148 | |||
| 149 | extern struct attribute_group *subch_attr_groups[]; | ||
| 150 | #endif | 141 | #endif |
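With subchannel_type now a table of struct css_device_id instead of a single type value, a subchannel driver lists the types it handles and registers via css_driver_register(); the I/O subchannel driver in device.c below is converted exactly this way. A hedged sketch for a hypothetical driver binding to CHSC subchannels (the callback functions are assumed to exist elsewhere; this only illustrates the registration interface):

/* Sketch: minimal css driver for CHSC subchannels. */
static struct css_device_id chsc_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);

static struct css_driver chsc_subchannel_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = chsc_subchannel_ids,
	.name = "chsc_subchannel",
	.irq = chsc_subchannel_irq,		/* assumed interrupt handler */
	.probe = chsc_subchannel_probe,		/* assumed */
	.remove = chsc_subchannel_remove,	/* assumed */
};

static int __init chsc_sch_init(void)
{
	return css_driver_register(&chsc_subchannel_driver);
}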
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index e22813db74a2..e818d0c54c09 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
| @@ -2,8 +2,7 @@ | |||
| 2 | * drivers/s390/cio/device.c | 2 | * drivers/s390/cio/device.c |
| 3 | * bus driver for ccw devices | 3 | * bus driver for ccw devices |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | 5 | * Copyright IBM Corp. 2002,2008 |
| 6 | * IBM Corporation | ||
| 7 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | 6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) |
| 8 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
| 9 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
| @@ -23,7 +22,9 @@ | |||
| 23 | #include <asm/cio.h> | 22 | #include <asm/cio.h> |
| 24 | #include <asm/param.h> /* HZ */ | 23 | #include <asm/param.h> /* HZ */ |
| 25 | #include <asm/cmb.h> | 24 | #include <asm/cmb.h> |
| 25 | #include <asm/isc.h> | ||
| 26 | 26 | ||
| 27 | #include "chp.h" | ||
| 27 | #include "cio.h" | 28 | #include "cio.h" |
| 28 | #include "cio_debug.h" | 29 | #include "cio_debug.h" |
| 29 | #include "css.h" | 30 | #include "css.h" |
| @@ -125,19 +126,24 @@ struct bus_type ccw_bus_type; | |||
| 125 | static void io_subchannel_irq(struct subchannel *); | 126 | static void io_subchannel_irq(struct subchannel *); |
| 126 | static int io_subchannel_probe(struct subchannel *); | 127 | static int io_subchannel_probe(struct subchannel *); |
| 127 | static int io_subchannel_remove(struct subchannel *); | 128 | static int io_subchannel_remove(struct subchannel *); |
| 128 | static int io_subchannel_notify(struct subchannel *, int); | ||
| 129 | static void io_subchannel_verify(struct subchannel *); | ||
| 130 | static void io_subchannel_ioterm(struct subchannel *); | ||
| 131 | static void io_subchannel_shutdown(struct subchannel *); | 129 | static void io_subchannel_shutdown(struct subchannel *); |
| 130 | static int io_subchannel_sch_event(struct subchannel *, int); | ||
| 131 | static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, | ||
| 132 | int); | ||
| 133 | |||
| 134 | static struct css_device_id io_subchannel_ids[] = { | ||
| 135 | { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, | ||
| 136 | { /* end of list */ }, | ||
| 137 | }; | ||
| 138 | MODULE_DEVICE_TABLE(css, io_subchannel_ids); | ||
| 132 | 139 | ||
| 133 | static struct css_driver io_subchannel_driver = { | 140 | static struct css_driver io_subchannel_driver = { |
| 134 | .owner = THIS_MODULE, | 141 | .owner = THIS_MODULE, |
| 135 | .subchannel_type = SUBCHANNEL_TYPE_IO, | 142 | .subchannel_type = io_subchannel_ids, |
| 136 | .name = "io_subchannel", | 143 | .name = "io_subchannel", |
| 137 | .irq = io_subchannel_irq, | 144 | .irq = io_subchannel_irq, |
| 138 | .notify = io_subchannel_notify, | 145 | .sch_event = io_subchannel_sch_event, |
| 139 | .verify = io_subchannel_verify, | 146 | .chp_event = io_subchannel_chp_event, |
| 140 | .termination = io_subchannel_ioterm, | ||
| 141 | .probe = io_subchannel_probe, | 147 | .probe = io_subchannel_probe, |
| 142 | .remove = io_subchannel_remove, | 148 | .remove = io_subchannel_remove, |
| 143 | .shutdown = io_subchannel_shutdown, | 149 | .shutdown = io_subchannel_shutdown, |
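[Editor's note] The switch from a single subchannel_type value to a css_device_id table follows the usual bus-matching pattern. A minimal sketch of another subchannel driver, modeled directly on the io_subchannel hooks above; the callback names and bodies are hypothetical, only the struct css_driver fields, the css_device_id layout and MODULE_DEVICE_TABLE(css, ...) come from this patch:

#include <linux/module.h>
#include "css.h"	/* drivers/s390/cio internal header, as used above */

static void example_sch_irq(struct subchannel *sch)
{
	/* handle an interrupt for this subchannel */
}

static int example_sch_probe(struct subchannel *sch)
{
	return 0;
}

static int example_sch_remove(struct subchannel *sch)
{
	return 0;
}

static int example_sch_event(struct subchannel *sch, int slow)
{
	/* re-evaluate the subchannel after a machine check */
	return 0;
}

/* Match on I/O subchannels only. */
static struct css_device_id example_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, example_subchannel_ids);

static struct css_driver example_subchannel_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = example_subchannel_ids,
	.name = "example_subchannel",
	.irq = example_sch_irq,
	.sch_event = example_sch_event,
	.probe = example_sch_probe,
	.remove = example_sch_remove,
};

/* Registration would go through css_driver_register() in the module init
 * function (assumed; not part of this hunk). */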
| @@ -487,25 +493,22 @@ static int online_store_recog_and_online(struct ccw_device *cdev) | |||
| 487 | ccw_device_set_online(cdev); | 493 | ccw_device_set_online(cdev); |
| 488 | return 0; | 494 | return 0; |
| 489 | } | 495 | } |
| 490 | static void online_store_handle_online(struct ccw_device *cdev, int force) | 496 | static int online_store_handle_online(struct ccw_device *cdev, int force) |
| 491 | { | 497 | { |
| 492 | int ret; | 498 | int ret; |
| 493 | 499 | ||
| 494 | ret = online_store_recog_and_online(cdev); | 500 | ret = online_store_recog_and_online(cdev); |
| 495 | if (ret) | 501 | if (ret) |
| 496 | return; | 502 | return ret; |
| 497 | if (force && cdev->private->state == DEV_STATE_BOXED) { | 503 | if (force && cdev->private->state == DEV_STATE_BOXED) { |
| 498 | ret = ccw_device_stlck(cdev); | 504 | ret = ccw_device_stlck(cdev); |
| 499 | if (ret) { | 505 | if (ret) |
| 500 | dev_warn(&cdev->dev, | 506 | return ret; |
| 501 | "ccw_device_stlck returned %d!\n", ret); | ||
| 502 | return; | ||
| 503 | } | ||
| 504 | if (cdev->id.cu_type == 0) | 507 | if (cdev->id.cu_type == 0) |
| 505 | cdev->private->state = DEV_STATE_NOT_OPER; | 508 | cdev->private->state = DEV_STATE_NOT_OPER; |
| 506 | online_store_recog_and_online(cdev); | 509 | online_store_recog_and_online(cdev); |
| 507 | } | 510 | } |
| 508 | 511 | return 0; | |
| 509 | } | 512 | } |
| 510 | 513 | ||
| 511 | static ssize_t online_store (struct device *dev, struct device_attribute *attr, | 514 | static ssize_t online_store (struct device *dev, struct device_attribute *attr, |
| @@ -538,8 +541,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, | |||
| 538 | ret = count; | 541 | ret = count; |
| 539 | break; | 542 | break; |
| 540 | case 1: | 543 | case 1: |
| 541 | online_store_handle_online(cdev, force); | 544 | ret = online_store_handle_online(cdev, force); |
| 542 | ret = count; | 545 | if (!ret) |
| 546 | ret = count; | ||
| 543 | break; | 547 | break; |
| 544 | default: | 548 | default: |
| 545 | ret = -EINVAL; | 549 | ret = -EINVAL; |
| @@ -584,19 +588,14 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); | |||
| 584 | static DEVICE_ATTR(online, 0644, online_show, online_store); | 588 | static DEVICE_ATTR(online, 0644, online_show, online_store); |
| 585 | static DEVICE_ATTR(availability, 0444, available_show, NULL); | 589 | static DEVICE_ATTR(availability, 0444, available_show, NULL); |
| 586 | 590 | ||
| 587 | static struct attribute * subch_attrs[] = { | 591 | static struct attribute *io_subchannel_attrs[] = { |
| 588 | &dev_attr_chpids.attr, | 592 | &dev_attr_chpids.attr, |
| 589 | &dev_attr_pimpampom.attr, | 593 | &dev_attr_pimpampom.attr, |
| 590 | NULL, | 594 | NULL, |
| 591 | }; | 595 | }; |
| 592 | 596 | ||
| 593 | static struct attribute_group subch_attr_group = { | 597 | static struct attribute_group io_subchannel_attr_group = { |
| 594 | .attrs = subch_attrs, | 598 | .attrs = io_subchannel_attrs, |
| 595 | }; | ||
| 596 | |||
| 597 | struct attribute_group *subch_attr_groups[] = { | ||
| 598 | &subch_attr_group, | ||
| 599 | NULL, | ||
| 600 | }; | 599 | }; |
| 601 | 600 | ||
| 602 | static struct attribute * ccwdev_attrs[] = { | 601 | static struct attribute * ccwdev_attrs[] = { |
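[Editor's note] The rename to io_subchannel_attr_group goes together with the probe/remove changes further down: the group is no longer handed to the css core through subch_attr_groups[] but created and removed by the I/O subchannel driver itself. A hedged stand-alone sketch of that pattern; the names are hypothetical, while DEVICE_ATTR, struct attribute_group, sysfs_create_group() and sysfs_remove_group() are the interfaces used in this patch:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "example\n");
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};

static struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

/* In probe: create the group on the device's kobject ... */
static int example_probe(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &example_attr_group);
}

/* ... and tear it down again in remove and in every probe error path. */
static void example_remove(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &example_attr_group);
}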
| @@ -790,7 +789,7 @@ static void sch_attach_device(struct subchannel *sch, | |||
| 790 | sch_set_cdev(sch, cdev); | 789 | sch_set_cdev(sch, cdev); |
| 791 | cdev->private->schid = sch->schid; | 790 | cdev->private->schid = sch->schid; |
| 792 | cdev->ccwlock = sch->lock; | 791 | cdev->ccwlock = sch->lock; |
| 793 | device_trigger_reprobe(sch); | 792 | ccw_device_trigger_reprobe(cdev); |
| 794 | spin_unlock_irq(sch->lock); | 793 | spin_unlock_irq(sch->lock); |
| 795 | } | 794 | } |
| 796 | 795 | ||
| @@ -1037,7 +1036,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) | |||
| 1037 | struct ccw_device_private *priv; | 1036 | struct ccw_device_private *priv; |
| 1038 | 1037 | ||
| 1039 | sch_set_cdev(sch, cdev); | 1038 | sch_set_cdev(sch, cdev); |
| 1040 | sch->driver = &io_subchannel_driver; | ||
| 1041 | cdev->ccwlock = sch->lock; | 1039 | cdev->ccwlock = sch->lock; |
| 1042 | 1040 | ||
| 1043 | /* Init private data. */ | 1041 | /* Init private data. */ |
| @@ -1122,8 +1120,33 @@ static void io_subchannel_irq(struct subchannel *sch) | |||
| 1122 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); | 1120 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); |
| 1123 | } | 1121 | } |
| 1124 | 1122 | ||
| 1125 | static int | 1123 | static void io_subchannel_init_fields(struct subchannel *sch) |
| 1126 | io_subchannel_probe (struct subchannel *sch) | 1124 | { |
| 1125 | if (cio_is_console(sch->schid)) | ||
| 1126 | sch->opm = 0xff; | ||
| 1127 | else | ||
| 1128 | sch->opm = chp_get_sch_opm(sch); | ||
| 1129 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | ||
| 1130 | sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC; | ||
| 1131 | |||
| 1132 | CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X" | ||
| 1133 | " - PIM = %02X, PAM = %02X, POM = %02X\n", | ||
| 1134 | sch->schib.pmcw.dev, sch->schid.ssid, | ||
| 1135 | sch->schid.sch_no, sch->schib.pmcw.pim, | ||
| 1136 | sch->schib.pmcw.pam, sch->schib.pmcw.pom); | ||
| 1137 | /* Initially set up some fields in the pmcw. */ | ||
| 1138 | sch->schib.pmcw.ena = 0; | ||
| 1139 | sch->schib.pmcw.csense = 1; /* concurrent sense */ | ||
| 1140 | if ((sch->lpm & (sch->lpm - 1)) != 0) | ||
| 1141 | sch->schib.pmcw.mp = 1; /* multipath mode */ | ||
| 1142 | /* clean up possible residual cmf stuff */ | ||
| 1143 | sch->schib.pmcw.mme = 0; | ||
| 1144 | sch->schib.pmcw.mbfc = 0; | ||
| 1145 | sch->schib.pmcw.mbi = 0; | ||
| 1146 | sch->schib.mba = 0; | ||
| 1147 | } | ||
| 1148 | |||
| 1149 | static int io_subchannel_probe(struct subchannel *sch) | ||
| 1127 | { | 1150 | { |
| 1128 | struct ccw_device *cdev; | 1151 | struct ccw_device *cdev; |
| 1129 | int rc; | 1152 | int rc; |
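[Editor's note] io_subchannel_init_fields() enables multipath mode only when the logical path mask has more than one bit set; (lpm & (lpm - 1)) != 0 is the usual more-than-one-bit-set test. A tiny stand-alone illustration:

#include <stdio.h>

static int has_multiple_paths(unsigned char lpm)
{
	/* lpm & (lpm - 1) clears the lowest set bit; a non-zero result
	 * means at least two path bits were set in the mask. */
	return (lpm & (lpm - 1)) != 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       has_multiple_paths(0x80),  /* single path -> 0 */
	       has_multiple_paths(0xc0),  /* two paths   -> 1 */
	       has_multiple_paths(0x00)); /* no path     -> 0 */
	return 0;
}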
| @@ -1132,11 +1155,21 @@ io_subchannel_probe (struct subchannel *sch) | |||
| 1132 | 1155 | ||
| 1133 | cdev = sch_get_cdev(sch); | 1156 | cdev = sch_get_cdev(sch); |
| 1134 | if (cdev) { | 1157 | if (cdev) { |
| 1158 | rc = sysfs_create_group(&sch->dev.kobj, | ||
| 1159 | &io_subchannel_attr_group); | ||
| 1160 | if (rc) | ||
| 1161 | CIO_MSG_EVENT(0, "Failed to create io subchannel " | ||
| 1162 | "attributes for subchannel " | ||
| 1163 | "0.%x.%04x (rc=%d)\n", | ||
| 1164 | sch->schid.ssid, sch->schid.sch_no, rc); | ||
| 1135 | /* | 1165 | /* |
| 1136 | * This subchannel already has an associated ccw_device. | 1166 | * This subchannel already has an associated ccw_device. |
| 1137 | * Register it and exit. This happens for all early | 1167 | * Throw the delayed uevent for the subchannel, register |
| 1138 | * device, e.g. the console. | 1168 | * the ccw_device and exit. This happens for all early |
| 1169 | * devices, e.g. the console. | ||
| 1139 | */ | 1170 | */ |
| 1171 | sch->dev.uevent_suppress = 0; | ||
| 1172 | kobject_uevent(&sch->dev.kobj, KOBJ_ADD); | ||
| 1140 | cdev->dev.groups = ccwdev_attr_groups; | 1173 | cdev->dev.groups = ccwdev_attr_groups; |
| 1141 | device_initialize(&cdev->dev); | 1174 | device_initialize(&cdev->dev); |
| 1142 | ccw_device_register(cdev); | 1175 | ccw_device_register(cdev); |
| @@ -1152,17 +1185,24 @@ io_subchannel_probe (struct subchannel *sch) | |||
| 1152 | get_device(&cdev->dev); | 1185 | get_device(&cdev->dev); |
| 1153 | return 0; | 1186 | return 0; |
| 1154 | } | 1187 | } |
| 1188 | io_subchannel_init_fields(sch); | ||
| 1155 | /* | 1189 | /* |
| 1156 | * First check if a fitting device may be found amongst the | 1190 | * First check if a fitting device may be found amongst the |
| 1157 | * disconnected devices or in the orphanage. | 1191 | * disconnected devices or in the orphanage. |
| 1158 | */ | 1192 | */ |
| 1159 | dev_id.devno = sch->schib.pmcw.dev; | 1193 | dev_id.devno = sch->schib.pmcw.dev; |
| 1160 | dev_id.ssid = sch->schid.ssid; | 1194 | dev_id.ssid = sch->schid.ssid; |
| 1195 | rc = sysfs_create_group(&sch->dev.kobj, | ||
| 1196 | &io_subchannel_attr_group); | ||
| 1197 | if (rc) | ||
| 1198 | return rc; | ||
| 1161 | /* Allocate I/O subchannel private data. */ | 1199 | /* Allocate I/O subchannel private data. */ |
| 1162 | sch->private = kzalloc(sizeof(struct io_subchannel_private), | 1200 | sch->private = kzalloc(sizeof(struct io_subchannel_private), |
| 1163 | GFP_KERNEL | GFP_DMA); | 1201 | GFP_KERNEL | GFP_DMA); |
| 1164 | if (!sch->private) | 1202 | if (!sch->private) { |
| 1165 | return -ENOMEM; | 1203 | rc = -ENOMEM; |
| 1204 | goto out_err; | ||
| 1205 | } | ||
| 1166 | cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); | 1206 | cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); |
| 1167 | if (!cdev) | 1207 | if (!cdev) |
| 1168 | cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), | 1208 | cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), |
| @@ -1181,8 +1221,8 @@ io_subchannel_probe (struct subchannel *sch) | |||
| 1181 | } | 1221 | } |
| 1182 | cdev = io_subchannel_create_ccwdev(sch); | 1222 | cdev = io_subchannel_create_ccwdev(sch); |
| 1183 | if (IS_ERR(cdev)) { | 1223 | if (IS_ERR(cdev)) { |
| 1184 | kfree(sch->private); | 1224 | rc = PTR_ERR(cdev); |
| 1185 | return PTR_ERR(cdev); | 1225 | goto out_err; |
| 1186 | } | 1226 | } |
| 1187 | rc = io_subchannel_recog(cdev, sch); | 1227 | rc = io_subchannel_recog(cdev, sch); |
| 1188 | if (rc) { | 1228 | if (rc) { |
| @@ -1191,9 +1231,12 @@ io_subchannel_probe (struct subchannel *sch) | |||
| 1191 | spin_unlock_irqrestore(sch->lock, flags); | 1231 | spin_unlock_irqrestore(sch->lock, flags); |
| 1192 | if (cdev->dev.release) | 1232 | if (cdev->dev.release) |
| 1193 | cdev->dev.release(&cdev->dev); | 1233 | cdev->dev.release(&cdev->dev); |
| 1194 | kfree(sch->private); | 1234 | goto out_err; |
| 1195 | } | 1235 | } |
| 1196 | 1236 | return 0; | |
| 1237 | out_err: | ||
| 1238 | kfree(sch->private); | ||
| 1239 | sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); | ||
| 1197 | return rc; | 1240 | return rc; |
| 1198 | } | 1241 | } |
| 1199 | 1242 | ||
| @@ -1214,6 +1257,7 @@ io_subchannel_remove (struct subchannel *sch) | |||
| 1214 | ccw_device_unregister(cdev); | 1257 | ccw_device_unregister(cdev); |
| 1215 | put_device(&cdev->dev); | 1258 | put_device(&cdev->dev); |
| 1216 | kfree(sch->private); | 1259 | kfree(sch->private); |
| 1260 | sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); | ||
| 1217 | return 0; | 1261 | return 0; |
| 1218 | } | 1262 | } |
| 1219 | 1263 | ||
| @@ -1224,11 +1268,7 @@ static int io_subchannel_notify(struct subchannel *sch, int event) | |||
| 1224 | cdev = sch_get_cdev(sch); | 1268 | cdev = sch_get_cdev(sch); |
| 1225 | if (!cdev) | 1269 | if (!cdev) |
| 1226 | return 0; | 1270 | return 0; |
| 1227 | if (!cdev->drv) | 1271 | return ccw_device_notify(cdev, event); |
| 1228 | return 0; | ||
| 1229 | if (!cdev->online) | ||
| 1230 | return 0; | ||
| 1231 | return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; | ||
| 1232 | } | 1272 | } |
| 1233 | 1273 | ||
| 1234 | static void io_subchannel_verify(struct subchannel *sch) | 1274 | static void io_subchannel_verify(struct subchannel *sch) |
| @@ -1240,22 +1280,96 @@ static void io_subchannel_verify(struct subchannel *sch) | |||
| 1240 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | 1280 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); |
| 1241 | } | 1281 | } |
| 1242 | 1282 | ||
| 1243 | static void io_subchannel_ioterm(struct subchannel *sch) | 1283 | static int check_for_io_on_path(struct subchannel *sch, int mask) |
| 1244 | { | 1284 | { |
| 1245 | struct ccw_device *cdev; | 1285 | int cc; |
| 1246 | 1286 | ||
| 1247 | cdev = sch_get_cdev(sch); | 1287 | cc = stsch(sch->schid, &sch->schib); |
| 1248 | if (!cdev) | 1288 | if (cc) |
| 1249 | return; | 1289 | return 0; |
| 1250 | /* Internal I/O will be retried by the interrupt handler. */ | 1290 | if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask) |
| 1251 | if (cdev->private->flags.intretry) | 1291 | return 1; |
| 1292 | return 0; | ||
| 1293 | } | ||
| 1294 | |||
| 1295 | static void terminate_internal_io(struct subchannel *sch, | ||
| 1296 | struct ccw_device *cdev) | ||
| 1297 | { | ||
| 1298 | if (cio_clear(sch)) { | ||
| 1299 | /* Recheck device in case clear failed. */ | ||
| 1300 | sch->lpm = 0; | ||
| 1301 | if (cdev->online) | ||
| 1302 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | ||
| 1303 | else | ||
| 1304 | css_schedule_eval(sch->schid); | ||
| 1252 | return; | 1305 | return; |
| 1306 | } | ||
| 1253 | cdev->private->state = DEV_STATE_CLEAR_VERIFY; | 1307 | cdev->private->state = DEV_STATE_CLEAR_VERIFY; |
| 1308 | /* Request retry of internal operation. */ | ||
| 1309 | cdev->private->flags.intretry = 1; | ||
| 1310 | /* Call handler. */ | ||
| 1254 | if (cdev->handler) | 1311 | if (cdev->handler) |
| 1255 | cdev->handler(cdev, cdev->private->intparm, | 1312 | cdev->handler(cdev, cdev->private->intparm, |
| 1256 | ERR_PTR(-EIO)); | 1313 | ERR_PTR(-EIO)); |
| 1257 | } | 1314 | } |
| 1258 | 1315 | ||
| 1316 | static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) | ||
| 1317 | { | ||
| 1318 | struct ccw_device *cdev; | ||
| 1319 | |||
| 1320 | cdev = sch_get_cdev(sch); | ||
| 1321 | if (!cdev) | ||
| 1322 | return; | ||
| 1323 | if (check_for_io_on_path(sch, mask)) { | ||
| 1324 | if (cdev->private->state == DEV_STATE_ONLINE) | ||
| 1325 | ccw_device_kill_io(cdev); | ||
| 1326 | else { | ||
| 1327 | terminate_internal_io(sch, cdev); | ||
| 1328 | /* Re-start path verification. */ | ||
| 1329 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | ||
| 1330 | } | ||
| 1331 | } else | ||
| 1332 | /* trigger path verification. */ | ||
| 1333 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | ||
| 1334 | |||
| 1335 | } | ||
| 1336 | |||
| 1337 | static int io_subchannel_chp_event(struct subchannel *sch, | ||
| 1338 | struct chp_link *link, int event) | ||
| 1339 | { | ||
| 1340 | int mask; | ||
| 1341 | |||
| 1342 | mask = chp_ssd_get_mask(&sch->ssd_info, link); | ||
| 1343 | if (!mask) | ||
| 1344 | return 0; | ||
| 1345 | switch (event) { | ||
| 1346 | case CHP_VARY_OFF: | ||
| 1347 | sch->opm &= ~mask; | ||
| 1348 | sch->lpm &= ~mask; | ||
| 1349 | io_subchannel_terminate_path(sch, mask); | ||
| 1350 | break; | ||
| 1351 | case CHP_VARY_ON: | ||
| 1352 | sch->opm |= mask; | ||
| 1353 | sch->lpm |= mask; | ||
| 1354 | io_subchannel_verify(sch); | ||
| 1355 | break; | ||
| 1356 | case CHP_OFFLINE: | ||
| 1357 | if (stsch(sch->schid, &sch->schib)) | ||
| 1358 | return -ENXIO; | ||
| 1359 | if (!css_sch_is_valid(&sch->schib)) | ||
| 1360 | return -ENODEV; | ||
| 1361 | io_subchannel_terminate_path(sch, mask); | ||
| 1362 | break; | ||
| 1363 | case CHP_ONLINE: | ||
| 1364 | if (stsch(sch->schid, &sch->schib)) | ||
| 1365 | return -ENXIO; | ||
| 1366 | sch->lpm |= mask & sch->opm; | ||
| 1367 | io_subchannel_verify(sch); | ||
| 1368 | break; | ||
| 1369 | } | ||
| 1370 | return 0; | ||
| 1371 | } | ||
| 1372 | |||
| 1259 | static void | 1373 | static void |
| 1260 | io_subchannel_shutdown(struct subchannel *sch) | 1374 | io_subchannel_shutdown(struct subchannel *sch) |
| 1261 | { | 1375 | { |
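[Editor's note] The new channel-path event handling is essentially bit bookkeeping on two per-subchannel masks, opm (operational paths) and lpm (paths currently usable for I/O), followed by either path termination or re-verification. A reduced sketch of just the mask updates, using hypothetical stand-alone types; the real code operates on struct subchannel as shown above:

/* Hypothetical mirror of the mask handling in io_subchannel_chp_event();
 * mask selects a single channel path. */
struct path_state {
	unsigned char opm;	/* operational path mask */
	unsigned char lpm;	/* logical (usable) path mask */
};

static void path_vary_off(struct path_state *p, unsigned char mask)
{
	p->opm &= ~mask;	/* path taken out of service ... */
	p->lpm &= ~mask;	/* ... so it must not be used for I/O */
}

static void path_vary_on(struct path_state *p, unsigned char mask)
{
	p->opm |= mask;
	p->lpm |= mask;
}

static void path_back_online(struct path_state *p, unsigned char mask)
{
	/* A path that reappears is only used again if it is operational. */
	p->lpm |= mask & p->opm;
}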
| @@ -1285,6 +1399,195 @@ io_subchannel_shutdown(struct subchannel *sch) | |||
| 1285 | cio_disable_subchannel(sch); | 1399 | cio_disable_subchannel(sch); |
| 1286 | } | 1400 | } |
| 1287 | 1401 | ||
| 1402 | static int io_subchannel_get_status(struct subchannel *sch) | ||
| 1403 | { | ||
| 1404 | struct schib schib; | ||
| 1405 | |||
| 1406 | if (stsch(sch->schid, &schib) || !schib.pmcw.dnv) | ||
| 1407 | return CIO_GONE; | ||
| 1408 | if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev)) | ||
| 1409 | return CIO_REVALIDATE; | ||
| 1410 | if (!sch->lpm) | ||
| 1411 | return CIO_NO_PATH; | ||
| 1412 | return CIO_OPER; | ||
| 1413 | } | ||
| 1414 | |||
| 1415 | static int device_is_disconnected(struct ccw_device *cdev) | ||
| 1416 | { | ||
| 1417 | if (!cdev) | ||
| 1418 | return 0; | ||
| 1419 | return (cdev->private->state == DEV_STATE_DISCONNECTED || | ||
| 1420 | cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); | ||
| 1421 | } | ||
| 1422 | |||
| 1423 | static int recovery_check(struct device *dev, void *data) | ||
| 1424 | { | ||
| 1425 | struct ccw_device *cdev = to_ccwdev(dev); | ||
| 1426 | int *redo = data; | ||
| 1427 | |||
| 1428 | spin_lock_irq(cdev->ccwlock); | ||
| 1429 | switch (cdev->private->state) { | ||
| 1430 | case DEV_STATE_DISCONNECTED: | ||
| 1431 | CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", | ||
| 1432 | cdev->private->dev_id.ssid, | ||
| 1433 | cdev->private->dev_id.devno); | ||
| 1434 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | ||
| 1435 | *redo = 1; | ||
| 1436 | break; | ||
| 1437 | case DEV_STATE_DISCONNECTED_SENSE_ID: | ||
| 1438 | *redo = 1; | ||
| 1439 | break; | ||
| 1440 | } | ||
| 1441 | spin_unlock_irq(cdev->ccwlock); | ||
| 1442 | |||
| 1443 | return 0; | ||
| 1444 | } | ||
| 1445 | |||
| 1446 | static void recovery_work_func(struct work_struct *unused) | ||
| 1447 | { | ||
| 1448 | int redo = 0; | ||
| 1449 | |||
| 1450 | bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check); | ||
| 1451 | if (redo) { | ||
| 1452 | spin_lock_irq(&recovery_lock); | ||
| 1453 | if (!timer_pending(&recovery_timer)) { | ||
| 1454 | if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1) | ||
| 1455 | recovery_phase++; | ||
| 1456 | mod_timer(&recovery_timer, jiffies + | ||
| 1457 | recovery_delay[recovery_phase] * HZ); | ||
| 1458 | } | ||
| 1459 | spin_unlock_irq(&recovery_lock); | ||
| 1460 | } else | ||
| 1461 | CIO_MSG_EVENT(4, "recovery: end\n"); | ||
| 1462 | } | ||
| 1463 | |||
| 1464 | static DECLARE_WORK(recovery_work, recovery_work_func); | ||
| 1465 | |||
| 1466 | static void recovery_func(unsigned long data) | ||
| 1467 | { | ||
| 1468 | /* | ||
| 1469 | * We can't do our recovery in softirq context and it's not | ||
| 1470 | * performance critical, so we schedule it. | ||
| 1471 | */ | ||
| 1472 | schedule_work(&recovery_work); | ||
| 1473 | } | ||
| 1474 | |||
| 1475 | static void ccw_device_schedule_recovery(void) | ||
| 1476 | { | ||
| 1477 | unsigned long flags; | ||
| 1478 | |||
| 1479 | CIO_MSG_EVENT(4, "recovery: schedule\n"); | ||
| 1480 | spin_lock_irqsave(&recovery_lock, flags); | ||
| 1481 | if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { | ||
| 1482 | recovery_phase = 0; | ||
| 1483 | mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ); | ||
| 1484 | } | ||
| 1485 | spin_unlock_irqrestore(&recovery_lock, flags); | ||
| 1486 | } | ||
| 1487 | |||
| 1488 | static void device_set_disconnected(struct ccw_device *cdev) | ||
| 1489 | { | ||
| 1490 | if (!cdev) | ||
| 1491 | return; | ||
| 1492 | ccw_device_set_timeout(cdev, 0); | ||
| 1493 | cdev->private->flags.fake_irb = 0; | ||
| 1494 | cdev->private->state = DEV_STATE_DISCONNECTED; | ||
| 1495 | if (cdev->online) | ||
| 1496 | ccw_device_schedule_recovery(); | ||
| 1497 | } | ||
| 1498 | |||
| 1499 | static int io_subchannel_sch_event(struct subchannel *sch, int slow) | ||
| 1500 | { | ||
| 1501 | int event, ret, disc; | ||
| 1502 | unsigned long flags; | ||
| 1503 | enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action; | ||
| 1504 | struct ccw_device *cdev; | ||
| 1505 | |||
| 1506 | spin_lock_irqsave(sch->lock, flags); | ||
| 1507 | cdev = sch_get_cdev(sch); | ||
| 1508 | disc = device_is_disconnected(cdev); | ||
| 1509 | if (disc && slow) { | ||
| 1510 | /* Disconnected devices are evaluated directly only.*/ | ||
| 1511 | spin_unlock_irqrestore(sch->lock, flags); | ||
| 1512 | return 0; | ||
| 1513 | } | ||
| 1514 | /* No interrupt after machine check - kill pending timers. */ | ||
| 1515 | if (cdev) | ||
| 1516 | ccw_device_set_timeout(cdev, 0); | ||
| 1517 | if (!disc && !slow) { | ||
| 1518 | /* Non-disconnected devices are evaluated on the slow path. */ | ||
| 1519 | spin_unlock_irqrestore(sch->lock, flags); | ||
| 1520 | return -EAGAIN; | ||
| 1521 | } | ||
| 1522 | event = io_subchannel_get_status(sch); | ||
| 1523 | CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", | ||
| 1524 | sch->schid.ssid, sch->schid.sch_no, event, | ||
| 1525 | disc ? "disconnected" : "normal", | ||
| 1526 | slow ? "slow" : "fast"); | ||
| 1527 | /* Analyze subchannel status. */ | ||
| 1528 | action = NONE; | ||
| 1529 | switch (event) { | ||
| 1530 | case CIO_NO_PATH: | ||
| 1531 | if (disc) { | ||
| 1532 | /* Check if paths have become available. */ | ||
| 1533 | action = REPROBE; | ||
| 1534 | break; | ||
| 1535 | } | ||
| 1536 | /* fall through */ | ||
| 1537 | case CIO_GONE: | ||
| 1538 | /* Prevent unwanted effects when opening lock. */ | ||
| 1539 | cio_disable_subchannel(sch); | ||
| 1540 | device_set_disconnected(cdev); | ||
| 1541 | /* Ask driver what to do with device. */ | ||
| 1542 | action = UNREGISTER; | ||
| 1543 | spin_unlock_irqrestore(sch->lock, flags); | ||
| 1544 | ret = io_subchannel_notify(sch, event); | ||
| 1545 | spin_lock_irqsave(sch->lock, flags); | ||
| 1546 | if (ret) | ||
| 1547 | action = NONE; | ||
| 1548 | break; | ||
| 1549 | case CIO_REVALIDATE: | ||
| 1550 | /* Device will be removed, so no notify necessary. */ | ||
| 1551 | if (disc) | ||
| 1552 | /* Reprobe because immediate unregister might block. */ | ||
| 1553 | action = REPROBE; | ||
| 1554 | else | ||
| 1555 | action = UNREGISTER_PROBE; | ||
| 1556 | break; | ||
| 1557 | case CIO_OPER: | ||
| 1558 | if (disc) | ||
| 1559 | /* Get device operational again. */ | ||
| 1560 | action = REPROBE; | ||
| 1561 | break; | ||
| 1562 | } | ||
| 1563 | /* Perform action. */ | ||
| 1564 | ret = 0; | ||
| 1565 | switch (action) { | ||
| 1566 | case UNREGISTER: | ||
| 1567 | case UNREGISTER_PROBE: | ||
| 1568 | /* Unregister device (will use subchannel lock). */ | ||
| 1569 | spin_unlock_irqrestore(sch->lock, flags); | ||
| 1570 | css_sch_device_unregister(sch); | ||
| 1571 | spin_lock_irqsave(sch->lock, flags); | ||
| 1572 | |||
| 1573 | /* Reset intparm to zeroes. */ | ||
| 1574 | sch->schib.pmcw.intparm = 0; | ||
| 1575 | cio_modify(sch); | ||
| 1576 | break; | ||
| 1577 | case REPROBE: | ||
| 1578 | ccw_device_trigger_reprobe(cdev); | ||
| 1579 | break; | ||
| 1580 | default: | ||
| 1581 | break; | ||
| 1582 | } | ||
| 1583 | spin_unlock_irqrestore(sch->lock, flags); | ||
| 1584 | /* Probe if necessary. */ | ||
| 1585 | if (action == UNREGISTER_PROBE) | ||
| 1586 | ret = css_probe_device(sch->schid); | ||
| 1587 | |||
| 1588 | return ret; | ||
| 1589 | } | ||
| 1590 | |||
| 1288 | #ifdef CONFIG_CCW_CONSOLE | 1591 | #ifdef CONFIG_CCW_CONSOLE |
| 1289 | static struct ccw_device console_cdev; | 1592 | static struct ccw_device console_cdev; |
| 1290 | static struct ccw_device_private console_private; | 1593 | static struct ccw_device_private console_private; |
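[Editor's note] The recovery machinery moved into this file keeps the familiar split: recovery_func() runs as a timer callback in softirq context, so the actual work, which may sleep while walking the ccw bus, is deferred to a workqueue. A minimal sketch of that pattern with hypothetical names; DECLARE_WORK, schedule_work() and mod_timer() are used exactly as above, while DEFINE_TIMER is assumed as the usual way to declare the timer (its declaration is not visible in this hunk):

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_work_func(struct work_struct *unused)
{
	/* Runs in process context: may sleep, take mutexes,
	 * iterate over devices, and so on. */
}
static DECLARE_WORK(example_work, example_work_func);

static void example_timer_func(unsigned long data)
{
	/* Timer callbacks run in softirq context, so defer the real work. */
	schedule_work(&example_work);
}
static DEFINE_TIMER(example_timer, example_timer_func, 0, 0);

/* Arming the timer, e.g. for a delay of 3 seconds:
 *	mod_timer(&example_timer, jiffies + 3 * HZ);
 */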
| @@ -1297,14 +1600,16 @@ spinlock_t * cio_get_console_lock(void) | |||
| 1297 | return &ccw_console_lock; | 1600 | return &ccw_console_lock; |
| 1298 | } | 1601 | } |
| 1299 | 1602 | ||
| 1300 | static int | 1603 | static int ccw_device_console_enable(struct ccw_device *cdev, |
| 1301 | ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) | 1604 | struct subchannel *sch) |
| 1302 | { | 1605 | { |
| 1303 | int rc; | 1606 | int rc; |
| 1304 | 1607 | ||
| 1305 | /* Attach subchannel private data. */ | 1608 | /* Attach subchannel private data. */ |
| 1306 | sch->private = cio_get_console_priv(); | 1609 | sch->private = cio_get_console_priv(); |
| 1307 | memset(sch->private, 0, sizeof(struct io_subchannel_private)); | 1610 | memset(sch->private, 0, sizeof(struct io_subchannel_private)); |
| 1611 | io_subchannel_init_fields(sch); | ||
| 1612 | sch->driver = &io_subchannel_driver; | ||
| 1308 | /* Initialize the ccw_device structure. */ | 1613 | /* Initialize the ccw_device structure. */ |
| 1309 | cdev->dev.parent= &sch->dev; | 1614 | cdev->dev.parent= &sch->dev; |
| 1310 | rc = io_subchannel_recog(cdev, sch); | 1615 | rc = io_subchannel_recog(cdev, sch); |
| @@ -1515,71 +1820,6 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev) | |||
| 1515 | return sch->schid; | 1820 | return sch->schid; |
| 1516 | } | 1821 | } |
| 1517 | 1822 | ||
| 1518 | static int recovery_check(struct device *dev, void *data) | ||
| 1519 | { | ||
| 1520 | struct ccw_device *cdev = to_ccwdev(dev); | ||
| 1521 | int *redo = data; | ||
| 1522 | |||
| 1523 | spin_lock_irq(cdev->ccwlock); | ||
| 1524 | switch (cdev->private->state) { | ||
| 1525 | case DEV_STATE_DISCONNECTED: | ||
| 1526 | CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n", | ||
| 1527 | cdev->private->dev_id.ssid, | ||
| 1528 | cdev->private->dev_id.devno); | ||
| 1529 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | ||
| 1530 | *redo = 1; | ||
| 1531 | break; | ||
| 1532 | case DEV_STATE_DISCONNECTED_SENSE_ID: | ||
| 1533 | *redo = 1; | ||
| 1534 | break; | ||
| 1535 | } | ||
| 1536 | spin_unlock_irq(cdev->ccwlock); | ||
| 1537 | |||
| 1538 | return 0; | ||
| 1539 | } | ||
| 1540 | |||
| 1541 | static void recovery_work_func(struct work_struct *unused) | ||
| 1542 | { | ||
| 1543 | int redo = 0; | ||
| 1544 | |||
| 1545 | bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check); | ||
| 1546 | if (redo) { | ||
| 1547 | spin_lock_irq(&recovery_lock); | ||
| 1548 | if (!timer_pending(&recovery_timer)) { | ||
| 1549 | if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1) | ||
| 1550 | recovery_phase++; | ||
| 1551 | mod_timer(&recovery_timer, jiffies + | ||
| 1552 | recovery_delay[recovery_phase] * HZ); | ||
| 1553 | } | ||
| 1554 | spin_unlock_irq(&recovery_lock); | ||
| 1555 | } else | ||
| 1556 | CIO_MSG_EVENT(4, "recovery: end\n"); | ||
| 1557 | } | ||
| 1558 | |||
| 1559 | static DECLARE_WORK(recovery_work, recovery_work_func); | ||
| 1560 | |||
| 1561 | static void recovery_func(unsigned long data) | ||
| 1562 | { | ||
| 1563 | /* | ||
| 1564 | * We can't do our recovery in softirq context and it's not | ||
| 1565 | * performance critical, so we schedule it. | ||
| 1566 | */ | ||
| 1567 | schedule_work(&recovery_work); | ||
| 1568 | } | ||
| 1569 | |||
| 1570 | void ccw_device_schedule_recovery(void) | ||
| 1571 | { | ||
| 1572 | unsigned long flags; | ||
| 1573 | |||
| 1574 | CIO_MSG_EVENT(4, "recovery: schedule\n"); | ||
| 1575 | spin_lock_irqsave(&recovery_lock, flags); | ||
| 1576 | if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { | ||
| 1577 | recovery_phase = 0; | ||
| 1578 | mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ); | ||
| 1579 | } | ||
| 1580 | spin_unlock_irqrestore(&recovery_lock, flags); | ||
| 1581 | } | ||
| 1582 | |||
| 1583 | MODULE_LICENSE("GPL"); | 1823 | MODULE_LICENSE("GPL"); |
| 1584 | EXPORT_SYMBOL(ccw_device_set_online); | 1824 | EXPORT_SYMBOL(ccw_device_set_online); |
| 1585 | EXPORT_SYMBOL(ccw_device_set_offline); | 1825 | EXPORT_SYMBOL(ccw_device_set_offline); |
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index cb08092be39f..9800a8335a3f 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
| @@ -88,8 +88,6 @@ int ccw_device_recognition(struct ccw_device *); | |||
| 88 | int ccw_device_online(struct ccw_device *); | 88 | int ccw_device_online(struct ccw_device *); |
| 89 | int ccw_device_offline(struct ccw_device *); | 89 | int ccw_device_offline(struct ccw_device *); |
| 90 | 90 | ||
| 91 | void ccw_device_schedule_recovery(void); | ||
| 92 | |||
| 93 | /* Function prototypes for device status and basic sense stuff. */ | 91 | /* Function prototypes for device status and basic sense stuff. */ |
| 94 | void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); | 92 | void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); |
| 95 | void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); | 93 | void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); |
| @@ -118,6 +116,11 @@ int ccw_device_call_handler(struct ccw_device *); | |||
| 118 | 116 | ||
| 119 | int ccw_device_stlck(struct ccw_device *); | 117 | int ccw_device_stlck(struct ccw_device *); |
| 120 | 118 | ||
| 119 | /* Helper function for machine check handling. */ | ||
| 120 | void ccw_device_trigger_reprobe(struct ccw_device *); | ||
| 121 | void ccw_device_kill_io(struct ccw_device *); | ||
| 122 | int ccw_device_notify(struct ccw_device *, int); | ||
| 123 | |||
| 121 | /* qdio needs this. */ | 124 | /* qdio needs this. */ |
| 122 | void ccw_device_set_timeout(struct ccw_device *, int); | 125 | void ccw_device_set_timeout(struct ccw_device *, int); |
| 123 | extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); | 126 | extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index e268d5a77c12..8b5fe57fb2f3 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
| @@ -2,8 +2,7 @@ | |||
| 2 | * drivers/s390/cio/device_fsm.c | 2 | * drivers/s390/cio/device_fsm.c |
| 3 | * finite state machine for device handling | 3 | * finite state machine for device handling |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | 5 | * Copyright IBM Corp. 2002,2008 |
| 6 | * IBM Corporation | ||
| 7 | * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) | 6 | * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) |
| 8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
| 9 | */ | 8 | */ |
| @@ -27,65 +26,6 @@ | |||
| 27 | 26 | ||
| 28 | static int timeout_log_enabled; | 27 | static int timeout_log_enabled; |
| 29 | 28 | ||
| 30 | int | ||
| 31 | device_is_online(struct subchannel *sch) | ||
| 32 | { | ||
| 33 | struct ccw_device *cdev; | ||
| 34 | |||
| 35 | cdev = sch_get_cdev(sch); | ||
| 36 | if (!cdev) | ||
| 37 | return 0; | ||
| 38 | return (cdev->private->state == DEV_STATE_ONLINE); | ||
| 39 | } | ||
| 40 | |||
| 41 | int | ||
| 42 | device_is_disconnected(struct subchannel *sch) | ||
| 43 | { | ||
| 44 | struct ccw_device *cdev; | ||
| 45 | |||
| 46 | cdev = sch_get_cdev(sch); | ||
| 47 | if (!cdev) | ||
| 48 | return 0; | ||
| 49 | return (cdev->private->state == DEV_STATE_DISCONNECTED || | ||
| 50 | cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); | ||
| 51 | } | ||
| 52 | |||
| 53 | void | ||
| 54 | device_set_disconnected(struct subchannel *sch) | ||
| 55 | { | ||
| 56 | struct ccw_device *cdev; | ||
| 57 | |||
| 58 | cdev = sch_get_cdev(sch); | ||
| 59 | if (!cdev) | ||
| 60 | return; | ||
| 61 | ccw_device_set_timeout(cdev, 0); | ||
| 62 | cdev->private->flags.fake_irb = 0; | ||
| 63 | cdev->private->state = DEV_STATE_DISCONNECTED; | ||
| 64 | if (cdev->online) | ||
| 65 | ccw_device_schedule_recovery(); | ||
| 66 | } | ||
| 67 | |||
| 68 | void device_set_intretry(struct subchannel *sch) | ||
| 69 | { | ||
| 70 | struct ccw_device *cdev; | ||
| 71 | |||
| 72 | cdev = sch_get_cdev(sch); | ||
| 73 | if (!cdev) | ||
| 74 | return; | ||
| 75 | cdev->private->flags.intretry = 1; | ||
| 76 | } | ||
| 77 | |||
| 78 | int device_trigger_verify(struct subchannel *sch) | ||
| 79 | { | ||
| 80 | struct ccw_device *cdev; | ||
| 81 | |||
| 82 | cdev = sch_get_cdev(sch); | ||
| 83 | if (!cdev || !cdev->online) | ||
| 84 | return -EINVAL; | ||
| 85 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | ||
| 86 | return 0; | ||
| 87 | } | ||
| 88 | |||
| 89 | static int __init ccw_timeout_log_setup(char *unused) | 29 | static int __init ccw_timeout_log_setup(char *unused) |
| 90 | { | 30 | { |
| 91 | timeout_log_enabled = 1; | 31 | timeout_log_enabled = 1; |
| @@ -99,31 +39,43 @@ static void ccw_timeout_log(struct ccw_device *cdev) | |||
| 99 | struct schib schib; | 39 | struct schib schib; |
| 100 | struct subchannel *sch; | 40 | struct subchannel *sch; |
| 101 | struct io_subchannel_private *private; | 41 | struct io_subchannel_private *private; |
| 42 | union orb *orb; | ||
| 102 | int cc; | 43 | int cc; |
| 103 | 44 | ||
| 104 | sch = to_subchannel(cdev->dev.parent); | 45 | sch = to_subchannel(cdev->dev.parent); |
| 105 | private = to_io_private(sch); | 46 | private = to_io_private(sch); |
| 47 | orb = &private->orb; | ||
| 106 | cc = stsch(sch->schid, &schib); | 48 | cc = stsch(sch->schid, &schib); |
| 107 | 49 | ||
| 108 | printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " | 50 | printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " |
| 109 | "device information:\n", get_clock()); | 51 | "device information:\n", get_clock()); |
| 110 | printk(KERN_WARNING "cio: orb:\n"); | 52 | printk(KERN_WARNING "cio: orb:\n"); |
| 111 | print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, | 53 | print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, |
| 112 | &private->orb, sizeof(private->orb), 0); | 54 | orb, sizeof(*orb), 0); |
| 113 | printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id); | 55 | printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id); |
| 114 | printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id); | 56 | printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id); |
| 115 | printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " | 57 | printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " |
| 116 | "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); | 58 | "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); |
| 117 | 59 | ||
| 118 | if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw || | 60 | if (orb->tm.b) { |
| 119 | (void *)(addr_t)private->orb.cpa == cdev->private->iccws) | 61 | printk(KERN_WARNING "cio: orb indicates transport mode\n"); |
| 120 | printk(KERN_WARNING "cio: last channel program (intern):\n"); | 62 | printk(KERN_WARNING "cio: last tcw:\n"); |
| 121 | else | 63 | print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, |
| 122 | printk(KERN_WARNING "cio: last channel program:\n"); | 64 | (void *)(addr_t)orb->tm.tcw, |
| 123 | 65 | sizeof(struct tcw), 0); | |
| 124 | print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, | 66 | } else { |
| 125 | (void *)(addr_t)private->orb.cpa, | 67 | printk(KERN_WARNING "cio: orb indicates command mode\n"); |
| 126 | sizeof(struct ccw1), 0); | 68 | if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw || |
| 69 | (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws) | ||
| 70 | printk(KERN_WARNING "cio: last channel program " | ||
| 71 | "(intern):\n"); | ||
| 72 | else | ||
| 73 | printk(KERN_WARNING "cio: last channel program:\n"); | ||
| 74 | |||
| 75 | print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, | ||
| 76 | (void *)(addr_t)orb->cmd.cpa, | ||
| 77 | sizeof(struct ccw1), 0); | ||
| 78 | } | ||
| 127 | printk(KERN_WARNING "cio: ccw device state: %d\n", | 79 | printk(KERN_WARNING "cio: ccw device state: %d\n", |
| 128 | cdev->private->state); | 80 | cdev->private->state); |
| 129 | printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc); | 81 | printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc); |
| @@ -171,18 +123,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires) | |||
| 171 | add_timer(&cdev->private->timer); | 123 | add_timer(&cdev->private->timer); |
| 172 | } | 124 | } |
| 173 | 125 | ||
| 174 | /* Kill any pending timers after machine check. */ | ||
| 175 | void | ||
| 176 | device_kill_pending_timer(struct subchannel *sch) | ||
| 177 | { | ||
| 178 | struct ccw_device *cdev; | ||
| 179 | |||
| 180 | cdev = sch_get_cdev(sch); | ||
| 181 | if (!cdev) | ||
| 182 | return; | ||
| 183 | ccw_device_set_timeout(cdev, 0); | ||
| 184 | } | ||
| 185 | |||
| 186 | /* | 126 | /* |
| 187 | * Cancel running i/o. This is called repeatedly since halt/clear are | 127 | * Cancel running i/o. This is called repeatedly since halt/clear are |
| 188 | * asynchronous operations. We do one try with cio_cancel, two tries | 128 | * asynchronous operations. We do one try with cio_cancel, two tries |
| @@ -205,15 +145,18 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) | |||
| 205 | /* Not operational -> done. */ | 145 | /* Not operational -> done. */ |
| 206 | return 0; | 146 | return 0; |
| 207 | /* Stage 1: cancel io. */ | 147 | /* Stage 1: cancel io. */ |
| 208 | if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) && | 148 | if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) && |
| 209 | !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { | 149 | !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { |
| 210 | ret = cio_cancel(sch); | 150 | if (!scsw_is_tm(&sch->schib.scsw)) { |
| 211 | if (ret != -EINVAL) | 151 | ret = cio_cancel(sch); |
| 212 | return ret; | 152 | if (ret != -EINVAL) |
| 213 | /* cancel io unsuccessful. From now on it is asynchronous. */ | 153 | return ret; |
| 154 | } | ||
| 155 | /* cancel io unsuccessful or not applicable (transport mode). | ||
| 156 | * Continue with asynchronous instructions. */ | ||
| 214 | cdev->private->iretry = 3; /* 3 halt retries. */ | 157 | cdev->private->iretry = 3; /* 3 halt retries. */ |
| 215 | } | 158 | } |
| 216 | if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { | 159 | if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { |
| 217 | /* Stage 2: halt io. */ | 160 | /* Stage 2: halt io. */ |
| 218 | if (cdev->private->iretry) { | 161 | if (cdev->private->iretry) { |
| 219 | cdev->private->iretry--; | 162 | cdev->private->iretry--; |
| @@ -388,34 +331,30 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err) | |||
| 388 | } | 331 | } |
| 389 | } | 332 | } |
| 390 | 333 | ||
| 334 | int ccw_device_notify(struct ccw_device *cdev, int event) | ||
| 335 | { | ||
| 336 | if (!cdev->drv) | ||
| 337 | return 0; | ||
| 338 | if (!cdev->online) | ||
| 339 | return 0; | ||
| 340 | return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; | ||
| 341 | } | ||
| 342 | |||
| 391 | static void | 343 | static void |
| 392 | ccw_device_oper_notify(struct work_struct *work) | 344 | ccw_device_oper_notify(struct work_struct *work) |
| 393 | { | 345 | { |
| 394 | struct ccw_device_private *priv; | 346 | struct ccw_device_private *priv; |
| 395 | struct ccw_device *cdev; | 347 | struct ccw_device *cdev; |
| 396 | struct subchannel *sch; | ||
| 397 | int ret; | 348 | int ret; |
| 398 | unsigned long flags; | ||
| 399 | 349 | ||
| 400 | priv = container_of(work, struct ccw_device_private, kick_work); | 350 | priv = container_of(work, struct ccw_device_private, kick_work); |
| 401 | cdev = priv->cdev; | 351 | cdev = priv->cdev; |
| 402 | spin_lock_irqsave(cdev->ccwlock, flags); | 352 | ret = ccw_device_notify(cdev, CIO_OPER); |
| 403 | sch = to_subchannel(cdev->dev.parent); | ||
| 404 | if (sch->driver && sch->driver->notify) { | ||
| 405 | spin_unlock_irqrestore(cdev->ccwlock, flags); | ||
| 406 | ret = sch->driver->notify(sch, CIO_OPER); | ||
| 407 | spin_lock_irqsave(cdev->ccwlock, flags); | ||
| 408 | } else | ||
| 409 | ret = 0; | ||
| 410 | if (ret) { | 353 | if (ret) { |
| 411 | /* Reenable channel measurements, if needed. */ | 354 | /* Reenable channel measurements, if needed. */ |
| 412 | spin_unlock_irqrestore(cdev->ccwlock, flags); | ||
| 413 | cmf_reenable(cdev); | 355 | cmf_reenable(cdev); |
| 414 | spin_lock_irqsave(cdev->ccwlock, flags); | ||
| 415 | wake_up(&cdev->private->wait_q); | 356 | wake_up(&cdev->private->wait_q); |
| 416 | } | 357 | } else |
| 417 | spin_unlock_irqrestore(cdev->ccwlock, flags); | ||
| 418 | if (!ret) | ||
| 419 | /* Driver doesn't want device back. */ | 358 | /* Driver doesn't want device back. */ |
| 420 | ccw_device_do_unreg_rereg(work); | 359 | ccw_device_do_unreg_rereg(work); |
| 421 | } | 360 | } |
| @@ -621,10 +560,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err) | |||
| 621 | /* Deliver fake irb to device driver, if needed. */ | 560 | /* Deliver fake irb to device driver, if needed. */ |
| 622 | if (cdev->private->flags.fake_irb) { | 561 | if (cdev->private->flags.fake_irb) { |
| 623 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | 562 | memset(&cdev->private->irb, 0, sizeof(struct irb)); |
| 624 | cdev->private->irb.scsw.cc = 1; | 563 | cdev->private->irb.scsw.cmd.cc = 1; |
| 625 | cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC; | 564 | cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC; |
| 626 | cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND; | 565 | cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND; |
| 627 | cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND; | 566 | cdev->private->irb.scsw.cmd.stctl = |
| 567 | SCSW_STCTL_STATUS_PEND; | ||
| 628 | cdev->private->flags.fake_irb = 0; | 568 | cdev->private->flags.fake_irb = 0; |
| 629 | if (cdev->handler) | 569 | if (cdev->handler) |
| 630 | cdev->handler(cdev, cdev->private->intparm, | 570 | cdev->handler(cdev, cdev->private->intparm, |
| @@ -718,13 +658,10 @@ ccw_device_offline(struct ccw_device *cdev) | |||
| 718 | sch = to_subchannel(cdev->dev.parent); | 658 | sch = to_subchannel(cdev->dev.parent); |
| 719 | if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) | 659 | if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) |
| 720 | return -ENODEV; | 660 | return -ENODEV; |
| 721 | if (cdev->private->state != DEV_STATE_ONLINE) { | 661 | if (scsw_actl(&sch->schib.scsw) != 0) |
| 722 | if (sch->schib.scsw.actl != 0) | ||
| 723 | return -EBUSY; | ||
| 724 | return -EINVAL; | ||
| 725 | } | ||
| 726 | if (sch->schib.scsw.actl != 0) | ||
| 727 | return -EBUSY; | 662 | return -EBUSY; |
| 663 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
| 664 | return -EINVAL; | ||
| 728 | /* Are we doing path grouping? */ | 665 | /* Are we doing path grouping? */ |
| 729 | if (!cdev->private->options.pgroup) { | 666 | if (!cdev->private->options.pgroup) { |
| 730 | /* No, set state offline immediately. */ | 667 | /* No, set state offline immediately. */ |
| @@ -799,9 +736,9 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 799 | */ | 736 | */ |
| 800 | stsch(sch->schid, &sch->schib); | 737 | stsch(sch->schid, &sch->schib); |
| 801 | 738 | ||
| 802 | if (sch->schib.scsw.actl != 0 || | 739 | if (scsw_actl(&sch->schib.scsw) != 0 || |
| 803 | (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) || | 740 | (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || |
| 804 | (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { | 741 | (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) { |
| 805 | /* | 742 | /* |
| 806 | * No final status yet or final status not yet delivered | 743 | * No final status yet or final status not yet delivered |
| 807 | * to the device driver. Can't do path verification now, | 744 | * to the device driver. Can't do path verification now, |
| @@ -823,13 +760,13 @@ static void | |||
| 823 | ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) | 760 | ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) |
| 824 | { | 761 | { |
| 825 | struct irb *irb; | 762 | struct irb *irb; |
| 763 | int is_cmd; | ||
| 826 | 764 | ||
| 827 | irb = (struct irb *) __LC_IRB; | 765 | irb = (struct irb *) __LC_IRB; |
| 766 | is_cmd = !scsw_is_tm(&irb->scsw); | ||
| 828 | /* Check for unsolicited interrupt. */ | 767 | /* Check for unsolicited interrupt. */ |
| 829 | if ((irb->scsw.stctl == | 768 | if (!scsw_is_solicited(&irb->scsw)) { |
| 830 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) | 769 | if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && |
| 831 | && (!irb->scsw.cc)) { | ||
| 832 | if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && | ||
| 833 | !irb->esw.esw0.erw.cons) { | 770 | !irb->esw.esw0.erw.cons) { |
| 834 | /* Unit check but no sense data. Need basic sense. */ | 771 | /* Unit check but no sense data. Need basic sense. */ |
| 835 | if (ccw_device_do_sense(cdev, irb) != 0) | 772 | if (ccw_device_do_sense(cdev, irb) != 0) |
| @@ -848,7 +785,7 @@ call_handler_unsol: | |||
| 848 | } | 785 | } |
| 849 | /* Accumulate status and find out if a basic sense is needed. */ | 786 | /* Accumulate status and find out if a basic sense is needed. */ |
| 850 | ccw_device_accumulate_irb(cdev, irb); | 787 | ccw_device_accumulate_irb(cdev, irb); |
| 851 | if (cdev->private->flags.dosense) { | 788 | if (is_cmd && cdev->private->flags.dosense) { |
| 852 | if (ccw_device_do_sense(cdev, irb) == 0) { | 789 | if (ccw_device_do_sense(cdev, irb) == 0) { |
| 853 | cdev->private->state = DEV_STATE_W4SENSE; | 790 | cdev->private->state = DEV_STATE_W4SENSE; |
| 854 | } | 791 | } |
| @@ -892,9 +829,9 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 892 | 829 | ||
| 893 | irb = (struct irb *) __LC_IRB; | 830 | irb = (struct irb *) __LC_IRB; |
| 894 | /* Check for unsolicited interrupt. */ | 831 | /* Check for unsolicited interrupt. */ |
| 895 | if (irb->scsw.stctl == | 832 | if (scsw_stctl(&irb->scsw) == |
| 896 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 833 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
| 897 | if (irb->scsw.cc == 1) | 834 | if (scsw_cc(&irb->scsw) == 1) |
| 898 | /* Basic sense hasn't started. Try again. */ | 835 | /* Basic sense hasn't started. Try again. */ |
| 899 | ccw_device_do_sense(cdev, irb); | 836 | ccw_device_do_sense(cdev, irb); |
| 900 | else { | 837 | else { |
| @@ -912,7 +849,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 912 | * only deliver the halt/clear interrupt to the device driver as if it | 849 | * only deliver the halt/clear interrupt to the device driver as if it |
| 913 | * had killed the original request. | 850 | * had killed the original request. |
| 914 | */ | 851 | */ |
| 915 | if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { | 852 | if (scsw_fctl(&irb->scsw) & |
| 853 | (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { | ||
| 916 | /* Retry Basic Sense if requested. */ | 854 | /* Retry Basic Sense if requested. */ |
| 917 | if (cdev->private->flags.intretry) { | 855 | if (cdev->private->flags.intretry) { |
| 918 | cdev->private->flags.intretry = 0; | 856 | cdev->private->flags.intretry = 0; |
| @@ -986,12 +924,10 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 986 | ERR_PTR(-EIO)); | 924 | ERR_PTR(-EIO)); |
| 987 | } | 925 | } |
| 988 | 926 | ||
| 989 | void device_kill_io(struct subchannel *sch) | 927 | void ccw_device_kill_io(struct ccw_device *cdev) |
| 990 | { | 928 | { |
| 991 | int ret; | 929 | int ret; |
| 992 | struct ccw_device *cdev; | ||
| 993 | 930 | ||
| 994 | cdev = sch_get_cdev(sch); | ||
| 995 | ret = ccw_device_cancel_halt_clear(cdev); | 931 | ret = ccw_device_cancel_halt_clear(cdev); |
| 996 | if (ret == -EBUSY) { | 932 | if (ret == -EBUSY) { |
| 997 | ccw_device_set_timeout(cdev, 3*HZ); | 933 | ccw_device_set_timeout(cdev, 3*HZ); |
| @@ -1021,9 +957,9 @@ ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 1021 | case DEV_EVENT_INTERRUPT: | 957 | case DEV_EVENT_INTERRUPT: |
| 1022 | irb = (struct irb *) __LC_IRB; | 958 | irb = (struct irb *) __LC_IRB; |
| 1023 | /* Check for unsolicited interrupt. */ | 959 | /* Check for unsolicited interrupt. */ |
| 1024 | if ((irb->scsw.stctl == | 960 | if ((scsw_stctl(&irb->scsw) == |
| 1025 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && | 961 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && |
| 1026 | (!irb->scsw.cc)) | 962 | (!scsw_cc(&irb->scsw))) |
| 1027 | /* FIXME: we should restart stlck here, but this | 963 | /* FIXME: we should restart stlck here, but this |
| 1028 | * is extremely unlikely ... */ | 964 | * is extremely unlikely ... */ |
| 1029 | goto out_wakeup; | 965 | goto out_wakeup; |
| @@ -1055,17 +991,14 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 1055 | ccw_device_sense_id_start(cdev); | 991 | ccw_device_sense_id_start(cdev); |
| 1056 | } | 992 | } |
| 1057 | 993 | ||
| 1058 | void | 994 | void ccw_device_trigger_reprobe(struct ccw_device *cdev) |
| 1059 | device_trigger_reprobe(struct subchannel *sch) | ||
| 1060 | { | 995 | { |
| 1061 | struct ccw_device *cdev; | 996 | struct subchannel *sch; |
| 1062 | 997 | ||
| 1063 | cdev = sch_get_cdev(sch); | ||
| 1064 | if (!cdev) | ||
| 1065 | return; | ||
| 1066 | if (cdev->private->state != DEV_STATE_DISCONNECTED) | 998 | if (cdev->private->state != DEV_STATE_DISCONNECTED) |
| 1067 | return; | 999 | return; |
| 1068 | 1000 | ||
| 1001 | sch = to_subchannel(cdev->dev.parent); | ||
| 1069 | /* Update some values. */ | 1002 | /* Update some values. */ |
| 1070 | if (stsch(sch->schid, &sch->schib)) | 1003 | if (stsch(sch->schid, &sch->schib)) |
| 1071 | return; | 1004 | return; |
| @@ -1081,7 +1014,6 @@ device_trigger_reprobe(struct subchannel *sch) | |||
| 1081 | sch->schib.pmcw.ena = 0; | 1014 | sch->schib.pmcw.ena = 0; |
| 1082 | if ((sch->lpm & (sch->lpm - 1)) != 0) | 1015 | if ((sch->lpm & (sch->lpm - 1)) != 0) |
| 1083 | sch->schib.pmcw.mp = 1; | 1016 | sch->schib.pmcw.mp = 1; |
| 1084 | sch->schib.pmcw.intparm = (u32)(addr_t)sch; | ||
| 1085 | /* We should also update ssd info, but this has to wait. */ | 1017 | /* We should also update ssd info, but this has to wait. */ |
| 1086 | /* Check if this is another device which appeared on the same sch. */ | 1018 | /* Check if this is another device which appeared on the same sch. */ |
| 1087 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { | 1019 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { |
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index cba7020517ed..1bdaa614e34f 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c | |||
| @@ -196,7 +196,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev) | |||
| 196 | irb = &cdev->private->irb; | 196 | irb = &cdev->private->irb; |
| 197 | 197 | ||
| 198 | /* Check the error cases. */ | 198 | /* Check the error cases. */ |
| 199 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { | 199 | if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { |
| 200 | /* Retry Sense ID if requested. */ | 200 | /* Retry Sense ID if requested. */ |
| 201 | if (cdev->private->flags.intretry) { | 201 | if (cdev->private->flags.intretry) { |
| 202 | cdev->private->flags.intretry = 0; | 202 | cdev->private->flags.intretry = 0; |
| @@ -234,10 +234,10 @@ ccw_device_check_sense_id(struct ccw_device *cdev) | |||
| 234 | irb->ecw[6], irb->ecw[7]); | 234 | irb->ecw[6], irb->ecw[7]); |
| 235 | return -EAGAIN; | 235 | return -EAGAIN; |
| 236 | } | 236 | } |
| 237 | if (irb->scsw.cc == 3) { | 237 | if (irb->scsw.cmd.cc == 3) { |
| 238 | u8 lpm; | 238 | u8 lpm; |
| 239 | 239 | ||
| 240 | lpm = to_io_private(sch)->orb.lpm; | 240 | lpm = to_io_private(sch)->orb.cmd.lpm; |
| 241 | if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) | 241 | if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) |
| 242 | CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x " | 242 | CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x " |
| 243 | "on subchannel 0.%x.%04x is " | 243 | "on subchannel 0.%x.%04x is " |
| @@ -248,9 +248,9 @@ ccw_device_check_sense_id(struct ccw_device *cdev) | |||
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | /* Did we get a proper answer ? */ | 250 | /* Did we get a proper answer ? */ |
| 251 | if (irb->scsw.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF && | 251 | if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF && |
| 252 | cdev->private->senseid.reserved == 0xFF) { | 252 | cdev->private->senseid.reserved == 0xFF) { |
| 253 | if (irb->scsw.count < sizeof(struct senseid) - 8) | 253 | if (irb->scsw.cmd.count < sizeof(struct senseid) - 8) |
| 254 | cdev->private->flags.esid = 1; | 254 | cdev->private->flags.esid = 1; |
| 255 | return 0; /* Success */ | 255 | return 0; /* Success */ |
| 256 | } | 256 | } |
| @@ -260,7 +260,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev) | |||
| 260 | "subchannel 0.%x.%04x returns status %02X%02X\n", | 260 | "subchannel 0.%x.%04x returns status %02X%02X\n", |
| 261 | cdev->private->dev_id.devno, sch->schid.ssid, | 261 | cdev->private->dev_id.devno, sch->schid.ssid, |
| 262 | sch->schid.sch_no, | 262 | sch->schid.sch_no, |
| 263 | irb->scsw.dstat, irb->scsw.cstat); | 263 | irb->scsw.cmd.dstat, irb->scsw.cmd.cstat); |
| 264 | return -EAGAIN; | 264 | return -EAGAIN; |
| 265 | } | 265 | } |
| 266 | 266 | ||
| @@ -277,9 +277,9 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 277 | sch = to_subchannel(cdev->dev.parent); | 277 | sch = to_subchannel(cdev->dev.parent); |
| 278 | irb = (struct irb *) __LC_IRB; | 278 | irb = (struct irb *) __LC_IRB; |
| 279 | /* Retry sense id, if needed. */ | 279 | /* Retry sense id, if needed. */ |
| 280 | if (irb->scsw.stctl == | 280 | if (irb->scsw.cmd.stctl == |
| 281 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 281 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
| 282 | if ((irb->scsw.cc == 1) || !irb->scsw.actl) { | 282 | if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) { |
| 283 | ret = __ccw_device_sense_id_start(cdev); | 283 | ret = __ccw_device_sense_id_start(cdev); |
| 284 | if (ret && ret != -EBUSY) | 284 | if (ret && ret != -EBUSY) |
| 285 | ccw_device_sense_id_done(cdev, ret); | 285 | ccw_device_sense_id_done(cdev, ret); |
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index f308ad55a6d5..ee1a28310fbb 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <asm/ccwdev.h> | 17 | #include <asm/ccwdev.h> |
| 18 | #include <asm/idals.h> | 18 | #include <asm/idals.h> |
| 19 | #include <asm/chpid.h> | 19 | #include <asm/chpid.h> |
| 20 | #include <asm/fcx.h> | ||
| 20 | 21 | ||
| 21 | #include "cio.h" | 22 | #include "cio.h" |
| 22 | #include "cio_debug.h" | 23 | #include "cio_debug.h" |
| @@ -179,8 +180,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, | |||
| 179 | return -EBUSY; | 180 | return -EBUSY; |
| 180 | } | 181 | } |
| 181 | if (cdev->private->state != DEV_STATE_ONLINE || | 182 | if (cdev->private->state != DEV_STATE_ONLINE || |
| 182 | ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) && | 183 | ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) && |
| 183 | !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) || | 184 | !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) || |
| 184 | cdev->private->flags.doverify) | 185 | cdev->private->flags.doverify) |
| 185 | return -EBUSY; | 186 | return -EBUSY; |
| 186 | ret = cio_set_options (sch, flags); | 187 | ret = cio_set_options (sch, flags); |
| @@ -379,7 +380,7 @@ int ccw_device_resume(struct ccw_device *cdev) | |||
| 379 | if (cdev->private->state == DEV_STATE_NOT_OPER) | 380 | if (cdev->private->state == DEV_STATE_NOT_OPER) |
| 380 | return -ENODEV; | 381 | return -ENODEV; |
| 381 | if (cdev->private->state != DEV_STATE_ONLINE || | 382 | if (cdev->private->state != DEV_STATE_ONLINE || |
| 382 | !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) | 383 | !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) |
| 383 | return -EINVAL; | 384 | return -EINVAL; |
| 384 | return cio_resume(sch); | 385 | return cio_resume(sch); |
| 385 | } | 386 | } |
| @@ -404,7 +405,7 @@ ccw_device_call_handler(struct ccw_device *cdev) | |||
| 404 | * - fast notification was requested (primary status) | 405 | * - fast notification was requested (primary status) |
| 405 | * - unsolicited interrupts | 406 | * - unsolicited interrupts |
| 406 | */ | 407 | */ |
| 407 | stctl = cdev->private->irb.scsw.stctl; | 408 | stctl = scsw_stctl(&cdev->private->irb.scsw); |
| 408 | ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || | 409 | ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || |
| 409 | (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || | 410 | (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || |
| 410 | (stctl == SCSW_STCTL_STATUS_PEND); | 411 | (stctl == SCSW_STCTL_STATUS_PEND); |
| @@ -528,14 +529,15 @@ ccw_device_stlck(struct ccw_device *cdev) | |||
| 528 | cio_disable_subchannel(sch); //FIXME: return code? | 529 | cio_disable_subchannel(sch); //FIXME: return code? |
| 529 | goto out_unlock; | 530 | goto out_unlock; |
| 530 | } | 531 | } |
| 531 | cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND; | 532 | cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND; |
| 532 | spin_unlock_irqrestore(sch->lock, flags); | 533 | spin_unlock_irqrestore(sch->lock, flags); |
| 533 | wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0); | 534 | wait_event(cdev->private->wait_q, |
| 535 | cdev->private->irb.scsw.cmd.actl == 0); | ||
| 534 | spin_lock_irqsave(sch->lock, flags); | 536 | spin_lock_irqsave(sch->lock, flags); |
| 535 | cio_disable_subchannel(sch); //FIXME: return code? | 537 | cio_disable_subchannel(sch); //FIXME: return code? |
| 536 | if ((cdev->private->irb.scsw.dstat != | 538 | if ((cdev->private->irb.scsw.cmd.dstat != |
| 537 | (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || | 539 | (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || |
| 538 | (cdev->private->irb.scsw.cstat != 0)) | 540 | (cdev->private->irb.scsw.cmd.cstat != 0)) |
| 539 | ret = -EIO; | 541 | ret = -EIO; |
| 540 | /* Clear irb. */ | 542 | /* Clear irb. */ |
| 541 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | 543 | memset(&cdev->private->irb, 0, sizeof(struct irb)); |
| @@ -568,6 +570,122 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id) | |||
| 568 | } | 570 | } |
| 569 | EXPORT_SYMBOL(ccw_device_get_id); | 571 | EXPORT_SYMBOL(ccw_device_get_id); |
| 570 | 572 | ||
| 573 | /** | ||
| 574 | * ccw_device_tm_start_key - perform start function | ||
| 575 | * @cdev: ccw device on which to perform the start function | ||
| 576 | * @tcw: transport-command word to be started | ||
| 577 | * @intparm: user defined parameter to be passed to the interrupt handler | ||
| 578 | * @lpm: mask of paths to use | ||
| 579 | * @key: storage key to use for storage access | ||
| 580 | * | ||
| 581 | * Start the tcw on the given ccw device. Return zero on success, non-zero | ||
| 582 | * otherwise. | ||
| 583 | */ | ||
| 584 | int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, | ||
| 585 | unsigned long intparm, u8 lpm, u8 key) | ||
| 586 | { | ||
| 587 | struct subchannel *sch; | ||
| 588 | int rc; | ||
| 589 | |||
| 590 | sch = to_subchannel(cdev->dev.parent); | ||
| 591 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
| 592 | return -EIO; | ||
| 593 | /* Adjust requested path mask to exclude varied off paths. */ | ||
| 594 | if (lpm) { | ||
| 595 | lpm &= sch->opm; | ||
| 596 | if (lpm == 0) | ||
| 597 | return -EACCES; | ||
| 598 | } | ||
| 599 | rc = cio_tm_start_key(sch, tcw, lpm, key); | ||
| 600 | if (rc == 0) | ||
| 601 | cdev->private->intparm = intparm; | ||
| 602 | return rc; | ||
| 603 | } | ||
| 604 | EXPORT_SYMBOL(ccw_device_tm_start_key); | ||
| 605 | |||
| 606 | /** | ||
| 607 | * ccw_device_tm_start_timeout_key - perform start function | ||
| 608 | * @cdev: ccw device on which to perform the start function | ||
| 609 | * @tcw: transport-command word to be started | ||
| 610 | * @intparm: user defined parameter to be passed to the interrupt handler | ||
| 611 | * @lpm: mask of paths to use | ||
| 612 | * @key: storage key to use for storage access | ||
| 613 | * @expires: time span in jiffies after which to abort request | ||
| 614 | * | ||
| 615 | * Start the tcw on the given ccw device. Return zero on success, non-zero | ||
| 616 | * otherwise. | ||
| 617 | */ | ||
| 618 | int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, | ||
| 619 | unsigned long intparm, u8 lpm, u8 key, | ||
| 620 | int expires) | ||
| 621 | { | ||
| 622 | int ret; | ||
| 623 | |||
| 624 | ccw_device_set_timeout(cdev, expires); | ||
| 625 | ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key); | ||
| 626 | if (ret != 0) | ||
| 627 | ccw_device_set_timeout(cdev, 0); | ||
| 628 | return ret; | ||
| 629 | } | ||
| 630 | EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); | ||
| 631 | |||
| 632 | /** | ||
| 633 | * ccw_device_tm_start - perform start function | ||
| 634 | * @cdev: ccw device on which to perform the start function | ||
| 635 | * @tcw: transport-command word to be started | ||
| 636 | * @intparm: user defined parameter to be passed to the interrupt handler | ||
| 637 | * @lpm: mask of paths to use | ||
| 638 | * | ||
| 639 | * Start the tcw on the given ccw device. Return zero on success, non-zero | ||
| 640 | * otherwise. | ||
| 641 | */ | ||
| 642 | int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw, | ||
| 643 | unsigned long intparm, u8 lpm) | ||
| 644 | { | ||
| 645 | return ccw_device_tm_start_key(cdev, tcw, intparm, lpm, | ||
| 646 | PAGE_DEFAULT_KEY); | ||
| 647 | } | ||
| 648 | EXPORT_SYMBOL(ccw_device_tm_start); | ||
| 649 | |||
| 650 | /** | ||
| 651 | * ccw_device_tm_start_timeout - perform start function | ||
| 652 | * @cdev: ccw device on which to perform the start function | ||
| 653 | * @tcw: transport-command word to be started | ||
| 654 | * @intparm: user defined parameter to be passed to the interrupt handler | ||
| 655 | * @lpm: mask of paths to use | ||
| 656 | * @expires: time span in jiffies after which to abort request | ||
| 657 | * | ||
| 658 | * Start the tcw on the given ccw device. Return zero on success, non-zero | ||
| 659 | * otherwise. | ||
| 660 | */ | ||
| 661 | int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw, | ||
| 662 | unsigned long intparm, u8 lpm, int expires) | ||
| 663 | { | ||
| 664 | return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, | ||
| 665 | PAGE_DEFAULT_KEY, expires); | ||
| 666 | } | ||
| 667 | EXPORT_SYMBOL(ccw_device_tm_start_timeout); | ||
| 668 | |||
| 669 | /** | ||
| 670 | * ccw_device_tm_intrg - perform interrogate function | ||
| 671 | * @cdev: ccw device on which to perform the interrogate function | ||
| 672 | * | ||
| 673 | * Perform an interrogate function on the given ccw device. Return zero on | ||
| 674 | * success, non-zero otherwise. | ||
| 675 | */ | ||
| 676 | int ccw_device_tm_intrg(struct ccw_device *cdev) | ||
| 677 | { | ||
| 678 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
| 679 | |||
| 680 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
| 681 | return -EIO; | ||
| 682 | if (!scsw_is_tm(&sch->schib.scsw) || | ||
| 683 | !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND)) | ||
| 684 | return -EINVAL; | ||
| 685 | return cio_tm_intrg(sch); | ||
| 686 | } | ||
| 687 | EXPORT_SYMBOL(ccw_device_tm_intrg); | ||
| 688 | |||
| 571 | // FIXME: these have to go: | 689 | // FIXME: these have to go: |
| 572 | 690 | ||
| 573 | int | 691 | int |
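For orientation, a minimal sketch of how a CCW device driver might call the new transport-mode start interface added above. It assumes the usual driver conventions (the caller holds the ccw device lock and the tcw was assembled beforehand, e.g. with the fcx/itcw helpers from this series); the function name my_start_tm_io, the interruption parameter value and the messages are illustrative and not part of this patch.

#include <linux/device.h>
#include <asm/ccwdev.h>
#include <asm/fcx.h>

static int my_start_tm_io(struct ccw_device *cdev, struct tcw *tcw, u8 lpm)
{
	int rc;

	/* The intparm value is handed back to the driver's irq handler. */
	rc = ccw_device_tm_start(cdev, tcw, 0xcafe0001, lpm);
	if (rc == -EACCES)
		/* a non-zero lpm matched no usable path */
		dev_warn(&cdev->dev, "requested paths are unavailable\n");
	else if (rc)
		dev_warn(&cdev->dev, "tm start failed, rc=%d\n", rc);
	return rc;
}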
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 5cf7be008e98..86bc94eb607f 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c | |||
| @@ -28,13 +28,13 @@ | |||
| 28 | * Helper function called from interrupt context to decide whether an | 28 | * Helper function called from interrupt context to decide whether an |
| 29 | * operation should be tried again. | 29 | * operation should be tried again. |
| 30 | */ | 30 | */ |
| 31 | static int __ccw_device_should_retry(struct scsw *scsw) | 31 | static int __ccw_device_should_retry(union scsw *scsw) |
| 32 | { | 32 | { |
| 33 | /* CC is only valid if start function bit is set. */ | 33 | /* CC is only valid if start function bit is set. */ |
| 34 | if ((scsw->fctl & SCSW_FCTL_START_FUNC) && scsw->cc == 1) | 34 | if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1) |
| 35 | return 1; | 35 | return 1; |
| 36 | /* No more activity. For sense and set PGID we stubbornly try again. */ | 36 | /* No more activity. For sense and set PGID we stubbornly try again. */ |
| 37 | if (!scsw->actl) | 37 | if (!scsw->cmd.actl) |
| 38 | return 1; | 38 | return 1; |
| 39 | return 0; | 39 | return 0; |
| 40 | } | 40 | } |
| @@ -125,7 +125,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) | |||
| 125 | 125 | ||
| 126 | sch = to_subchannel(cdev->dev.parent); | 126 | sch = to_subchannel(cdev->dev.parent); |
| 127 | irb = &cdev->private->irb; | 127 | irb = &cdev->private->irb; |
| 128 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { | 128 | if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { |
| 129 | /* Retry Sense PGID if requested. */ | 129 | /* Retry Sense PGID if requested. */ |
| 130 | if (cdev->private->flags.intretry) { | 130 | if (cdev->private->flags.intretry) { |
| 131 | cdev->private->flags.intretry = 0; | 131 | cdev->private->flags.intretry = 0; |
| @@ -155,10 +155,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) | |||
| 155 | irb->ecw[6], irb->ecw[7]); | 155 | irb->ecw[6], irb->ecw[7]); |
| 156 | return -EAGAIN; | 156 | return -EAGAIN; |
| 157 | } | 157 | } |
| 158 | if (irb->scsw.cc == 3) { | 158 | if (irb->scsw.cmd.cc == 3) { |
| 159 | u8 lpm; | 159 | u8 lpm; |
| 160 | 160 | ||
| 161 | lpm = to_io_private(sch)->orb.lpm; | 161 | lpm = to_io_private(sch)->orb.cmd.lpm; |
| 162 | CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x," | 162 | CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x," |
| 163 | " lpm %02X, became 'not operational'\n", | 163 | " lpm %02X, became 'not operational'\n", |
| 164 | cdev->private->dev_id.devno, sch->schid.ssid, | 164 | cdev->private->dev_id.devno, sch->schid.ssid, |
| @@ -188,7 +188,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 188 | 188 | ||
| 189 | irb = (struct irb *) __LC_IRB; | 189 | irb = (struct irb *) __LC_IRB; |
| 190 | 190 | ||
| 191 | if (irb->scsw.stctl == | 191 | if (irb->scsw.cmd.stctl == |
| 192 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 192 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
| 193 | if (__ccw_device_should_retry(&irb->scsw)) { | 193 | if (__ccw_device_should_retry(&irb->scsw)) { |
| 194 | ret = __ccw_device_sense_pgid_start(cdev); | 194 | ret = __ccw_device_sense_pgid_start(cdev); |
| @@ -331,7 +331,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev) | |||
| 331 | 331 | ||
| 332 | sch = to_subchannel(cdev->dev.parent); | 332 | sch = to_subchannel(cdev->dev.parent); |
| 333 | irb = &cdev->private->irb; | 333 | irb = &cdev->private->irb; |
| 334 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { | 334 | if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { |
| 335 | /* Retry Set PGID if requested. */ | 335 | /* Retry Set PGID if requested. */ |
| 336 | if (cdev->private->flags.intretry) { | 336 | if (cdev->private->flags.intretry) { |
| 337 | cdev->private->flags.intretry = 0; | 337 | cdev->private->flags.intretry = 0; |
| @@ -355,7 +355,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev) | |||
| 355 | irb->ecw[6], irb->ecw[7]); | 355 | irb->ecw[6], irb->ecw[7]); |
| 356 | return -EAGAIN; | 356 | return -EAGAIN; |
| 357 | } | 357 | } |
| 358 | if (irb->scsw.cc == 3) { | 358 | if (irb->scsw.cmd.cc == 3) { |
| 359 | CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x," | 359 | CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x," |
| 360 | " lpm %02X, became 'not operational'\n", | 360 | " lpm %02X, became 'not operational'\n", |
| 361 | cdev->private->dev_id.devno, sch->schid.ssid, | 361 | cdev->private->dev_id.devno, sch->schid.ssid, |
| @@ -376,7 +376,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev) | |||
| 376 | 376 | ||
| 377 | sch = to_subchannel(cdev->dev.parent); | 377 | sch = to_subchannel(cdev->dev.parent); |
| 378 | irb = &cdev->private->irb; | 378 | irb = &cdev->private->irb; |
| 379 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { | 379 | if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { |
| 380 | /* Retry NOP if requested. */ | 380 | /* Retry NOP if requested. */ |
| 381 | if (cdev->private->flags.intretry) { | 381 | if (cdev->private->flags.intretry) { |
| 382 | cdev->private->flags.intretry = 0; | 382 | cdev->private->flags.intretry = 0; |
| @@ -384,7 +384,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev) | |||
| 384 | } | 384 | } |
| 385 | return -ETIME; | 385 | return -ETIME; |
| 386 | } | 386 | } |
| 387 | if (irb->scsw.cc == 3) { | 387 | if (irb->scsw.cmd.cc == 3) { |
| 388 | CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x," | 388 | CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x," |
| 389 | " lpm %02X, became 'not operational'\n", | 389 | " lpm %02X, became 'not operational'\n", |
| 390 | cdev->private->dev_id.devno, sch->schid.ssid, | 390 | cdev->private->dev_id.devno, sch->schid.ssid, |
| @@ -438,7 +438,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 438 | 438 | ||
| 439 | irb = (struct irb *) __LC_IRB; | 439 | irb = (struct irb *) __LC_IRB; |
| 440 | 440 | ||
| 441 | if (irb->scsw.stctl == | 441 | if (irb->scsw.cmd.stctl == |
| 442 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 442 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
| 443 | if (__ccw_device_should_retry(&irb->scsw)) | 443 | if (__ccw_device_should_retry(&irb->scsw)) |
| 444 | __ccw_device_verify_start(cdev); | 444 | __ccw_device_verify_start(cdev); |
| @@ -544,7 +544,7 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 544 | 544 | ||
| 545 | irb = (struct irb *) __LC_IRB; | 545 | irb = (struct irb *) __LC_IRB; |
| 546 | 546 | ||
| 547 | if (irb->scsw.stctl == | 547 | if (irb->scsw.cmd.stctl == |
| 548 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 548 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
| 549 | if (__ccw_device_should_retry(&irb->scsw)) | 549 | if (__ccw_device_should_retry(&irb->scsw)) |
| 550 | __ccw_device_disband_start(cdev); | 550 | __ccw_device_disband_start(cdev); |
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 4a38993000f2..1b03c5423be2 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c | |||
| @@ -29,9 +29,11 @@ | |||
| 29 | static void | 29 | static void |
| 30 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) | 30 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) |
| 31 | { | 31 | { |
| 32 | if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | | 32 | char dbf_text[15]; |
| 33 | SCHN_STAT_CHN_CTRL_CHK | | 33 | |
| 34 | SCHN_STAT_INTF_CTRL_CHK))) | 34 | if (!scsw_is_valid_cstat(&irb->scsw) || |
| 35 | !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK | | ||
| 36 | SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK))) | ||
| 35 | return; | 37 | return; |
| 36 | CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " | 38 | CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " |
| 37 | "received" | 39 | "received" |
| @@ -39,15 +41,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) | |||
| 39 | ": %02X sch_stat : %02X\n", | 41 | ": %02X sch_stat : %02X\n", |
| 40 | cdev->private->dev_id.devno, cdev->private->schid.ssid, | 42 | cdev->private->dev_id.devno, cdev->private->schid.ssid, |
| 41 | cdev->private->schid.sch_no, | 43 | cdev->private->schid.sch_no, |
| 42 | irb->scsw.dstat, irb->scsw.cstat); | 44 | scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); |
| 43 | 45 | sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); | |
| 44 | if (irb->scsw.cc != 3) { | 46 | CIO_TRACE_EVENT(0, dbf_text); |
| 45 | char dbf_text[15]; | 47 | CIO_HEX_EVENT(0, irb, sizeof(struct irb)); |
| 46 | |||
| 47 | sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); | ||
| 48 | CIO_TRACE_EVENT(0, dbf_text); | ||
| 49 | CIO_HEX_EVENT(0, irb, sizeof (struct irb)); | ||
| 50 | } | ||
| 51 | } | 48 | } |
| 52 | 49 | ||
| 53 | /* | 50 | /* |
| @@ -81,12 +78,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) | |||
| 81 | * are conditions that have to be met for the extended control | 78 | * are conditions that have to be met for the extended control |
| 82 | * bit to have meaning. Sick. | 79 | * bit to have meaning. Sick. |
| 83 | */ | 80 | */ |
| 84 | cdev->private->irb.scsw.ectl = 0; | 81 | cdev->private->irb.scsw.cmd.ectl = 0; |
| 85 | if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) && | 82 | if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) && |
| 86 | !(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS)) | 83 | !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS)) |
| 87 | cdev->private->irb.scsw.ectl = irb->scsw.ectl; | 84 | cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl; |
| 88 | /* Check if extended control word is valid. */ | 85 | /* Check if extended control word is valid. */ |
| 89 | if (!cdev->private->irb.scsw.ectl) | 86 | if (!cdev->private->irb.scsw.cmd.ectl) |
| 90 | return; | 87 | return; |
| 91 | /* Copy concurrent sense / model dependent information. */ | 88 | /* Copy concurrent sense / model dependent information. */ |
| 92 | memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw)); | 89 | memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw)); |
| @@ -98,11 +95,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) | |||
| 98 | static int | 95 | static int |
| 99 | ccw_device_accumulate_esw_valid(struct irb *irb) | 96 | ccw_device_accumulate_esw_valid(struct irb *irb) |
| 100 | { | 97 | { |
| 101 | if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) | 98 | if (!irb->scsw.cmd.eswf && |
| 99 | (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND)) | ||
| 102 | return 0; | 100 | return 0; |
| 103 | if (irb->scsw.stctl == | 101 | if (irb->scsw.cmd.stctl == |
| 104 | (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && | 102 | (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && |
| 105 | !(irb->scsw.actl & SCSW_ACTL_SUSPENDED)) | 103 | !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) |
| 106 | return 0; | 104 | return 0; |
| 107 | return 1; | 105 | return 1; |
| 108 | } | 106 | } |
| @@ -125,7 +123,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) | |||
| 125 | cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum; | 123 | cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum; |
| 126 | 124 | ||
| 127 | /* Copy subchannel logout information if esw is of format 0. */ | 125 | /* Copy subchannel logout information if esw is of format 0. */ |
| 128 | if (irb->scsw.eswf) { | 126 | if (irb->scsw.cmd.eswf) { |
| 129 | cdev_sublog = &cdev_irb->esw.esw0.sublog; | 127 | cdev_sublog = &cdev_irb->esw.esw0.sublog; |
| 130 | sublog = &irb->esw.esw0.sublog; | 128 | sublog = &irb->esw.esw0.sublog; |
| 131 | /* Copy extended status flags. */ | 129 | /* Copy extended status flags. */ |
| @@ -134,7 +132,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) | |||
| 134 | * Copy fields that have a meaning for channel data check | 132 | * Copy fields that have a meaning for channel data check |
| 135 | * channel control check and interface control check. | 133 | * channel control check and interface control check. |
| 136 | */ | 134 | */ |
| 137 | if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | | 135 | if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK | |
| 138 | SCHN_STAT_CHN_CTRL_CHK | | 136 | SCHN_STAT_CHN_CTRL_CHK | |
| 139 | SCHN_STAT_INTF_CTRL_CHK)) { | 137 | SCHN_STAT_INTF_CTRL_CHK)) { |
| 140 | /* Copy ancillary report bit. */ | 138 | /* Copy ancillary report bit. */ |
| @@ -155,7 +153,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) | |||
| 155 | /* Copy i/o-error alert. */ | 153 | /* Copy i/o-error alert. */ |
| 156 | cdev_sublog->ioerr = sublog->ioerr; | 154 | cdev_sublog->ioerr = sublog->ioerr; |
| 157 | /* Copy channel path timeout bit. */ | 155 | /* Copy channel path timeout bit. */ |
| 158 | if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK) | 156 | if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK) |
| 159 | cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt; | 157 | cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt; |
| 160 | /* Copy failing storage address validity flag. */ | 158 | /* Copy failing storage address validity flag. */ |
| 161 | cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf; | 159 | cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf; |
| @@ -200,24 +198,24 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) | |||
| 200 | * If not, the remaining bits have no meaning and we must ignore them. | 198 | * If not, the remaining bits have no meaning and we must ignore them. |
| 201 | * The esw is not meaningful either... | 199 | * The esw is not meaningful either... |
| 202 | */ | 200 | */ |
| 203 | if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) | 201 | if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) |
| 204 | return; | 202 | return; |
| 205 | 203 | ||
| 206 | /* Check for channel checks and interface control checks. */ | 204 | /* Check for channel checks and interface control checks. */ |
| 207 | ccw_device_msg_control_check(cdev, irb); | 205 | ccw_device_msg_control_check(cdev, irb); |
| 208 | 206 | ||
| 209 | /* Check for path not operational. */ | 207 | /* Check for path not operational. */ |
| 210 | if (irb->scsw.pno && irb->scsw.fctl != 0 && | 208 | if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw)) |
| 211 | (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) || | ||
| 212 | (irb->scsw.actl & SCSW_ACTL_SUSPENDED))) | ||
| 213 | ccw_device_path_notoper(cdev); | 209 | ccw_device_path_notoper(cdev); |
| 214 | 210 | /* No irb accumulation for transport mode irbs. */ | |
| 211 | if (scsw_is_tm(&irb->scsw)) { | ||
| 212 | memcpy(&cdev->private->irb, irb, sizeof(struct irb)); | ||
| 213 | return; | ||
| 214 | } | ||
| 215 | /* | 215 | /* |
| 216 | * Don't accumulate unsolicited interrupts. | 216 | * Don't accumulate unsolicited interrupts. |
| 217 | */ | 217 | */ |
| 218 | if ((irb->scsw.stctl == | 218 | if (!scsw_is_solicited(&irb->scsw)) |
| 219 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && | ||
| 220 | (!irb->scsw.cc)) | ||
| 221 | return; | 219 | return; |
| 222 | 220 | ||
| 223 | cdev_irb = &cdev->private->irb; | 221 | cdev_irb = &cdev->private->irb; |
| @@ -227,62 +225,63 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) | |||
| 227 | * status at the subchannel has been cleared and we must not pass | 225 | * status at the subchannel has been cleared and we must not pass |
| 228 | * intermediate accumulated status to the device driver. | 226 | * intermediate accumulated status to the device driver. |
| 229 | */ | 227 | */ |
| 230 | if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) | 228 | if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) |
| 231 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | 229 | memset(&cdev->private->irb, 0, sizeof(struct irb)); |
| 232 | 230 | ||
| 233 | /* Copy bits which are valid only for the start function. */ | 231 | /* Copy bits which are valid only for the start function. */ |
| 234 | if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) { | 232 | if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) { |
| 235 | /* Copy key. */ | 233 | /* Copy key. */ |
| 236 | cdev_irb->scsw.key = irb->scsw.key; | 234 | cdev_irb->scsw.cmd.key = irb->scsw.cmd.key; |
| 237 | /* Copy suspend control bit. */ | 235 | /* Copy suspend control bit. */ |
| 238 | cdev_irb->scsw.sctl = irb->scsw.sctl; | 236 | cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl; |
| 239 | /* Accumulate deferred condition code. */ | 237 | /* Accumulate deferred condition code. */ |
| 240 | cdev_irb->scsw.cc |= irb->scsw.cc; | 238 | cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc; |
| 241 | /* Copy ccw format bit. */ | 239 | /* Copy ccw format bit. */ |
| 242 | cdev_irb->scsw.fmt = irb->scsw.fmt; | 240 | cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt; |
| 243 | /* Copy prefetch bit. */ | 241 | /* Copy prefetch bit. */ |
| 244 | cdev_irb->scsw.pfch = irb->scsw.pfch; | 242 | cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch; |
| 245 | /* Copy initial-status-interruption-control. */ | 243 | /* Copy initial-status-interruption-control. */ |
| 246 | cdev_irb->scsw.isic = irb->scsw.isic; | 244 | cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic; |
| 247 | /* Copy address limit checking control. */ | 245 | /* Copy address limit checking control. */ |
| 248 | cdev_irb->scsw.alcc = irb->scsw.alcc; | 246 | cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc; |
| 249 | /* Copy suppress suspend bit. */ | 247 | /* Copy suppress suspend bit. */ |
| 250 | cdev_irb->scsw.ssi = irb->scsw.ssi; | 248 | cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi; |
| 251 | } | 249 | } |
| 252 | 250 | ||
| 253 | /* Take care of the extended control bit and extended control word. */ | 251 | /* Take care of the extended control bit and extended control word. */ |
| 254 | ccw_device_accumulate_ecw(cdev, irb); | 252 | ccw_device_accumulate_ecw(cdev, irb); |
| 255 | 253 | ||
| 256 | /* Accumulate function control. */ | 254 | /* Accumulate function control. */ |
| 257 | cdev_irb->scsw.fctl |= irb->scsw.fctl; | 255 | cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl; |
| 258 | /* Copy activity control. */ | 256 | /* Copy activity control. */ |
| 259 | cdev_irb->scsw.actl= irb->scsw.actl; | 257 | cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl; |
| 260 | /* Accumulate status control. */ | 258 | /* Accumulate status control. */ |
| 261 | cdev_irb->scsw.stctl |= irb->scsw.stctl; | 259 | cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl; |
| 262 | /* | 260 | /* |
| 263 | * Copy ccw address if it is valid. This is a bit simplified | 261 | * Copy ccw address if it is valid. This is a bit simplified |
| 264 | * but should be close enough for all practical purposes. | 262 | * but should be close enough for all practical purposes. |
| 265 | */ | 263 | */ |
| 266 | if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) || | 264 | if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) || |
| 267 | ((irb->scsw.stctl == | 265 | ((irb->scsw.cmd.stctl == |
| 268 | (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) && | 266 | (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) && |
| 269 | (irb->scsw.actl & SCSW_ACTL_DEVACT) && | 267 | (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) && |
| 270 | (irb->scsw.actl & SCSW_ACTL_SCHACT)) || | 268 | (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) || |
| 271 | (irb->scsw.actl & SCSW_ACTL_SUSPENDED)) | 269 | (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) |
| 272 | cdev_irb->scsw.cpa = irb->scsw.cpa; | 270 | cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa; |
| 273 | /* Accumulate device status, but not the device busy flag. */ | 271 | /* Accumulate device status, but not the device busy flag. */ |
| 274 | cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY; | 272 | cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY; |
| 275 | /* dstat is not always valid. */ | 273 | /* dstat is not always valid. */ |
| 276 | if (irb->scsw.stctl & | 274 | if (irb->scsw.cmd.stctl & |
| 277 | (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS | 275 | (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS |
| 278 | | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS)) | 276 | | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS)) |
| 279 | cdev_irb->scsw.dstat |= irb->scsw.dstat; | 277 | cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat; |
| 280 | /* Accumulate subchannel status. */ | 278 | /* Accumulate subchannel status. */ |
| 281 | cdev_irb->scsw.cstat |= irb->scsw.cstat; | 279 | cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat; |
| 282 | /* Copy residual count if it is valid. */ | 280 | /* Copy residual count if it is valid. */ |
| 283 | if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) && | 281 | if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) && |
| 284 | (irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0) | 282 | (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) |
| 285 | cdev_irb->scsw.count = irb->scsw.count; | 283 | == 0) |
| 284 | cdev_irb->scsw.cmd.count = irb->scsw.cmd.count; | ||
| 286 | 285 | ||
| 287 | /* Take care of bits in the extended status word. */ | 286 | /* Take care of bits in the extended status word. */ |
| 288 | ccw_device_accumulate_esw(cdev, irb); | 287 | ccw_device_accumulate_esw(cdev, irb); |
| @@ -299,7 +298,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) | |||
| 299 | * sense facility available/supported when enabling the | 298 | * sense facility available/supported when enabling the |
| 300 | * concurrent sense facility. | 299 | * concurrent sense facility. |
| 301 | */ | 300 | */ |
| 302 | if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && | 301 | if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && |
| 303 | !(cdev_irb->esw.esw0.erw.cons)) | 302 | !(cdev_irb->esw.esw0.erw.cons)) |
| 304 | cdev->private->flags.dosense = 1; | 303 | cdev->private->flags.dosense = 1; |
| 305 | } | 304 | } |
| @@ -317,7 +316,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) | |||
| 317 | sch = to_subchannel(cdev->dev.parent); | 316 | sch = to_subchannel(cdev->dev.parent); |
| 318 | 317 | ||
| 319 | /* A sense is required, can we do it now ? */ | 318 | /* A sense is required, can we do it now ? */ |
| 320 | if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) | 319 | if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) |
| 321 | /* | 320 | /* |
| 322 | * we received an Unit Check but we have no final | 321 | * we received an Unit Check but we have no final |
| 323 | * status yet, therefore we must delay the SENSE | 322 | * status yet, therefore we must delay the SENSE |
| @@ -355,20 +354,18 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb) | |||
| 355 | * If not, the remaining bits have no meaning and we must ignore them. | 354 | * If not, the remaining bits have no meaning and we must ignore them. |
| 356 | * The esw is not meaningful either... | 355 | * The esw is not meaningful either... |
| 357 | */ | 356 | */ |
| 358 | if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) | 357 | if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) |
| 359 | return; | 358 | return; |
| 360 | 359 | ||
| 361 | /* Check for channel checks and interface control checks. */ | 360 | /* Check for channel checks and interface control checks. */ |
| 362 | ccw_device_msg_control_check(cdev, irb); | 361 | ccw_device_msg_control_check(cdev, irb); |
| 363 | 362 | ||
| 364 | /* Check for path not operational. */ | 363 | /* Check for path not operational. */ |
| 365 | if (irb->scsw.pno && irb->scsw.fctl != 0 && | 364 | if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw)) |
| 366 | (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) || | ||
| 367 | (irb->scsw.actl & SCSW_ACTL_SUSPENDED))) | ||
| 368 | ccw_device_path_notoper(cdev); | 365 | ccw_device_path_notoper(cdev); |
| 369 | 366 | ||
| 370 | if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && | 367 | if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && |
| 371 | (irb->scsw.dstat & DEV_STAT_CHN_END)) { | 368 | (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) { |
| 372 | cdev->private->irb.esw.esw0.erw.cons = 1; | 369 | cdev->private->irb.esw.esw0.erw.cons = 1; |
| 373 | cdev->private->flags.dosense = 0; | 370 | cdev->private->flags.dosense = 0; |
| 374 | } | 371 | } |
| @@ -386,11 +383,11 @@ int | |||
| 386 | ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb) | 383 | ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb) |
| 387 | { | 384 | { |
| 388 | ccw_device_accumulate_irb(cdev, irb); | 385 | ccw_device_accumulate_irb(cdev, irb); |
| 389 | if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) | 386 | if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) |
| 390 | return -EBUSY; | 387 | return -EBUSY; |
| 391 | /* Check for basic sense. */ | 388 | /* Check for basic sense. */ |
| 392 | if (cdev->private->flags.dosense && | 389 | if (cdev->private->flags.dosense && |
| 393 | !(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) { | 390 | !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) { |
| 394 | cdev->private->irb.esw.esw0.erw.cons = 1; | 391 | cdev->private->irb.esw.esw0.erw.cons = 1; |
| 395 | cdev->private->flags.dosense = 0; | 392 | cdev->private->flags.dosense = 0; |
| 396 | return 0; | 393 | return 0; |
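As an aside on the conversion above: with the scsw union, driver code is expected to go through the scsw_*() accessors and only touch irb->scsw.cmd or irb->scsw.tm after checking the mode. A hedged sketch of that pattern in an interrupt handler follows; the handler name and debug messages are illustrative only.

#include <linux/device.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

static void my_irq_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	/* Most status fields are only meaningful with status pending set. */
	if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
		return;
	dev_dbg(&cdev->dev, "dstat %02x cstat %02x\n",
		scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
	if (scsw_is_tm(&irb->scsw))
		return;	/* transport mode: status details live in the tsb */
	/* Command mode: the cmd view of the scsw union is valid here. */
	dev_dbg(&cdev->dev, "next ccw address %x\n", irb->scsw.cmd.cpa);
}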
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c new file mode 100644 index 000000000000..61677dfbdc9b --- /dev/null +++ b/drivers/s390/cio/fcx.c | |||
| @@ -0,0 +1,350 @@ | |||
| 1 | /* | ||
| 2 | * Functions for assembling fcx enabled I/O control blocks. | ||
| 3 | * | ||
| 4 | * Copyright IBM Corp. 2008 | ||
| 5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/kernel.h> | ||
| 9 | #include <linux/types.h> | ||
| 10 | #include <linux/string.h> | ||
| 11 | #include <linux/errno.h> | ||
| 12 | #include <linux/err.h> | ||
| 13 | #include <linux/module.h> | ||
| 14 | #include <asm/fcx.h> | ||
| 15 | #include "cio.h" | ||
| 16 | |||
| 17 | /** | ||
| 18 | * tcw_get_intrg - return pointer to associated interrogate tcw | ||
| 19 | * @tcw: pointer to the original tcw | ||
| 20 | * | ||
| 21 | * Return a pointer to the interrogate tcw associated with the specified tcw | ||
| 22 | * or %NULL if there is no associated interrogate tcw. | ||
| 23 | */ | ||
| 24 | struct tcw *tcw_get_intrg(struct tcw *tcw) | ||
| 25 | { | ||
| 26 | return (struct tcw *) ((addr_t) tcw->intrg); | ||
| 27 | } | ||
| 28 | EXPORT_SYMBOL(tcw_get_intrg); | ||
| 29 | |||
| 30 | /** | ||
| 31 | * tcw_get_data - return pointer to input/output data associated with tcw | ||
| 32 | * @tcw: pointer to the tcw | ||
| 33 | * | ||
| 34 | * Return the input or output data address specified in the tcw depending | ||
| 35 | * on whether the r-bit or the w-bit is set. If neither bit is set, return | ||
| 36 | * %NULL. | ||
| 37 | */ | ||
| 38 | void *tcw_get_data(struct tcw *tcw) | ||
| 39 | { | ||
| 40 | if (tcw->r) | ||
| 41 | return (void *) ((addr_t) tcw->input); | ||
| 42 | if (tcw->w) | ||
| 43 | return (void *) ((addr_t) tcw->output); | ||
| 44 | return NULL; | ||
| 45 | } | ||
| 46 | EXPORT_SYMBOL(tcw_get_data); | ||
| 47 | |||
| 48 | /** | ||
| 49 | * tcw_get_tccb - return pointer to tccb associated with tcw | ||
| 50 | * @tcw: pointer to the tcw | ||
| 51 | * | ||
| 52 | * Return pointer to the tccb associated with this tcw. | ||
| 53 | */ | ||
| 54 | struct tccb *tcw_get_tccb(struct tcw *tcw) | ||
| 55 | { | ||
| 56 | return (struct tccb *) ((addr_t) tcw->tccb); | ||
| 57 | } | ||
| 58 | EXPORT_SYMBOL(tcw_get_tccb); | ||
| 59 | |||
| 60 | /** | ||
| 61 | * tcw_get_tsb - return pointer to tsb associated with tcw | ||
| 62 | * @tcw: pointer to the tcw | ||
| 63 | * | ||
| 64 | * Return pointer to the tsb associated with this tcw. | ||
| 65 | */ | ||
| 66 | struct tsb *tcw_get_tsb(struct tcw *tcw) | ||
| 67 | { | ||
| 68 | return (struct tsb *) ((addr_t) tcw->tsb); | ||
| 69 | } | ||
| 70 | EXPORT_SYMBOL(tcw_get_tsb); | ||
| 71 | |||
| 72 | /** | ||
| 73 | * tcw_init - initialize tcw data structure | ||
| 74 | * @tcw: pointer to the tcw to be initialized | ||
| 75 | * @r: initial value of the r-bit | ||
| 76 | * @w: initial value of the w-bit | ||
| 77 | * | ||
| 78 | * Initialize all fields of the specified tcw data structure with zero and | ||
| 79 | * fill in the format, flags, r and w fields. | ||
| 80 | */ | ||
| 81 | void tcw_init(struct tcw *tcw, int r, int w) | ||
| 82 | { | ||
| 83 | memset(tcw, 0, sizeof(struct tcw)); | ||
| 84 | tcw->format = TCW_FORMAT_DEFAULT; | ||
| 85 | tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT); | ||
| 86 | if (r) | ||
| 87 | tcw->r = 1; | ||
| 88 | if (w) | ||
| 89 | tcw->w = 1; | ||
| 90 | } | ||
| 91 | EXPORT_SYMBOL(tcw_init); | ||
| 92 | |||
| 93 | static inline size_t tca_size(struct tccb *tccb) | ||
| 94 | { | ||
| 95 | return tccb->tcah.tcal - 12; | ||
| 96 | } | ||
| 97 | |||
| 98 | static u32 calc_dcw_count(struct tccb *tccb) | ||
| 99 | { | ||
| 100 | int offset; | ||
| 101 | struct dcw *dcw; | ||
| 102 | u32 count = 0; | ||
| 103 | size_t size; | ||
| 104 | |||
| 105 | size = tca_size(tccb); | ||
| 106 | for (offset = 0; offset < size;) { | ||
| 107 | dcw = (struct dcw *) &tccb->tca[offset]; | ||
| 108 | count += dcw->count; | ||
| 109 | if (!(dcw->flags & DCW_FLAGS_CC)) | ||
| 110 | break; | ||
| 111 | offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4); | ||
| 112 | } | ||
| 113 | return count; | ||
| 114 | } | ||
| 115 | |||
| 116 | static u32 calc_cbc_size(struct tidaw *tidaw, int num) | ||
| 117 | { | ||
| 118 | int i; | ||
| 119 | u32 cbc_data; | ||
| 120 | u32 cbc_count = 0; | ||
| 121 | u64 data_count = 0; | ||
| 122 | |||
| 123 | for (i = 0; i < num; i++) { | ||
| 124 | if (tidaw[i].flags & TIDAW_FLAGS_LAST) | ||
| 125 | break; | ||
| 126 | /* TODO: find out if padding applies to total of data | ||
| 127 | * transferred or data transferred by this tidaw. Assumption: | ||
| 128 | * applies to total. */ | ||
| 129 | data_count += tidaw[i].count; | ||
| 130 | if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) { | ||
| 131 | cbc_data = 4 + ALIGN(data_count, 4) - data_count; | ||
| 132 | cbc_count += cbc_data; | ||
| 133 | data_count += cbc_data; | ||
| 134 | } | ||
| 135 | } | ||
| 136 | return cbc_count; | ||
| 137 | } | ||
| 138 | |||
| 139 | /** | ||
| 140 | * tcw_finalize - finalize tcw length fields and tidaw list | ||
| 141 | * @tcw: pointer to the tcw | ||
| 142 | * @num_tidaws: the number of tidaws used to address input/output data or zero | ||
| 143 | * if no tida is used | ||
| 144 | * | ||
| 145 | * Calculate the input-/output-count and tccbl field in the tcw, add a | ||
| 146 | * tcat to the tccb and terminate the data tidaw list if used. | ||
| 147 | * | ||
| 148 | * Note: in case input- or output-tida is used, the tidaw-list must be stored | ||
| 149 | * in contiguous storage (no ttic). The tcal field in the tccb must be | ||
| 150 | * up-to-date. | ||
| 151 | */ | ||
| 152 | void tcw_finalize(struct tcw *tcw, int num_tidaws) | ||
| 153 | { | ||
| 154 | struct tidaw *tidaw; | ||
| 155 | struct tccb *tccb; | ||
| 156 | struct tccb_tcat *tcat; | ||
| 157 | u32 count; | ||
| 158 | |||
| 159 | /* Terminate tidaw list. */ | ||
| 160 | tidaw = tcw_get_data(tcw); | ||
| 161 | if (num_tidaws > 0) | ||
| 162 | tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST; | ||
| 163 | /* Add tcat to tccb. */ | ||
| 164 | tccb = tcw_get_tccb(tcw); | ||
| 165 | tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)]; | ||
| 166 | memset(tcat, 0, sizeof(*tcat)); | ||
| 167 | /* Calculate tcw input/output count and tcat transport count. */ | ||
| 168 | count = calc_dcw_count(tccb); | ||
| 169 | if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA)) | ||
| 170 | count += calc_cbc_size(tidaw, num_tidaws); | ||
| 171 | if (tcw->r) | ||
| 172 | tcw->input_count = count; | ||
| 173 | else if (tcw->w) | ||
| 174 | tcw->output_count = count; | ||
| 175 | tcat->count = ALIGN(count, 4) + 4; | ||
| 176 | /* Calculate tccbl. */ | ||
| 177 | tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) + | ||
| 178 | sizeof(struct tccb_tcat) - 20) >> 2; | ||
| 179 | } | ||
| 180 | EXPORT_SYMBOL(tcw_finalize); | ||
| 181 | |||
| 182 | /** | ||
| 183 | * tcw_set_intrg - set the interrogate tcw address of a tcw | ||
| 184 | * @tcw: the tcw address | ||
| 185 | * @intrg_tcw: the address of the interrogate tcw | ||
| 186 | * | ||
| 187 | * Set the address of the interrogate tcw in the specified tcw. | ||
| 188 | */ | ||
| 189 | void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw) | ||
| 190 | { | ||
| 191 | tcw->intrg = (u32) ((addr_t) intrg_tcw); | ||
| 192 | } | ||
| 193 | EXPORT_SYMBOL(tcw_set_intrg); | ||
| 194 | |||
| 195 | /** | ||
| 196 | * tcw_set_data - set data address and tida flag of a tcw | ||
| 197 | * @tcw: the tcw address | ||
| 198 | * @data: the data address | ||
| 199 | * @use_tidal: zero if the data address specifies a contiguous block of data, | ||
| 200 | * non-zero if it specifies a list of tidaws. | ||
| 201 | * | ||
| 202 | * Set the input/output data address of a tcw (depending on the value of the | ||
| 203 | * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag | ||
| 204 | * is set as well. | ||
| 205 | */ | ||
| 206 | void tcw_set_data(struct tcw *tcw, void *data, int use_tidal) | ||
| 207 | { | ||
| 208 | if (tcw->r) { | ||
| 209 | tcw->input = (u64) ((addr_t) data); | ||
| 210 | if (use_tidal) | ||
| 211 | tcw->flags |= TCW_FLAGS_INPUT_TIDA; | ||
| 212 | } else if (tcw->w) { | ||
| 213 | tcw->output = (u64) ((addr_t) data); | ||
| 214 | if (use_tidal) | ||
| 215 | tcw->flags |= TCW_FLAGS_OUTPUT_TIDA; | ||
| 216 | } | ||
| 217 | } | ||
| 218 | EXPORT_SYMBOL(tcw_set_data); | ||
| 219 | |||
| 220 | /** | ||
| 221 | * tcw_set_tccb - set tccb address of a tcw | ||
| 222 | * @tcw: the tcw address | ||
| 223 | * @tccb: the tccb address | ||
| 224 | * | ||
| 225 | * Set the address of the tccb in the specified tcw. | ||
| 226 | */ | ||
| 227 | void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb) | ||
| 228 | { | ||
| 229 | tcw->tccb = (u64) ((addr_t) tccb); | ||
| 230 | } | ||
| 231 | EXPORT_SYMBOL(tcw_set_tccb); | ||
| 232 | |||
| 233 | /** | ||
| 234 | * tcw_set_tsb - set tsb address of a tcw | ||
| 235 | * @tcw: the tcw address | ||
| 236 | * @tsb: the tsb address | ||
| 237 | * | ||
| 238 | * Set the address of the tsb in the specified tcw. | ||
| 239 | */ | ||
| 240 | void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb) | ||
| 241 | { | ||
| 242 | tcw->tsb = (u64) ((addr_t) tsb); | ||
| 243 | } | ||
| 244 | EXPORT_SYMBOL(tcw_set_tsb); | ||
| 245 | |||
| 246 | /** | ||
| 247 | * tccb_init - initialize tccb | ||
| 248 | * @tccb: the tccb address | ||
| 249 | * @size: the maximum size of the tccb | ||
| 250 | * @sac: the service-action-code to be used | ||
| 251 | * | ||
| 252 | * Initialize the header of the specified tccb by resetting all values to zero | ||
| 253 | * and filling in defaults for format, sac and initial tcal fields. | ||
| 254 | */ | ||
| 255 | void tccb_init(struct tccb *tccb, size_t size, u32 sac) | ||
| 256 | { | ||
| 257 | memset(tccb, 0, size); | ||
| 258 | tccb->tcah.format = TCCB_FORMAT_DEFAULT; | ||
| 259 | tccb->tcah.sac = sac; | ||
| 260 | tccb->tcah.tcal = 12; | ||
| 261 | } | ||
| 262 | EXPORT_SYMBOL(tccb_init); | ||
| 263 | |||
| 264 | /** | ||
| 265 | * tsb_init - initialize tsb | ||
| 266 | * @tsb: the tsb address | ||
| 267 | * | ||
| 268 | * Initialize the specified tsb by resetting all values to zero. | ||
| 269 | */ | ||
| 270 | void tsb_init(struct tsb *tsb) | ||
| 271 | { | ||
| 272 | memset(tsb, 0, sizeof(*tsb)); | ||
| 273 | } | ||
| 274 | EXPORT_SYMBOL(tsb_init); | ||
| 275 | |||
| 276 | /** | ||
| 277 | * tccb_add_dcw - add a dcw to the tccb | ||
| 278 | * @tccb: the tccb address | ||
| 279 | * @tccb_size: the maximum tccb size | ||
| 280 | * @cmd: the dcw command | ||
| 281 | * @flags: flags for the dcw | ||
| 282 | * @cd: pointer to control data for this dcw or NULL if none is required | ||
| 283 | * @cd_count: number of control data bytes for this dcw | ||
| 284 | * @count: number of data bytes for this dcw | ||
| 285 | * | ||
| 286 | * Add a new dcw to the specified tccb by writing the dcw information specified | ||
| 287 | * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return | ||
| 288 | * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw | ||
| 289 | * would exceed the available space as defined by @tccb_size. | ||
| 290 | * | ||
| 291 | * Note: the tcal field of the tccb header will be updated to reflect added | ||
| 292 | * content. | ||
| 293 | */ | ||
| 294 | struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags, | ||
| 295 | void *cd, u8 cd_count, u32 count) | ||
| 296 | { | ||
| 297 | struct dcw *dcw; | ||
| 298 | int size; | ||
| 299 | int tca_offset; | ||
| 300 | |||
| 301 | /* Check for space. */ | ||
| 302 | tca_offset = tca_size(tccb); | ||
| 303 | size = ALIGN(sizeof(struct dcw) + cd_count, 4); | ||
| 304 | if (sizeof(struct tccb_tcah) + tca_offset + size + | ||
| 305 | sizeof(struct tccb_tcat) > tccb_size) | ||
| 306 | return ERR_PTR(-ENOSPC); | ||
| 307 | /* Add dcw to tca. */ | ||
| 308 | dcw = (struct dcw *) &tccb->tca[tca_offset]; | ||
| 309 | memset(dcw, 0, size); | ||
| 310 | dcw->cmd = cmd; | ||
| 311 | dcw->flags = flags; | ||
| 312 | dcw->count = count; | ||
| 313 | dcw->cd_count = cd_count; | ||
| 314 | if (cd) | ||
| 315 | memcpy(&dcw->cd[0], cd, cd_count); | ||
| 316 | tccb->tcah.tcal += size; | ||
| 317 | return dcw; | ||
| 318 | } | ||
| 319 | EXPORT_SYMBOL(tccb_add_dcw); | ||
| 320 | |||
| 321 | /** | ||
| 322 | * tcw_add_tidaw - add a tidaw to a tcw | ||
| 323 | * @tcw: the tcw address | ||
| 324 | * @num_tidaws: the current number of tidaws | ||
| 325 | * @flags: flags for the new tidaw | ||
| 326 | * @addr: address value for the new tidaw | ||
| 327 | * @count: count value for the new tidaw | ||
| 328 | * | ||
| 329 | * Add a new tidaw to the input/output data tidaw-list of the specified tcw | ||
| 330 | * (depending on the value of the r-flag and w-flag) and return a pointer to | ||
| 331 | * the new tidaw. | ||
| 332 | * | ||
| 333 | * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller | ||
| 334 | * must ensure that there is enough space for the new tidaw. The last-tidaw | ||
| 335 | * flag for the last tidaw in the list will be set by tcw_finalize. | ||
| 336 | */ | ||
| 337 | struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags, | ||
| 338 | void *addr, u32 count) | ||
| 339 | { | ||
| 340 | struct tidaw *tidaw; | ||
| 341 | |||
| 342 | /* Add tidaw to tidaw-list. */ | ||
| 343 | tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws; | ||
| 344 | memset(tidaw, 0, sizeof(struct tidaw)); | ||
| 345 | tidaw->flags = flags; | ||
| 346 | tidaw->count = count; | ||
| 347 | tidaw->addr = (u64) ((addr_t) addr); | ||
| 348 | return tidaw; | ||
| 349 | } | ||
| 350 | EXPORT_SYMBOL(tcw_add_tidaw); | ||
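To show how the helpers above fit together, here is a sketch that builds a single-dcw read tcw over one contiguous buffer. The caller-provided memory layout, the alignment assumptions and the 0x3e command code are illustrative; the TCCB_SAC_DEFAULT constant is assumed to come from asm/fcx.h. Real callers would normally use the itcw wrapper added by this series instead of open-coding the layout.

#include <linux/kernel.h>
#include <linux/err.h>
#include <asm/fcx.h>

/* mem: caller-provided, zeroed, 64-byte aligned, below 2G and large enough
 * for a tcw followed by a maximum-size tccb and a tsb (assumptions). */
static struct tcw *my_build_read_tcw(void *mem, void *data, u32 len)
{
	struct tcw *tcw = mem;
	struct tccb *tccb = mem + ALIGN(sizeof(struct tcw), 8);
	struct tsb *tsb = mem + ALIGN(sizeof(struct tcw), 8) + TCCB_MAX_SIZE;
	struct dcw *dcw;

	tcw_init(tcw, 1, 0);		/* r=1: read operation */
	tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
	tsb_init(tsb);
	/* 0x3e is an illustrative command code, not taken from this patch. */
	dcw = tccb_add_dcw(tccb, TCCB_MAX_SIZE, 0x3e, 0, NULL, 0, len);
	if (IS_ERR(dcw))
		return NULL;
	tcw_set_tccb(tcw, tccb);
	tcw_set_tsb(tcw, tsb);
	tcw_set_data(tcw, data, 0);	/* contiguous buffer, no tidaw list */
	tcw_finalize(tcw, 0);		/* no tidaws in use */
	return tcw;
}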
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h index 144466ab8c15..528065cb5021 100644 --- a/drivers/s390/cio/idset.h +++ b/drivers/s390/cio/idset.h | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | #ifndef S390_IDSET_H | 8 | #ifndef S390_IDSET_H |
| 9 | #define S390_IDSET_H S390_IDSET_H | 9 | #define S390_IDSET_H S390_IDSET_H |
| 10 | 10 | ||
| 11 | #include "schid.h" | 11 | #include <asm/schid.h> |
| 12 | 12 | ||
| 13 | struct idset; | 13 | struct idset; |
| 14 | 14 | ||
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 8c613160bfce..3f8f1cf69c76 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h | |||
| @@ -1,12 +1,12 @@ | |||
| 1 | #ifndef S390_IO_SCH_H | 1 | #ifndef S390_IO_SCH_H |
| 2 | #define S390_IO_SCH_H | 2 | #define S390_IO_SCH_H |
| 3 | 3 | ||
| 4 | #include "schid.h" | 4 | #include <asm/schid.h> |
| 5 | 5 | ||
| 6 | /* | 6 | /* |
| 7 | * operation request block | 7 | * command-mode operation request block |
| 8 | */ | 8 | */ |
| 9 | struct orb { | 9 | struct cmd_orb { |
| 10 | u32 intparm; /* interruption parameter */ | 10 | u32 intparm; /* interruption parameter */ |
| 11 | u32 key : 4; /* flags, like key, suspend control, etc. */ | 11 | u32 key : 4; /* flags, like key, suspend control, etc. */ |
| 12 | u32 spnd : 1; /* suspend control */ | 12 | u32 spnd : 1; /* suspend control */ |
| @@ -28,8 +28,36 @@ struct orb { | |||
| 28 | u32 cpa; /* channel program address */ | 28 | u32 cpa; /* channel program address */ |
| 29 | } __attribute__ ((packed, aligned(4))); | 29 | } __attribute__ ((packed, aligned(4))); |
| 30 | 30 | ||
| 31 | /* | ||
| 32 | * transport-mode operation request block | ||
| 33 | */ | ||
| 34 | struct tm_orb { | ||
| 35 | u32 intparm; | ||
| 36 | u32 key:4; | ||
| 37 | u32 :9; | ||
| 38 | u32 b:1; | ||
| 39 | u32 :2; | ||
| 40 | u32 lpm:8; | ||
| 41 | u32 :7; | ||
| 42 | u32 x:1; | ||
| 43 | u32 tcw; | ||
| 44 | u32 prio:8; | ||
| 45 | u32 :8; | ||
| 46 | u32 rsvpgm:8; | ||
| 47 | u32 :8; | ||
| 48 | u32 :32; | ||
| 49 | u32 :32; | ||
| 50 | u32 :32; | ||
| 51 | u32 :32; | ||
| 52 | } __attribute__ ((packed, aligned(4))); | ||
| 53 | |||
| 54 | union orb { | ||
| 55 | struct cmd_orb cmd; | ||
| 56 | struct tm_orb tm; | ||
| 57 | } __attribute__ ((packed, aligned(4))); | ||
| 58 | |||
| 31 | struct io_subchannel_private { | 59 | struct io_subchannel_private { |
| 32 | struct orb orb; /* operation request block */ | 60 | union orb orb; /* operation request block */ |
| 33 | struct ccw1 sense_ccw; /* static ccw for sense command */ | 61 | struct ccw1 sense_ccw; /* static ccw for sense command */ |
| 34 | } __attribute__ ((aligned(8))); | 62 | } __attribute__ ((aligned(8))); |
| 35 | 63 | ||
| @@ -95,16 +123,18 @@ struct ccw_device_private { | |||
| 95 | void *cmb_wait; /* deferred cmb enable/disable */ | 123 | void *cmb_wait; /* deferred cmb enable/disable */ |
| 96 | }; | 124 | }; |
| 97 | 125 | ||
| 98 | static inline int ssch(struct subchannel_id schid, volatile struct orb *addr) | 126 | static inline int ssch(struct subchannel_id schid, volatile union orb *addr) |
| 99 | { | 127 | { |
| 100 | register struct subchannel_id reg1 asm("1") = schid; | 128 | register struct subchannel_id reg1 asm("1") = schid; |
| 101 | int ccode; | 129 | int ccode = -EIO; |
| 102 | 130 | ||
| 103 | asm volatile( | 131 | asm volatile( |
| 104 | " ssch 0(%2)\n" | 132 | " ssch 0(%2)\n" |
| 105 | " ipm %0\n" | 133 | "0: ipm %0\n" |
| 106 | " srl %0,28" | 134 | " srl %0,28\n" |
| 107 | : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); | 135 | "1:\n" |
| 136 | EX_TABLE(0b, 1b) | ||
| 137 | : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); | ||
| 108 | return ccode; | 138 | return ccode; |
| 109 | } | 139 | } |
| 110 | 140 | ||
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 652ea3625f9d..9fa2ac13ac85 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | #define S390_CIO_IOASM_H | 2 | #define S390_CIO_IOASM_H |
| 3 | 3 | ||
| 4 | #include <asm/chpid.h> | 4 | #include <asm/chpid.h> |
| 5 | #include "schid.h" | 5 | #include <asm/schid.h> |
| 6 | 6 | ||
| 7 | /* | 7 | /* |
| 8 | * TPI info structure | 8 | * TPI info structure |
diff --git a/drivers/s390/cio/isc.c b/drivers/s390/cio/isc.c new file mode 100644 index 000000000000..c592087be0f1 --- /dev/null +++ b/drivers/s390/cio/isc.c | |||
| @@ -0,0 +1,68 @@ | |||
| 1 | /* | ||
| 2 | * Functions for registration of I/O interruption subclasses on s390. | ||
| 3 | * | ||
| 4 | * Copyright IBM Corp. 2008 | ||
| 5 | * Authors: Sebastian Ott <sebott@linux.vnet.ibm.com> | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/spinlock.h> | ||
| 9 | #include <linux/module.h> | ||
| 10 | #include <asm/isc.h> | ||
| 11 | |||
| 12 | static unsigned int isc_refs[MAX_ISC + 1]; | ||
| 13 | static DEFINE_SPINLOCK(isc_ref_lock); | ||
| 14 | |||
| 15 | |||
| 16 | /** | ||
| 17 | * isc_register - register an I/O interruption subclass. | ||
| 18 | * @isc: I/O interruption subclass to register | ||
| 19 | * | ||
| 20 | * The number of users for @isc is increased. If this is the first user to | ||
| 21 | * register @isc, the corresponding I/O interruption subclass mask is enabled. | ||
| 22 | * | ||
| 23 | * Context: | ||
| 24 | * This function must not be called in interrupt context. | ||
| 25 | */ | ||
| 26 | void isc_register(unsigned int isc) | ||
| 27 | { | ||
| 28 | if (isc > MAX_ISC) { | ||
| 29 | WARN_ON(1); | ||
| 30 | return; | ||
| 31 | } | ||
| 32 | |||
| 33 | spin_lock(&isc_ref_lock); | ||
| 34 | if (isc_refs[isc] == 0) | ||
| 35 | ctl_set_bit(6, 31 - isc); | ||
| 36 | isc_refs[isc]++; | ||
| 37 | spin_unlock(&isc_ref_lock); | ||
| 38 | } | ||
| 39 | EXPORT_SYMBOL_GPL(isc_register); | ||
| 40 | |||
| 41 | /** | ||
| 42 | * isc_unregister - unregister an I/O interruption subclass. | ||
| 43 | * @isc: I/O interruption subclass to unregister | ||
| 44 | * | ||
| 45 | * The number of users for @isc is decreased. If this is the last user to | ||
| 46 | * unregister @isc, the corresponding I/O interruption subclass mask is | ||
| 47 | * disabled. | ||
| 48 | * Note: This function must not be called if isc_register() hasn't been called | ||
| 49 | * before by the driver for @isc. | ||
| 50 | * | ||
| 51 | * Context: | ||
| 52 | * This function must not be called in interrupt context. | ||
| 53 | */ | ||
| 54 | void isc_unregister(unsigned int isc) | ||
| 55 | { | ||
| 56 | spin_lock(&isc_ref_lock); | ||
| 57 | /* check for misuse */ | ||
| 58 | if (isc > MAX_ISC || isc_refs[isc] == 0) { | ||
| 59 | WARN_ON(1); | ||
| 60 | goto out_unlock; | ||
| 61 | } | ||
| 62 | if (isc_refs[isc] == 1) | ||
| 63 | ctl_clear_bit(6, 31 - isc); | ||
| 64 | isc_refs[isc]--; | ||
| 65 | out_unlock: | ||
| 66 | spin_unlock(&isc_ref_lock); | ||
| 67 | } | ||
| 68 | EXPORT_SYMBOL_GPL(isc_unregister); | ||
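Typical use of the new registration interface, sketched below; the subclass number and function names are illustrative (a real driver would use a constant defined in asm/isc.h rather than a bare value):

#include <linux/module.h>
#include <asm/isc.h>

#define MY_ISC 5	/* illustrative; must not exceed MAX_ISC */

static int __init my_init(void)
{
	isc_register(MY_ISC);	/* first user enables the subclass mask */
	return 0;
}

static void __exit my_exit(void)
{
	isc_unregister(MY_ISC);	/* last user disables the mask again */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");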
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c new file mode 100644 index 000000000000..17da9ab932ed --- /dev/null +++ b/drivers/s390/cio/itcw.c | |||
| @@ -0,0 +1,327 @@ | |||
| 1 | /* | ||
| 2 | * Functions for incremental construction of fcx enabled I/O control blocks. | ||
| 3 | * | ||
| 4 | * Copyright IBM Corp. 2008 | ||
| 5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/kernel.h> | ||
| 9 | #include <linux/types.h> | ||
| 10 | #include <linux/string.h> | ||
| 11 | #include <linux/errno.h> | ||
| 12 | #include <linux/err.h> | ||
| 13 | #include <linux/module.h> | ||
| 14 | #include <asm/fcx.h> | ||
| 15 | #include <asm/itcw.h> | ||
| 16 | |||
| 17 | /** | ||
| 18 | * struct itcw - incremental tcw helper data type | ||
| 19 | * | ||
| 20 | * This structure serves as a handle for the incremental construction of a | ||
| 21 | * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate | ||
| 22 | * tcw and associated data. The data structures are contained inside a single | ||
| 23 | * contiguous buffer provided by the user. | ||
| 24 | * | ||
| 25 | * The itcw construction functions take care of overall data integrity: | ||
| 26 | * - reset unused fields to zero | ||
| 27 | * - fill in required pointers | ||
| 28 | * - ensure required alignment for data structures | ||
| 29 | * - prevent data structures from crossing a 4k-byte boundary where required | ||
| 30 | * - calculate tccb-related length fields | ||
| 31 | * - optionally provide ready-made interrogate tcw and associated structures | ||
| 32 | * | ||
| 33 | * Restrictions apply to the itcws created with these construction functions: | ||
| 34 | * - tida only supported for data address, not for tccb | ||
| 35 | * - only contiguous tidaw-lists (no ttic) | ||
| 36 | * - total number of bytes required per itcw may not exceed 4k bytes | ||
| 37 | * - either read or write operation (may not work with r=0 and w=0) | ||
| 38 | * | ||
| 39 | * Example: | ||
| 40 | * struct itcw *itcw; | ||
| 41 | * void *buffer; | ||
| 42 | * size_t size; | ||
| 43 | * | ||
| 44 | * size = itcw_calc_size(1, 2, 0); | ||
| 45 | * buffer = kmalloc(size, GFP_DMA); | ||
| 46 | * if (!buffer) | ||
| 47 | * return -ENOMEM; | ||
| 48 | * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0); | ||
| 49 | * if (IS_ERR(itcw)) | ||
| 50 | * return PTR_ERR(itcw); | ||
| 51 | * itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72); | ||
| 52 | * itcw_add_tidaw(itcw, 0, 0x30000, 20); | ||
| 53 | * itcw_add_tidaw(itcw, 0, 0x40000, 52); | ||
| 54 | * itcw_finalize(itcw); | ||
| 55 | * | ||
| 56 | */ | ||
| 57 | struct itcw { | ||
| 58 | struct tcw *tcw; | ||
| 59 | struct tcw *intrg_tcw; | ||
| 60 | int num_tidaws; | ||
| 61 | int max_tidaws; | ||
| 62 | int intrg_num_tidaws; | ||
| 63 | int intrg_max_tidaws; | ||
| 64 | }; | ||
| 65 | |||
| 66 | /** | ||
| 67 | * itcw_get_tcw - return pointer to tcw associated with the itcw | ||
| 68 | * @itcw: address of the itcw | ||
| 69 | * | ||
| 70 | * Return pointer to the tcw associated with the itcw. | ||
| 71 | */ | ||
| 72 | struct tcw *itcw_get_tcw(struct itcw *itcw) | ||
| 73 | { | ||
| 74 | return itcw->tcw; | ||
| 75 | } | ||
| 76 | EXPORT_SYMBOL(itcw_get_tcw); | ||
| 77 | |||
| 78 | /** | ||
| 79 | * itcw_calc_size - return the size of an itcw with the given parameters | ||
| 80 | * @intrg: if non-zero, add an interrogate tcw | ||
| 81 | * @max_tidaws: maximum number of tidaws to be used for data addressing or zero | ||
| 82 | * if no tida is to be used. | ||
| 83 | * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing | ||
| 84 | * by the interrogate tcw, if specified | ||
| 85 | * | ||
| 86 | * Calculate and return the number of bytes required to hold an itcw with the | ||
| 87 | * given parameters and assuming tccbs with maximum size. | ||
| 88 | * | ||
| 89 | * Note that the resulting size also contains bytes needed for alignment | ||
| 90 | * padding as well as padding to ensure that data structures don't cross a | ||
| 91 | * 4k-boundary where required. | ||
| 92 | */ | ||
| 93 | size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws) | ||
| 94 | { | ||
| 95 | size_t len; | ||
| 96 | |||
| 97 | /* Main data. */ | ||
| 98 | len = sizeof(struct itcw); | ||
| 99 | len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE + | ||
| 100 | /* TSB */ sizeof(struct tsb) + | ||
| 101 | /* TIDAL */ max_tidaws * sizeof(struct tidaw); | ||
| 102 | /* Interrogate data. */ | ||
| 103 | if (intrg) { | ||
| 104 | len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE + | ||
| 105 | /* TSB */ sizeof(struct tsb) + | ||
| 106 | /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw); | ||
| 107 | } | ||
| 108 | /* Maximum required alignment padding. */ | ||
| 109 | len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7; | ||
| 110 | /* Maximum padding for structures that may not cross 4k boundary. */ | ||
| 111 | if ((max_tidaws > 0) || (intrg_max_tidaws > 0)) | ||
| 112 | len += max(max_tidaws, intrg_max_tidaws) * | ||
| 113 | sizeof(struct tidaw) - 1; | ||
| 114 | return len; | ||
| 115 | } | ||
| 116 | EXPORT_SYMBOL(itcw_calc_size); | ||
| 117 | |||
| 118 | #define CROSS4K(x, l) (((x) & ~4095) != (((x) + (l)) & ~4095)) | ||
| 119 | |||
| 120 | static inline void *fit_chunk(addr_t *start, addr_t end, size_t len, | ||
| 121 | int align, int check_4k) | ||
| 122 | { | ||
| 123 | addr_t addr; | ||
| 124 | |||
| 125 | addr = ALIGN(*start, align); | ||
| 126 | if (check_4k && CROSS4K(addr, len)) { | ||
| 127 | addr = ALIGN(addr, 4096); | ||
| 128 | addr = ALIGN(addr, align); | ||
| 129 | } | ||
| 130 | if (addr + len > end) | ||
| 131 | return ERR_PTR(-ENOSPC); | ||
| 132 | *start = addr + len; | ||
| 133 | return (void *) addr; | ||
| 134 | } | ||
| 135 | |||
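The two helpers above carve aligned chunks out of the user-supplied buffer and, where requested, keep a chunk from straddling a 4k boundary. The following is a small, self-contained sketch (not part of the patch) that replays the same check in userspace C with made-up numbers; the macros are restated here purely for illustration.

#include <stdio.h>

#define ALIGN(x, a)   (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define CROSS4K(x, l) (((x) & ~4095UL) != (((x) + (l)) & ~4095UL))

int main(void)
{
	/* Assume the next free byte in the itcw buffer sits at offset 0x1fe0
	 * and a 16-entry tidaw list (16 * 16 = 256 bytes) is needed. */
	unsigned long start = 0x1fe0;
	unsigned long len = 256;
	unsigned long addr = ALIGN(start, 16);	/* tidaw alignment */

	if (CROSS4K(addr, len))
		addr = ALIGN(addr, 4096);	/* move past the boundary, as fit_chunk() does */
	printf("tidaw list placed at 0x%lx\n", addr);	/* prints 0x2000 */
	return 0;
}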
| 136 | /** | ||
| 137 | * itcw_init - initialize incremental tcw data structure | ||
| 138 | * @buffer: address of buffer to use for data structures | ||
| 139 | * @size: number of bytes in buffer | ||
| 140 | * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write | ||
| 141 | * operation tcw | ||
| 142 | * @intrg: if non-zero, add and initialize an interrogate tcw | ||
| 143 | * @max_tidaws: maximum number of tidaws to be used for data addressing or zero | ||
| 144 | * if no tida is to be used. | ||
| 145 | * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing | ||
| 146 | * by the interrogate tcw, if specified | ||
| 147 | * | ||
| 148 | * Prepare the specified buffer to be used as an incremental tcw, i.e. a | ||
| 149 | * helper data structure that can be used to construct a valid tcw by | ||
| 150 | * successive calls to other helper functions. Note: the buffer needs to be | ||
| 151 | * located below the 2G address limit. The resulting tcw has the following | ||
| 152 | * restrictions: | ||
| 153 | * - no tccb tidal | ||
| 154 | * - input/output tidal is contiguous (no ttic) | ||
| 155 | * - total data should not exceed 4k | ||
| 156 | * - tcw specifies either read or write operation | ||
| 157 | * | ||
| 158 | * On success, return pointer to the resulting incremental tcw data structure, | ||
| 159 | * ERR_PTR otherwise. | ||
| 160 | */ | ||
| 161 | struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg, | ||
| 162 | int max_tidaws, int intrg_max_tidaws) | ||
| 163 | { | ||
| 164 | struct itcw *itcw; | ||
| 165 | void *chunk; | ||
| 166 | addr_t start; | ||
| 167 | addr_t end; | ||
| 168 | |||
| 169 | /* Check for 2G limit. */ | ||
| 170 | start = (addr_t) buffer; | ||
| 171 | end = start + size; | ||
| 172 | if (end > (1 << 31)) | ||
| 173 | return ERR_PTR(-EINVAL); | ||
| 174 | memset(buffer, 0, size); | ||
| 175 | /* ITCW. */ | ||
| 176 | chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0); | ||
| 177 | if (IS_ERR(chunk)) | ||
| 178 | return chunk; | ||
| 179 | itcw = chunk; | ||
| 180 | itcw->max_tidaws = max_tidaws; | ||
| 181 | itcw->intrg_max_tidaws = intrg_max_tidaws; | ||
| 182 | /* Main TCW. */ | ||
| 183 | chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); | ||
| 184 | if (IS_ERR(chunk)) | ||
| 185 | return chunk; | ||
| 186 | itcw->tcw = chunk; | ||
| 187 | tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0, | ||
| 188 | (op == ITCW_OP_WRITE) ? 1 : 0); | ||
| 189 | /* Interrogate TCW. */ | ||
| 190 | if (intrg) { | ||
| 191 | chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); | ||
| 192 | if (IS_ERR(chunk)) | ||
| 193 | return chunk; | ||
| 194 | itcw->intrg_tcw = chunk; | ||
| 195 | tcw_init(itcw->intrg_tcw, 1, 0); | ||
| 196 | tcw_set_intrg(itcw->tcw, itcw->intrg_tcw); | ||
| 197 | } | ||
| 198 | /* Data TIDAL. */ | ||
| 199 | if (max_tidaws > 0) { | ||
| 200 | chunk = fit_chunk(&start, end, sizeof(struct tidaw) * | ||
| 201 | max_tidaws, 16, 1); | ||
| 202 | if (IS_ERR(chunk)) | ||
| 203 | return chunk; | ||
| 204 | tcw_set_data(itcw->tcw, chunk, 1); | ||
| 205 | } | ||
| 206 | /* Interrogate data TIDAL. */ | ||
| 207 | if (intrg && (intrg_max_tidaws > 0)) { | ||
| 208 | chunk = fit_chunk(&start, end, sizeof(struct tidaw) * | ||
| 209 | intrg_max_tidaws, 16, 1); | ||
| 210 | if (IS_ERR(chunk)) | ||
| 211 | return chunk; | ||
| 212 | tcw_set_data(itcw->intrg_tcw, chunk, 1); | ||
| 213 | } | ||
| 214 | /* TSB. */ | ||
| 215 | chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); | ||
| 216 | if (IS_ERR(chunk)) | ||
| 217 | return chunk; | ||
| 218 | tsb_init(chunk); | ||
| 219 | tcw_set_tsb(itcw->tcw, chunk); | ||
| 220 | /* Interrogate TSB. */ | ||
| 221 | if (intrg) { | ||
| 222 | chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); | ||
| 223 | if (IS_ERR(chunk)) | ||
| 224 | return chunk; | ||
| 225 | tsb_init(chunk); | ||
| 226 | tcw_set_tsb(itcw->intrg_tcw, chunk); | ||
| 227 | } | ||
| 228 | /* TCCB. */ | ||
| 229 | chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); | ||
| 230 | if (IS_ERR(chunk)) | ||
| 231 | return chunk; | ||
| 232 | tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT); | ||
| 233 | tcw_set_tccb(itcw->tcw, chunk); | ||
| 234 | /* Interrogate TCCB. */ | ||
| 235 | if (intrg) { | ||
| 236 | chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); | ||
| 237 | if (IS_ERR(chunk)) | ||
| 238 | return chunk; | ||
| 239 | tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG); | ||
| 240 | tcw_set_tccb(itcw->intrg_tcw, chunk); | ||
| 241 | tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL, | ||
| 242 | sizeof(struct dcw_intrg_data), 0); | ||
| 243 | tcw_finalize(itcw->intrg_tcw, 0); | ||
| 244 | } | ||
| 245 | return itcw; | ||
| 246 | } | ||
| 247 | EXPORT_SYMBOL(itcw_init); | ||
| 248 | |||
| 249 | /** | ||
| 250 | * itcw_add_dcw - add a dcw to the itcw | ||
| 251 | * @itcw: address of the itcw | ||
| 252 | * @cmd: the dcw command | ||
| 253 | * @flags: flags for the dcw | ||
| 254 | * @cd: address of control data for this dcw or NULL if none is required | ||
| 255 | * @cd_count: number of control data bytes for this dcw | ||
| 256 | * @count: number of data bytes for this dcw | ||
| 257 | * | ||
| 258 | * Add a new dcw to the specified itcw by writing the dcw information specified | ||
| 259 | * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return | ||
| 260 | * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw | ||
| 261 | * would exceed the available space. | ||
| 262 | * | ||
| 263 | * Note: the tcal field of the tccb header will be updated to reflect added | ||
| 264 | * content. | ||
| 265 | */ | ||
| 266 | struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd, | ||
| 267 | u8 cd_count, u32 count) | ||
| 268 | { | ||
| 269 | return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd, | ||
| 270 | flags, cd, cd_count, count); | ||
| 271 | } | ||
| 272 | EXPORT_SYMBOL(itcw_add_dcw); | ||
| 273 | |||
| 274 | /** | ||
| 275 | * itcw_add_tidaw - add a tidaw to the itcw | ||
| 276 | * @itcw: address of the itcw | ||
| 277 | * @flags: flags for the new tidaw | ||
| 278 | * @addr: address value for the new tidaw | ||
| 279 | * @count: count value for the new tidaw | ||
| 280 | * | ||
| 281 | * Add a new tidaw to the input/output data tidaw-list of the specified itcw | ||
| 282 | * (depending on the value of the r-flag and w-flag). Return a pointer to | ||
| 283 | * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the | ||
| 284 | * available space. | ||
| 285 | * | ||
| 286 | * Note: the tidaw-list is assumed to be contiguous with no ttics. The | ||
| 287 | * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize. | ||
| 288 | */ | ||
| 289 | struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count) | ||
| 290 | { | ||
| 291 | if (itcw->num_tidaws >= itcw->max_tidaws) | ||
| 292 | return ERR_PTR(-ENOSPC); | ||
| 293 | return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count); | ||
| 294 | } | ||
| 295 | EXPORT_SYMBOL(itcw_add_tidaw); | ||
| 296 | |||
| 297 | /** | ||
| 298 | * itcw_set_data - set data address and tida flag of the itcw | ||
| 299 | * @itcw: address of the itcw | ||
| 300 | * @addr: the data address | ||
| 301 | * @use_tidal: zero if the data address specifies a contiguous block of data, | ||
| 302 | * non-zero if it specifies a list of tidaws. | ||
| 303 | * | ||
| 304 | * Set the input/output data address of the itcw (depending on the value of the | ||
| 305 | * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag | ||
| 306 | * is set as well. | ||
| 307 | */ | ||
| 308 | void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal) | ||
| 309 | { | ||
| 310 | tcw_set_data(itcw->tcw, addr, use_tidal); | ||
| 311 | } | ||
| 312 | EXPORT_SYMBOL(itcw_set_data); | ||
| 313 | |||
| 314 | /** | ||
| 315 | * itcw_finalize - calculate length and count fields of the itcw | ||
| 316 | * @itcw: address of the itcw | ||
| 317 | * | ||
| 318 | * Calculate tcw input/output count and tccbl fields and add a tcat to the tccb. | ||
| 319 | * In case input- or output-tida is used, the tidaw-list must be stored in | ||
| 320 | * contiguous storage (no ttic). The tcal field in the tccb must be | ||
| 321 | * up-to-date. | ||
| 322 | */ | ||
| 323 | void itcw_finalize(struct itcw *itcw) | ||
| 324 | { | ||
| 325 | tcw_finalize(itcw->tcw, itcw->num_tidaws); | ||
| 326 | } | ||
| 327 | EXPORT_SYMBOL(itcw_finalize); | ||
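Putting the helpers together, a driver would build a transport-mode request roughly as sketched below. This is an illustrative sketch, not part of the patch: the command code 0x02, the fragment sizes and the function name are assumptions, and real code should also check itcw_add_dcw()/itcw_add_tidaw() for an -ENOSPC error pointer.

/* Sketch only: build a read tcw with two data fragments (20 + 52 bytes). */
static struct tcw *example_build_read_tcw(void *frag1, void *frag2)
{
	struct itcw *itcw;
	void *buffer;
	size_t size;

	/* Interrogate tcw requested, up to 2 data tidaws, no interrogate tidaws. */
	size = itcw_calc_size(1, 2, 0);
	buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
	if (IS_ERR(itcw)) {
		kfree(buffer);
		return ERR_PTR(PTR_ERR(itcw));
	}
	itcw_add_dcw(itcw, 0x02, 0, NULL, 0, 72);	/* placeholder command code */
	itcw_add_tidaw(itcw, 0, frag1, 20);
	itcw_add_tidaw(itcw, 0, frag2, 52);
	itcw_finalize(itcw);
	return itcw_get_tcw(itcw);
}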
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 445cf364e461..2bf36e14b102 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c | |||
| @@ -2082,7 +2082,6 @@ qdio_timeout_handler(struct ccw_device *cdev) | |||
| 2082 | default: | 2082 | default: |
| 2083 | BUG(); | 2083 | BUG(); |
| 2084 | } | 2084 | } |
| 2085 | ccw_device_set_timeout(cdev, 0); | ||
| 2086 | wake_up(&cdev->private->wait_q); | 2085 | wake_up(&cdev->private->wait_q); |
| 2087 | } | 2086 | } |
| 2088 | 2087 | ||
| @@ -2121,6 +2120,8 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 2121 | case -EIO: | 2120 | case -EIO: |
| 2122 | QDIO_PRINT_ERR("i/o error on device %s\n", | 2121 | QDIO_PRINT_ERR("i/o error on device %s\n", |
| 2123 | cdev->dev.bus_id); | 2122 | cdev->dev.bus_id); |
| 2123 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); | ||
| 2124 | wake_up(&cdev->private->wait_q); | ||
| 2124 | return; | 2125 | return; |
| 2125 | case -ETIMEDOUT: | 2126 | case -ETIMEDOUT: |
| 2126 | qdio_timeout_handler(cdev); | 2127 | qdio_timeout_handler(cdev); |
| @@ -2139,8 +2140,8 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 2139 | QDIO_DBF_TEXT4(0, trace, dbf_text); | 2140 | QDIO_DBF_TEXT4(0, trace, dbf_text); |
| 2140 | #endif /* CONFIG_QDIO_DEBUG */ | 2141 | #endif /* CONFIG_QDIO_DEBUG */ |
| 2141 | 2142 | ||
| 2142 | cstat = irb->scsw.cstat; | 2143 | cstat = irb->scsw.cmd.cstat; |
| 2143 | dstat = irb->scsw.dstat; | 2144 | dstat = irb->scsw.cmd.dstat; |
| 2144 | 2145 | ||
| 2145 | switch (irq_ptr->state) { | 2146 | switch (irq_ptr->state) { |
| 2146 | case QDIO_IRQ_STATE_INACTIVE: | 2147 | case QDIO_IRQ_STATE_INACTIVE: |
| @@ -2353,9 +2354,6 @@ tiqdio_check_chsc_availability(void) | |||
| 2353 | { | 2354 | { |
| 2354 | char dbf_text[15]; | 2355 | char dbf_text[15]; |
| 2355 | 2356 | ||
| 2356 | if (!css_characteristics_avail) | ||
| 2357 | return -EIO; | ||
| 2358 | |||
| 2359 | /* Check for bit 41. */ | 2357 | /* Check for bit 41. */ |
| 2360 | if (!css_general_characteristics.aif) { | 2358 | if (!css_general_characteristics.aif) { |
| 2361 | QDIO_PRINT_WARN("Adapter interruption facility not " \ | 2359 | QDIO_PRINT_WARN("Adapter interruption facility not " \ |
| @@ -2667,12 +2665,12 @@ qdio_shutdown(struct ccw_device *cdev, int how) | |||
| 2667 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | 2665 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); |
| 2668 | } else if (rc == 0) { | 2666 | } else if (rc == 0) { |
| 2669 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); | 2667 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); |
| 2670 | ccw_device_set_timeout(cdev, timeout); | ||
| 2671 | spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags); | 2668 | spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags); |
| 2672 | 2669 | ||
| 2673 | wait_event(cdev->private->wait_q, | 2670 | wait_event_interruptible_timeout(cdev->private->wait_q, |
| 2674 | irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || | 2671 | irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || |
| 2675 | irq_ptr->state == QDIO_IRQ_STATE_ERR); | 2672 | irq_ptr->state == QDIO_IRQ_STATE_ERR, |
| 2673 | timeout); | ||
| 2676 | } else { | 2674 | } else { |
| 2677 | QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for " | 2675 | QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for " |
| 2678 | "device %s\n", result, cdev->dev.bus_id); | 2676 | "device %s\n", result, cdev->dev.bus_id); |
| @@ -2692,7 +2690,6 @@ qdio_shutdown(struct ccw_device *cdev, int how) | |||
| 2692 | 2690 | ||
| 2693 | /* Ignore errors. */ | 2691 | /* Ignore errors. */ |
| 2694 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); | 2692 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); |
| 2695 | ccw_device_set_timeout(cdev, 0); | ||
| 2696 | out: | 2693 | out: |
| 2697 | up(&irq_ptr->setting_up_sema); | 2694 | up(&irq_ptr->setting_up_sema); |
| 2698 | return result; | 2695 | return result; |
| @@ -2907,13 +2904,10 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat) | |||
| 2907 | QDIO_DBF_TEXT0(0,setup,dbf_text); | 2904 | QDIO_DBF_TEXT0(0,setup,dbf_text); |
| 2908 | QDIO_DBF_TEXT0(0,trace,dbf_text); | 2905 | QDIO_DBF_TEXT0(0,trace,dbf_text); |
| 2909 | 2906 | ||
| 2910 | if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) { | 2907 | if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) |
| 2911 | ccw_device_set_timeout(cdev, 0); | ||
| 2912 | return; | 2908 | return; |
| 2913 | } | ||
| 2914 | 2909 | ||
| 2915 | qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED); | 2910 | qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED); |
| 2916 | ccw_device_set_timeout(cdev, 0); | ||
| 2917 | } | 2911 | } |
| 2918 | 2912 | ||
| 2919 | int | 2913 | int |
| @@ -3196,8 +3190,6 @@ qdio_establish(struct qdio_initialize *init_data) | |||
| 3196 | irq_ptr->schid.ssid, irq_ptr->schid.sch_no, | 3190 | irq_ptr->schid.ssid, irq_ptr->schid.sch_no, |
| 3197 | result, result2); | 3191 | result, result2); |
| 3198 | result=result2; | 3192 | result=result2; |
| 3199 | if (result) | ||
| 3200 | ccw_device_set_timeout(cdev, 0); | ||
| 3201 | } | 3193 | } |
| 3202 | 3194 | ||
| 3203 | spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); | 3195 | spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); |
| @@ -3279,7 +3271,6 @@ qdio_activate(struct ccw_device *cdev, int flags) | |||
| 3279 | 3271 | ||
| 3280 | spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); | 3272 | spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); |
| 3281 | 3273 | ||
| 3282 | ccw_device_set_timeout(cdev, 0); | ||
| 3283 | ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); | 3274 | ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); |
| 3284 | result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE, | 3275 | result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE, |
| 3285 | 0, DOIO_DENY_PREFETCH); | 3276 | 0, DOIO_DENY_PREFETCH); |
| @@ -3722,7 +3713,8 @@ tiqdio_register_thinints(void) | |||
| 3722 | char dbf_text[20]; | 3713 | char dbf_text[20]; |
| 3723 | 3714 | ||
| 3724 | tiqdio_ind = | 3715 | tiqdio_ind = |
| 3725 | s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL); | 3716 | s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL, |
| 3717 | TIQDIO_THININT_ISC); | ||
| 3726 | if (IS_ERR(tiqdio_ind)) { | 3718 | if (IS_ERR(tiqdio_ind)) { |
| 3727 | sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind)); | 3719 | sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind)); |
| 3728 | QDIO_DBF_TEXT0(0,setup,dbf_text); | 3720 | QDIO_DBF_TEXT0(0,setup,dbf_text); |
| @@ -3738,7 +3730,8 @@ static void | |||
| 3738 | tiqdio_unregister_thinints(void) | 3730 | tiqdio_unregister_thinints(void) |
| 3739 | { | 3731 | { |
| 3740 | if (tiqdio_ind) | 3732 | if (tiqdio_ind) |
| 3741 | s390_unregister_adapter_interrupt(tiqdio_ind); | 3733 | s390_unregister_adapter_interrupt(tiqdio_ind, |
| 3734 | TIQDIO_THININT_ISC); | ||
| 3742 | } | 3735 | } |
| 3743 | 3736 | ||
| 3744 | static int | 3737 | static int |
| @@ -3899,6 +3892,7 @@ init_QDIO(void) | |||
| 3899 | qdio_mempool_alloc, | 3892 | qdio_mempool_alloc, |
| 3900 | qdio_mempool_free, NULL); | 3893 | qdio_mempool_free, NULL); |
| 3901 | 3894 | ||
| 3895 | isc_register(QDIO_AIRQ_ISC); | ||
| 3902 | if (tiqdio_check_chsc_availability()) | 3896 | if (tiqdio_check_chsc_availability()) |
| 3903 | QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n"); | 3897 | QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n"); |
| 3904 | 3898 | ||
| @@ -3911,6 +3905,7 @@ static void __exit | |||
| 3911 | cleanup_QDIO(void) | 3905 | cleanup_QDIO(void) |
| 3912 | { | 3906 | { |
| 3913 | tiqdio_unregister_thinints(); | 3907 | tiqdio_unregister_thinints(); |
| 3908 | isc_unregister(QDIO_AIRQ_ISC); | ||
| 3914 | qdio_remove_procfs_entry(); | 3909 | qdio_remove_procfs_entry(); |
| 3915 | qdio_release_qdio_memory(); | 3910 | qdio_release_qdio_memory(); |
| 3916 | qdio_unregister_dbf_views(); | 3911 | qdio_unregister_dbf_views(); |
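The qdio hunks above follow from the scsw in struct irb now being a union with separate command-mode (.cmd) and transport-mode (.tm) views; qdio, which only issues command-mode I/O, reads irb->scsw.cmd.cstat and irb->scsw.cmd.dstat directly. A hedged sketch of how a handler can cope with either view, using the accessors from scsw.c further below (the handler name is illustrative):

/* Sketch only: read subchannel/device status independently of the scsw mode. */
static void example_int_handler(struct ccw_device *cdev, unsigned long intparm,
				struct irb *irb)
{
	int cstat, dstat;

	if (scsw_is_tm(&irb->scsw)) {
		cstat = scsw_cstat(&irb->scsw);	/* transport-mode view */
		dstat = scsw_dstat(&irb->scsw);
	} else {
		cstat = irb->scsw.cmd.cstat;	/* command-mode view */
		dstat = irb->scsw.cmd.dstat;
	}
	/* ... evaluate cstat/dstat ... */
}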
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index c3df6b2c38b7..7656081a24d2 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h | |||
| @@ -2,8 +2,8 @@ | |||
| 2 | #define _CIO_QDIO_H | 2 | #define _CIO_QDIO_H |
| 3 | 3 | ||
| 4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
| 5 | 5 | #include <asm/isc.h> | |
| 6 | #include "schid.h" | 6 | #include <asm/schid.h> |
| 7 | 7 | ||
| 8 | #ifdef CONFIG_QDIO_DEBUG | 8 | #ifdef CONFIG_QDIO_DEBUG |
| 9 | #define QDIO_VERBOSE_LEVEL 9 | 9 | #define QDIO_VERBOSE_LEVEL 9 |
| @@ -26,7 +26,7 @@ | |||
| 26 | */ | 26 | */ |
| 27 | #define IQDIO_FILL_LEVEL_TO_POLL 4 | 27 | #define IQDIO_FILL_LEVEL_TO_POLL 4 |
| 28 | 28 | ||
| 29 | #define TIQDIO_THININT_ISC 3 | 29 | #define TIQDIO_THININT_ISC QDIO_AIRQ_ISC |
| 30 | #define TIQDIO_DELAY_TARGET 0 | 30 | #define TIQDIO_DELAY_TARGET 0 |
| 31 | #define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */ | 31 | #define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */ |
| 32 | #define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */ | 32 | #define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */ |
diff --git a/drivers/s390/cio/scsw.c b/drivers/s390/cio/scsw.c new file mode 100644 index 000000000000..f8da25ab576d --- /dev/null +++ b/drivers/s390/cio/scsw.c | |||
| @@ -0,0 +1,843 @@ | |||
| 1 | /* | ||
| 2 | * Helper functions for scsw access. | ||
| 3 | * | ||
| 4 | * Copyright IBM Corp. 2008 | ||
| 5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/types.h> | ||
| 9 | #include <linux/module.h> | ||
| 10 | #include <asm/cio.h> | ||
| 11 | #include "css.h" | ||
| 12 | #include "chsc.h" | ||
| 13 | |||
| 14 | /** | ||
| 15 | * scsw_is_tm - check for transport mode scsw | ||
| 16 | * @scsw: pointer to scsw | ||
| 17 | * | ||
| 18 | * Return non-zero if the specified scsw is a transport mode scsw, zero | ||
| 19 | * otherwise. | ||
| 20 | */ | ||
| 21 | int scsw_is_tm(union scsw *scsw) | ||
| 22 | { | ||
| 23 | return css_general_characteristics.fcx && (scsw->tm.x == 1); | ||
| 24 | } | ||
| 25 | EXPORT_SYMBOL(scsw_is_tm); | ||
| 26 | |||
| 27 | /** | ||
| 28 | * scsw_key - return scsw key field | ||
| 29 | * @scsw: pointer to scsw | ||
| 30 | * | ||
| 31 | * Return the value of the key field of the specified scsw, regardless of | ||
| 32 | * whether it is a transport mode or command mode scsw. | ||
| 33 | */ | ||
| 34 | u32 scsw_key(union scsw *scsw) | ||
| 35 | { | ||
| 36 | if (scsw_is_tm(scsw)) | ||
| 37 | return scsw->tm.key; | ||
| 38 | else | ||
| 39 | return scsw->cmd.key; | ||
| 40 | } | ||
| 41 | EXPORT_SYMBOL(scsw_key); | ||
| 42 | |||
| 43 | /** | ||
| 44 | * scsw_eswf - return scsw eswf field | ||
| 45 | * @scsw: pointer to scsw | ||
| 46 | * | ||
| 47 | * Return the value of the eswf field of the specified scsw, regardless of | ||
| 48 | * whether it is a transport mode or command mode scsw. | ||
| 49 | */ | ||
| 50 | u32 scsw_eswf(union scsw *scsw) | ||
| 51 | { | ||
| 52 | if (scsw_is_tm(scsw)) | ||
| 53 | return scsw->tm.eswf; | ||
| 54 | else | ||
| 55 | return scsw->cmd.eswf; | ||
| 56 | } | ||
| 57 | EXPORT_SYMBOL(scsw_eswf); | ||
| 58 | |||
| 59 | /** | ||
| 60 | * scsw_cc - return scsw cc field | ||
| 61 | * @scsw: pointer to scsw | ||
| 62 | * | ||
| 63 | * Return the value of the cc field of the specified scsw, regardless of | ||
| 64 | * whether it is a transport mode or command mode scsw. | ||
| 65 | */ | ||
| 66 | u32 scsw_cc(union scsw *scsw) | ||
| 67 | { | ||
| 68 | if (scsw_is_tm(scsw)) | ||
| 69 | return scsw->tm.cc; | ||
| 70 | else | ||
| 71 | return scsw->cmd.cc; | ||
| 72 | } | ||
| 73 | EXPORT_SYMBOL(scsw_cc); | ||
| 74 | |||
| 75 | /** | ||
| 76 | * scsw_ectl - return scsw ectl field | ||
| 77 | * @scsw: pointer to scsw | ||
| 78 | * | ||
| 79 | * Return the value of the ectl field of the specified scsw, regardless of | ||
| 80 | * whether it is a transport mode or command mode scsw. | ||
| 81 | */ | ||
| 82 | u32 scsw_ectl(union scsw *scsw) | ||
| 83 | { | ||
| 84 | if (scsw_is_tm(scsw)) | ||
| 85 | return scsw->tm.ectl; | ||
| 86 | else | ||
| 87 | return scsw->cmd.ectl; | ||
| 88 | } | ||
| 89 | EXPORT_SYMBOL(scsw_ectl); | ||
| 90 | |||
| 91 | /** | ||
| 92 | * scsw_pno - return scsw pno field | ||
| 93 | * @scsw: pointer to scsw | ||
| 94 | * | ||
| 95 | * Return the value of the pno field of the specified scsw, regardless of | ||
| 96 | * whether it is a transport mode or command mode scsw. | ||
| 97 | */ | ||
| 98 | u32 scsw_pno(union scsw *scsw) | ||
| 99 | { | ||
| 100 | if (scsw_is_tm(scsw)) | ||
| 101 | return scsw->tm.pno; | ||
| 102 | else | ||
| 103 | return scsw->cmd.pno; | ||
| 104 | } | ||
| 105 | EXPORT_SYMBOL(scsw_pno); | ||
| 106 | |||
| 107 | /** | ||
| 108 | * scsw_fctl - return scsw fctl field | ||
| 109 | * @scsw: pointer to scsw | ||
| 110 | * | ||
| 111 | * Return the value of the fctl field of the specified scsw, regardless of | ||
| 112 | * whether it is a transport mode or command mode scsw. | ||
| 113 | */ | ||
| 114 | u32 scsw_fctl(union scsw *scsw) | ||
| 115 | { | ||
| 116 | if (scsw_is_tm(scsw)) | ||
| 117 | return scsw->tm.fctl; | ||
| 118 | else | ||
| 119 | return scsw->cmd.fctl; | ||
| 120 | } | ||
| 121 | EXPORT_SYMBOL(scsw_fctl); | ||
| 122 | |||
| 123 | /** | ||
| 124 | * scsw_actl - return scsw actl field | ||
| 125 | * @scsw: pointer to scsw | ||
| 126 | * | ||
| 127 | * Return the value of the actl field of the specified scsw, regardless of | ||
| 128 | * whether it is a transport mode or command mode scsw. | ||
| 129 | */ | ||
| 130 | u32 scsw_actl(union scsw *scsw) | ||
| 131 | { | ||
| 132 | if (scsw_is_tm(scsw)) | ||
| 133 | return scsw->tm.actl; | ||
| 134 | else | ||
| 135 | return scsw->cmd.actl; | ||
| 136 | } | ||
| 137 | EXPORT_SYMBOL(scsw_actl); | ||
| 138 | |||
| 139 | /** | ||
| 140 | * scsw_stctl - return scsw stctl field | ||
| 141 | * @scsw: pointer to scsw | ||
| 142 | * | ||
| 143 | * Return the value of the stctl field of the specified scsw, regardless of | ||
| 144 | * whether it is a transport mode or command mode scsw. | ||
| 145 | */ | ||
| 146 | u32 scsw_stctl(union scsw *scsw) | ||
| 147 | { | ||
| 148 | if (scsw_is_tm(scsw)) | ||
| 149 | return scsw->tm.stctl; | ||
| 150 | else | ||
| 151 | return scsw->cmd.stctl; | ||
| 152 | } | ||
| 153 | EXPORT_SYMBOL(scsw_stctl); | ||
| 154 | |||
| 155 | /** | ||
| 156 | * scsw_dstat - return scsw dstat field | ||
| 157 | * @scsw: pointer to scsw | ||
| 158 | * | ||
| 159 | * Return the value of the dstat field of the specified scsw, regardless of | ||
| 160 | * whether it is a transport mode or command mode scsw. | ||
| 161 | */ | ||
| 162 | u32 scsw_dstat(union scsw *scsw) | ||
| 163 | { | ||
| 164 | if (scsw_is_tm(scsw)) | ||
| 165 | return scsw->tm.dstat; | ||
| 166 | else | ||
| 167 | return scsw->cmd.dstat; | ||
| 168 | } | ||
| 169 | EXPORT_SYMBOL(scsw_dstat); | ||
| 170 | |||
| 171 | /** | ||
| 172 | * scsw_cstat - return scsw cstat field | ||
| 173 | * @scsw: pointer to scsw | ||
| 174 | * | ||
| 175 | * Return the value of the cstat field of the specified scsw, regardless of | ||
| 176 | * whether it is a transport mode or command mode scsw. | ||
| 177 | */ | ||
| 178 | u32 scsw_cstat(union scsw *scsw) | ||
| 179 | { | ||
| 180 | if (scsw_is_tm(scsw)) | ||
| 181 | return scsw->tm.cstat; | ||
| 182 | else | ||
| 183 | return scsw->cmd.cstat; | ||
| 184 | } | ||
| 185 | EXPORT_SYMBOL(scsw_cstat); | ||
| 186 | |||
| 187 | /** | ||
| 188 | * scsw_cmd_is_valid_key - check key field validity | ||
| 189 | * @scsw: pointer to scsw | ||
| 190 | * | ||
| 191 | * Return non-zero if the key field of the specified command mode scsw is | ||
| 192 | * valid, zero otherwise. | ||
| 193 | */ | ||
| 194 | int scsw_cmd_is_valid_key(union scsw *scsw) | ||
| 195 | { | ||
| 196 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
| 197 | } | ||
| 198 | EXPORT_SYMBOL(scsw_cmd_is_valid_key); | ||
| 199 | |||
| 200 | /** | ||
| 201 | * scsw_cmd_is_valid_sctl - check sctl field validity | ||
| 202 | * @scsw: pointer to scsw | ||
| 203 | * | ||
| 204 | * Return non-zero if the sctl field of the specified command mode scsw is | ||
| 205 | * valid, zero otherwise. | ||
| 206 | */ | ||
| 207 | int scsw_cmd_is_valid_sctl(union scsw *scsw) | ||
| 208 | { | ||
| 209 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
| 210 | } | ||
| 211 | EXPORT_SYMBOL(scsw_cmd_is_valid_sctl); | ||
| 212 | |||
| 213 | /** | ||
| 214 | * scsw_cmd_is_valid_eswf - check eswf field validity | ||
| 215 | * @scsw: pointer to scsw | ||
| 216 | * | ||
| 217 | * Return non-zero if the eswf field of the specified command mode scsw is | ||
| 218 | * valid, zero otherwise. | ||
| 219 | */ | ||
| 220 | int scsw_cmd_is_valid_eswf(union scsw *scsw) | ||
| 221 | { | ||
| 222 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); | ||
| 223 | } | ||
| 224 | EXPORT_SYMBOL(scsw_cmd_is_valid_eswf); | ||
| 225 | |||
| 226 | /** | ||
| 227 | * scsw_cmd_is_valid_cc - check cc field validity | ||
| 228 | * @scsw: pointer to scsw | ||
| 229 | * | ||
| 230 | * Return non-zero if the cc field of the specified command mode scsw is | ||
| 231 | * valid, zero otherwise. | ||
| 232 | */ | ||
| 233 | int scsw_cmd_is_valid_cc(union scsw *scsw) | ||
| 234 | { | ||
| 235 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && | ||
| 236 | (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); | ||
| 237 | } | ||
| 238 | EXPORT_SYMBOL(scsw_cmd_is_valid_cc); | ||
| 239 | |||
| 240 | /** | ||
| 241 | * scsw_cmd_is_valid_fmt - check fmt field validity | ||
| 242 | * @scsw: pointer to scsw | ||
| 243 | * | ||
| 244 | * Return non-zero if the fmt field of the specified command mode scsw is | ||
| 245 | * valid, zero otherwise. | ||
| 246 | */ | ||
| 247 | int scsw_cmd_is_valid_fmt(union scsw *scsw) | ||
| 248 | { | ||
| 249 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
| 250 | } | ||
| 251 | EXPORT_SYMBOL(scsw_cmd_is_valid_fmt); | ||
| 252 | |||
| 253 | /** | ||
| 254 | * scsw_cmd_is_valid_pfch - check pfch field validity | ||
| 255 | * @scsw: pointer to scsw | ||
| 256 | * | ||
| 257 | * Return non-zero if the pfch field of the specified command mode scsw is | ||
| 258 | * valid, zero otherwise. | ||
| 259 | */ | ||
| 260 | int scsw_cmd_is_valid_pfch(union scsw *scsw) | ||
| 261 | { | ||
| 262 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
| 263 | } | ||
| 264 | EXPORT_SYMBOL(scsw_cmd_is_valid_pfch); | ||
| 265 | |||
| 266 | /** | ||
| 267 | * scsw_cmd_is_valid_isic - check isic field validity | ||
| 268 | * @scsw: pointer to scsw | ||
| 269 | * | ||
| 270 | * Return non-zero if the isic field of the specified command mode scsw is | ||
| 271 | * valid, zero otherwise. | ||
| 272 | */ | ||
| 273 | int scsw_cmd_is_valid_isic(union scsw *scsw) | ||
| 274 | { | ||
| 275 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
| 276 | } | ||
| 277 | EXPORT_SYMBOL(scsw_cmd_is_valid_isic); | ||
| 278 | |||
| 279 | /** | ||
| 280 | * scsw_cmd_is_valid_alcc - check alcc field validity | ||
| 281 | * @scsw: pointer to scsw | ||
| 282 | * | ||
| 283 | * Return non-zero if the alcc field of the specified command mode scsw is | ||
| 284 | * valid, zero otherwise. | ||
| 285 | */ | ||
| 286 | int scsw_cmd_is_valid_alcc(union scsw *scsw) | ||
| 287 | { | ||
| 288 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
| 289 | } | ||
| 290 | EXPORT_SYMBOL(scsw_cmd_is_valid_alcc); | ||
| 291 | |||
| 292 | /** | ||
| 293 | * scsw_cmd_is_valid_ssi - check ssi field validity | ||
| 294 | * @scsw: pointer to scsw | ||
| 295 | * | ||
| 296 | * Return non-zero if the ssi field of the specified command mode scsw is | ||
| 297 | * valid, zero otherwise. | ||
| 298 | */ | ||
| 299 | int scsw_cmd_is_valid_ssi(union scsw *scsw) | ||
| 300 | { | ||
| 301 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); | ||
| 302 | } | ||
| 303 | EXPORT_SYMBOL(scsw_cmd_is_valid_ssi); | ||
| 304 | |||
| 305 | /** | ||
| 306 | * scsw_cmd_is_valid_zcc - check zcc field validity | ||
| 307 | * @scsw: pointer to scsw | ||
| 308 | * | ||
| 309 | * Return non-zero if the zcc field of the specified command mode scsw is | ||
| 310 | * valid, zero otherwise. | ||
| 311 | */ | ||
| 312 | int scsw_cmd_is_valid_zcc(union scsw *scsw) | ||
| 313 | { | ||
| 314 | return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && | ||
| 315 | (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS); | ||
| 316 | } | ||
| 317 | EXPORT_SYMBOL(scsw_cmd_is_valid_zcc); | ||
| 318 | |||
| 319 | /** | ||
| 320 | * scsw_cmd_is_valid_ectl - check ectl field validity | ||
| 321 | * @scsw: pointer to scsw | ||
| 322 | * | ||
| 323 | * Return non-zero if the ectl field of the specified command mode scsw is | ||
| 324 | * valid, zero otherwise. | ||
| 325 | */ | ||
| 326 | int scsw_cmd_is_valid_ectl(union scsw *scsw) | ||
| 327 | { | ||
| 328 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
| 329 | !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && | ||
| 330 | (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS); | ||
| 331 | } | ||
| 332 | EXPORT_SYMBOL(scsw_cmd_is_valid_ectl); | ||
| 333 | |||
| 334 | /** | ||
| 335 | * scsw_cmd_is_valid_pno - check pno field validity | ||
| 336 | * @scsw: pointer to scsw | ||
| 337 | * | ||
| 338 | * Return non-zero if the pno field of the specified command mode scsw is | ||
| 339 | * valid, zero otherwise. | ||
| 340 | */ | ||
| 341 | int scsw_cmd_is_valid_pno(union scsw *scsw) | ||
| 342 | { | ||
| 343 | return (scsw->cmd.fctl != 0) && | ||
| 344 | (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
| 345 | (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) || | ||
| 346 | ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && | ||
| 347 | (scsw->cmd.actl & SCSW_ACTL_SUSPENDED))); | ||
| 348 | } | ||
| 349 | EXPORT_SYMBOL(scsw_cmd_is_valid_pno); | ||
| 350 | |||
| 351 | /** | ||
| 352 | * scsw_cmd_is_valid_fctl - check fctl field validity | ||
| 353 | * @scsw: pointer to scsw | ||
| 354 | * | ||
| 355 | * Return non-zero if the fctl field of the specified command mode scsw is | ||
| 356 | * valid, zero otherwise. | ||
| 357 | */ | ||
| 358 | int scsw_cmd_is_valid_fctl(union scsw *scsw) | ||
| 359 | { | ||
| 360 | /* Only valid if pmcw.dnv == 1*/ | ||
| 361 | return 1; | ||
| 362 | } | ||
| 363 | EXPORT_SYMBOL(scsw_cmd_is_valid_fctl); | ||
| 364 | |||
| 365 | /** | ||
| 366 | * scsw_cmd_is_valid_actl - check actl field validity | ||
| 367 | * @scsw: pointer to scsw | ||
| 368 | * | ||
| 369 | * Return non-zero if the actl field of the specified command mode scsw is | ||
| 370 | * valid, zero otherwise. | ||
| 371 | */ | ||
| 372 | int scsw_cmd_is_valid_actl(union scsw *scsw) | ||
| 373 | { | ||
| 374 | /* Only valid if pmcw.dnv == 1*/ | ||
| 375 | return 1; | ||
| 376 | } | ||
| 377 | EXPORT_SYMBOL(scsw_cmd_is_valid_actl); | ||
| 378 | |||
| 379 | /** | ||
| 380 | * scsw_cmd_is_valid_stctl - check stctl field validity | ||
| 381 | * @scsw: pointer to scsw | ||
| 382 | * | ||
| 383 | * Return non-zero if the stctl field of the specified command mode scsw is | ||
| 384 | * valid, zero otherwise. | ||
| 385 | */ | ||
| 386 | int scsw_cmd_is_valid_stctl(union scsw *scsw) | ||
| 387 | { | ||
| 388 | /* Only valid if pmcw.dnv == 1*/ | ||
| 389 | return 1; | ||
| 390 | } | ||
| 391 | EXPORT_SYMBOL(scsw_cmd_is_valid_stctl); | ||
| 392 | |||
| 393 | /** | ||
| 394 | * scsw_cmd_is_valid_dstat - check dstat field validity | ||
| 395 | * @scsw: pointer to scsw | ||
| 396 | * | ||
| 397 | * Return non-zero if the dstat field of the specified command mode scsw is | ||
| 398 | * valid, zero otherwise. | ||
| 399 | */ | ||
| 400 | int scsw_cmd_is_valid_dstat(union scsw *scsw) | ||
| 401 | { | ||
| 402 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
| 403 | (scsw->cmd.cc != 3); | ||
| 404 | } | ||
| 405 | EXPORT_SYMBOL(scsw_cmd_is_valid_dstat); | ||
| 406 | |||
| 407 | /** | ||
| 408 | * scsw_cmd_is_valid_cstat - check cstat field validity | ||
| 409 | * @scsw: pointer to scsw | ||
| 410 | * | ||
| 411 | * Return non-zero if the cstat field of the specified command mode scsw is | ||
| 412 | * valid, zero otherwise. | ||
| 413 | */ | ||
| 414 | int scsw_cmd_is_valid_cstat(union scsw *scsw) | ||
| 415 | { | ||
| 416 | return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && | ||
| 417 | (scsw->cmd.cc != 3); | ||
| 418 | } | ||
| 419 | EXPORT_SYMBOL(scsw_cmd_is_valid_cstat); | ||
| 420 | |||
| 421 | /** | ||
| 422 | * scsw_tm_is_valid_key - check key field validity | ||
| 423 | * @scsw: pointer to scsw | ||
| 424 | * | ||
| 425 | * Return non-zero if the key field of the specified transport mode scsw is | ||
| 426 | * valid, zero otherwise. | ||
| 427 | */ | ||
| 428 | int scsw_tm_is_valid_key(union scsw *scsw) | ||
| 429 | { | ||
| 430 | return (scsw->tm.fctl & SCSW_FCTL_START_FUNC); | ||
| 431 | } | ||
| 432 | EXPORT_SYMBOL(scsw_tm_is_valid_key); | ||
| 433 | |||
| 434 | /** | ||
| 435 | * scsw_tm_is_valid_eswf - check eswf field validity | ||
| 436 | * @scsw: pointer to scsw | ||
| 437 | * | ||
| 438 | * Return non-zero if the eswf field of the specified transport mode scsw is | ||
| 439 | * valid, zero otherwise. | ||
| 440 | */ | ||
| 441 | int scsw_tm_is_valid_eswf(union scsw *scsw) | ||
| 442 | { | ||
| 443 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); | ||
| 444 | } | ||
| 445 | EXPORT_SYMBOL(scsw_tm_is_valid_eswf); | ||
| 446 | |||
| 447 | /** | ||
| 448 | * scsw_tm_is_valid_cc - check cc field validity | ||
| 449 | * @scsw: pointer to scsw | ||
| 450 | * | ||
| 451 | * Return non-zero if the cc field of the specified transport mode scsw is | ||
| 452 | * valid, zero otherwise. | ||
| 453 | */ | ||
| 454 | int scsw_tm_is_valid_cc(union scsw *scsw) | ||
| 455 | { | ||
| 456 | return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) && | ||
| 457 | (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); | ||
| 458 | } | ||
| 459 | EXPORT_SYMBOL(scsw_tm_is_valid_cc); | ||
| 460 | |||
| 461 | /** | ||
| 462 | * scsw_tm_is_valid_fmt - check fmt field validity | ||
| 463 | * @scsw: pointer to scsw | ||
| 464 | * | ||
| 465 | * Return non-zero if the fmt field of the specified transport mode scsw is | ||
| 466 | * valid, zero otherwise. | ||
| 467 | */ | ||
| 468 | int scsw_tm_is_valid_fmt(union scsw *scsw) | ||
| 469 | { | ||
| 470 | return 1; | ||
| 471 | } | ||
| 472 | EXPORT_SYMBOL(scsw_tm_is_valid_fmt); | ||
| 473 | |||
| 474 | /** | ||
| 475 | * scsw_tm_is_valid_x - check x field validity | ||
| 476 | * @scsw: pointer to scsw | ||
| 477 | * | ||
| 478 | * Return non-zero if the x field of the specified transport mode scsw is | ||
| 479 | * valid, zero otherwise. | ||
| 480 | */ | ||
| 481 | int scsw_tm_is_valid_x(union scsw *scsw) | ||
| 482 | { | ||
| 483 | return 1; | ||
| 484 | } | ||
| 485 | EXPORT_SYMBOL(scsw_tm_is_valid_x); | ||
| 486 | |||
| 487 | /** | ||
| 488 | * scsw_tm_is_valid_q - check q field validity | ||
| 489 | * @scsw: pointer to scsw | ||
| 490 | * | ||
| 491 | * Return non-zero if the q field of the specified transport mode scsw is | ||
| 492 | * valid, zero otherwise. | ||
| 493 | */ | ||
| 494 | int scsw_tm_is_valid_q(union scsw *scsw) | ||
| 495 | { | ||
| 496 | return 1; | ||
| 497 | } | ||
| 498 | EXPORT_SYMBOL(scsw_tm_is_valid_q); | ||
| 499 | |||
| 500 | /** | ||
| 501 | * scsw_tm_is_valid_ectl - check ectl field validity | ||
| 502 | * @scsw: pointer to scsw | ||
| 503 | * | ||
| 504 | * Return non-zero if the ectl field of the specified transport mode scsw is | ||
| 505 | * valid, zero otherwise. | ||
| 506 | */ | ||
| 507 | int scsw_tm_is_valid_ectl(union scsw *scsw) | ||
| 508 | { | ||
| 509 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
| 510 | !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && | ||
| 511 | (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS); | ||
| 512 | } | ||
| 513 | EXPORT_SYMBOL(scsw_tm_is_valid_ectl); | ||
| 514 | |||
| 515 | /** | ||
| 516 | * scsw_tm_is_valid_pno - check pno field validity | ||
| 517 | * @scsw: pointer to scsw | ||
| 518 | * | ||
| 519 | * Return non-zero if the pno field of the specified transport mode scsw is | ||
| 520 | * valid, zero otherwise. | ||
| 521 | */ | ||
| 522 | int scsw_tm_is_valid_pno(union scsw *scsw) | ||
| 523 | { | ||
| 524 | return (scsw->tm.fctl != 0) && | ||
| 525 | (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
| 526 | (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) || | ||
| 527 | ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && | ||
| 528 | (scsw->tm.actl & SCSW_ACTL_SUSPENDED))); | ||
| 529 | } | ||
| 530 | EXPORT_SYMBOL(scsw_tm_is_valid_pno); | ||
| 531 | |||
| 532 | /** | ||
| 533 | * scsw_tm_is_valid_fctl - check fctl field validity | ||
| 534 | * @scsw: pointer to scsw | ||
| 535 | * | ||
| 536 | * Return non-zero if the fctl field of the specified transport mode scsw is | ||
| 537 | * valid, zero otherwise. | ||
| 538 | */ | ||
| 539 | int scsw_tm_is_valid_fctl(union scsw *scsw) | ||
| 540 | { | ||
| 541 | /* Only valid if pmcw.dnv == 1*/ | ||
| 542 | return 1; | ||
| 543 | } | ||
| 544 | EXPORT_SYMBOL(scsw_tm_is_valid_fctl); | ||
| 545 | |||
| 546 | /** | ||
| 547 | * scsw_tm_is_valid_actl - check actl field validity | ||
| 548 | * @scsw: pointer to scsw | ||
| 549 | * | ||
| 550 | * Return non-zero if the actl field of the specified transport mode scsw is | ||
| 551 | * valid, zero otherwise. | ||
| 552 | */ | ||
| 553 | int scsw_tm_is_valid_actl(union scsw *scsw) | ||
| 554 | { | ||
| 555 | /* Only valid if pmcw.dnv == 1*/ | ||
| 556 | return 1; | ||
| 557 | } | ||
| 558 | EXPORT_SYMBOL(scsw_tm_is_valid_actl); | ||
| 559 | |||
| 560 | /** | ||
| 561 | * scsw_tm_is_valid_stctl - check stctl field validity | ||
| 562 | * @scsw: pointer to scsw | ||
| 563 | * | ||
| 564 | * Return non-zero if the stctl field of the specified transport mode scsw is | ||
| 565 | * valid, zero otherwise. | ||
| 566 | */ | ||
| 567 | int scsw_tm_is_valid_stctl(union scsw *scsw) | ||
| 568 | { | ||
| 569 | /* Only valid if pmcw.dnv == 1*/ | ||
| 570 | return 1; | ||
| 571 | } | ||
| 572 | EXPORT_SYMBOL(scsw_tm_is_valid_stctl); | ||
| 573 | |||
| 574 | /** | ||
| 575 | * scsw_tm_is_valid_dstat - check dstat field validity | ||
| 576 | * @scsw: pointer to scsw | ||
| 577 | * | ||
| 578 | * Return non-zero if the dstat field of the specified transport mode scsw is | ||
| 579 | * valid, zero otherwise. | ||
| 580 | */ | ||
| 581 | int scsw_tm_is_valid_dstat(union scsw *scsw) | ||
| 582 | { | ||
| 583 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
| 584 | (scsw->tm.cc != 3); | ||
| 585 | } | ||
| 586 | EXPORT_SYMBOL(scsw_tm_is_valid_dstat); | ||
| 587 | |||
| 588 | /** | ||
| 589 | * scsw_tm_is_valid_cstat - check cstat field validity | ||
| 590 | * @scsw: pointer to scsw | ||
| 591 | * | ||
| 592 | * Return non-zero if the cstat field of the specified transport mode scsw is | ||
| 593 | * valid, zero otherwise. | ||
| 594 | */ | ||
| 595 | int scsw_tm_is_valid_cstat(union scsw *scsw) | ||
| 596 | { | ||
| 597 | return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && | ||
| 598 | (scsw->tm.cc != 3); | ||
| 599 | } | ||
| 600 | EXPORT_SYMBOL(scsw_tm_is_valid_cstat); | ||
| 601 | |||
| 602 | /** | ||
| 603 | * scsw_tm_is_valid_fcxs - check fcxs field validity | ||
| 604 | * @scsw: pointer to scsw | ||
| 605 | * | ||
| 606 | * Return non-zero if the fcxs field of the specified transport mode scsw is | ||
| 607 | * valid, zero otherwise. | ||
| 608 | */ | ||
| 609 | int scsw_tm_is_valid_fcxs(union scsw *scsw) | ||
| 610 | { | ||
| 611 | return 1; | ||
| 612 | } | ||
| 613 | EXPORT_SYMBOL(scsw_tm_is_valid_fcxs); | ||
| 614 | |||
| 615 | /** | ||
| 616 | * scsw_tm_is_valid_schxs - check schxs field validity | ||
| 617 | * @scsw: pointer to scsw | ||
| 618 | * | ||
| 619 | * Return non-zero if the schxs field of the specified transport mode scsw is | ||
| 620 | * valid, zero otherwise. | ||
| 621 | */ | ||
| 622 | int scsw_tm_is_valid_schxs(union scsw *scsw) | ||
| 623 | { | ||
| 624 | return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK | | ||
| 625 | SCHN_STAT_INTF_CTRL_CHK | | ||
| 626 | SCHN_STAT_PROT_CHECK | | ||
| 627 | SCHN_STAT_CHN_DATA_CHK)); | ||
| 628 | } | ||
| 629 | EXPORT_SYMBOL(scsw_tm_is_valid_schxs); | ||
| 630 | |||
| 631 | /** | ||
| 632 | * scsw_is_valid_actl - check actl field validity | ||
| 633 | * @scsw: pointer to scsw | ||
| 634 | * | ||
| 635 | * Return non-zero if the actl field of the specified scsw is valid, | ||
| 636 | * regardless of whether it is a transport mode or command mode scsw. | ||
| 637 | * Return zero if the field does not contain a valid value. | ||
| 638 | */ | ||
| 639 | int scsw_is_valid_actl(union scsw *scsw) | ||
| 640 | { | ||
| 641 | if (scsw_is_tm(scsw)) | ||
| 642 | return scsw_tm_is_valid_actl(scsw); | ||
| 643 | else | ||
| 644 | return scsw_cmd_is_valid_actl(scsw); | ||
| 645 | } | ||
| 646 | EXPORT_SYMBOL(scsw_is_valid_actl); | ||
| 647 | |||
| 648 | /** | ||
| 649 | * scsw_is_valid_cc - check cc field validity | ||
| 650 | * @scsw: pointer to scsw | ||
| 651 | * | ||
| 652 | * Return non-zero if the cc field of the specified scsw is valid, | ||
| 653 | * regardless of whether it is a transport mode or command mode scsw. | ||
| 654 | * Return zero if the field does not contain a valid value. | ||
| 655 | */ | ||
| 656 | int scsw_is_valid_cc(union scsw *scsw) | ||
| 657 | { | ||
| 658 | if (scsw_is_tm(scsw)) | ||
| 659 | return scsw_tm_is_valid_cc(scsw); | ||
| 660 | else | ||
| 661 | return scsw_cmd_is_valid_cc(scsw); | ||
| 662 | } | ||
| 663 | EXPORT_SYMBOL(scsw_is_valid_cc); | ||
| 664 | |||
| 665 | /** | ||
| 666 | * scsw_is_valid_cstat - check cstat field validity | ||
| 667 | * @scsw: pointer to scsw | ||
| 668 | * | ||
| 669 | * Return non-zero if the cstat field of the specified scsw is valid, | ||
| 670 | * regardless of whether it is a transport mode or command mode scsw. | ||
| 671 | * Return zero if the field does not contain a valid value. | ||
| 672 | */ | ||
| 673 | int scsw_is_valid_cstat(union scsw *scsw) | ||
| 674 | { | ||
| 675 | if (scsw_is_tm(scsw)) | ||
| 676 | return scsw_tm_is_valid_cstat(scsw); | ||
| 677 | else | ||
| 678 | return scsw_cmd_is_valid_cstat(scsw); | ||
| 679 | } | ||
| 680 | EXPORT_SYMBOL(scsw_is_valid_cstat); | ||
| 681 | |||
| 682 | /** | ||
| 683 | * scsw_is_valid_dstat - check dstat field validity | ||
| 684 | * @scsw: pointer to scsw | ||
| 685 | * | ||
| 686 | * Return non-zero if the dstat field of the specified scsw is valid, | ||
| 687 | * regardless of whether it is a transport mode or command mode scsw. | ||
| 688 | * Return zero if the field does not contain a valid value. | ||
| 689 | */ | ||
| 690 | int scsw_is_valid_dstat(union scsw *scsw) | ||
| 691 | { | ||
| 692 | if (scsw_is_tm(scsw)) | ||
| 693 | return scsw_tm_is_valid_dstat(scsw); | ||
| 694 | else | ||
| 695 | return scsw_cmd_is_valid_dstat(scsw); | ||
| 696 | } | ||
| 697 | EXPORT_SYMBOL(scsw_is_valid_dstat); | ||
| 698 | |||
| 699 | /** | ||
| 700 | * scsw_is_valid_ectl - check ectl field validity | ||
| 701 | * @scsw: pointer to scsw | ||
| 702 | * | ||
| 703 | * Return non-zero if the ectl field of the specified scsw is valid, | ||
| 704 | * regardless of whether it is a transport mode or command mode scsw. | ||
| 705 | * Return zero if the field does not contain a valid value. | ||
| 706 | */ | ||
| 707 | int scsw_is_valid_ectl(union scsw *scsw) | ||
| 708 | { | ||
| 709 | if (scsw_is_tm(scsw)) | ||
| 710 | return scsw_tm_is_valid_ectl(scsw); | ||
| 711 | else | ||
| 712 | return scsw_cmd_is_valid_ectl(scsw); | ||
| 713 | } | ||
| 714 | EXPORT_SYMBOL(scsw_is_valid_ectl); | ||
| 715 | |||
| 716 | /** | ||
| 717 | * scsw_is_valid_eswf - check eswf field validity | ||
| 718 | * @scsw: pointer to scsw | ||
| 719 | * | ||
| 720 | * Return non-zero if the eswf field of the specified scsw is valid, | ||
| 721 | * regardless of whether it is a transport mode or command mode scsw. | ||
| 722 | * Return zero if the field does not contain a valid value. | ||
| 723 | */ | ||
| 724 | int scsw_is_valid_eswf(union scsw *scsw) | ||
| 725 | { | ||
| 726 | if (scsw_is_tm(scsw)) | ||
| 727 | return scsw_tm_is_valid_eswf(scsw); | ||
| 728 | else | ||
| 729 | return scsw_cmd_is_valid_eswf(scsw); | ||
| 730 | } | ||
| 731 | EXPORT_SYMBOL(scsw_is_valid_eswf); | ||
| 732 | |||
| 733 | /** | ||
| 734 | * scsw_is_valid_fctl - check fctl field validity | ||
| 735 | * @scsw: pointer to scsw | ||
| 736 | * | ||
| 737 | * Return non-zero if the fctl field of the specified scsw is valid, | ||
| 738 | * regardless of whether it is a transport mode or command mode scsw. | ||
| 739 | * Return zero if the field does not contain a valid value. | ||
| 740 | */ | ||
| 741 | int scsw_is_valid_fctl(union scsw *scsw) | ||
| 742 | { | ||
| 743 | if (scsw_is_tm(scsw)) | ||
| 744 | return scsw_tm_is_valid_fctl(scsw); | ||
| 745 | else | ||
| 746 | return scsw_cmd_is_valid_fctl(scsw); | ||
| 747 | } | ||
| 748 | EXPORT_SYMBOL(scsw_is_valid_fctl); | ||
| 749 | |||
| 750 | /** | ||
| 751 | * scsw_is_valid_key - check key field validity | ||
| 752 | * @scsw: pointer to scsw | ||
| 753 | * | ||
| 754 | * Return non-zero if the key field of the specified scsw is valid, | ||
| 755 | * regardless of whether it is a transport mode or command mode scsw. | ||
| 756 | * Return zero if the field does not contain a valid value. | ||
| 757 | */ | ||
| 758 | int scsw_is_valid_key(union scsw *scsw) | ||
| 759 | { | ||
| 760 | if (scsw_is_tm(scsw)) | ||
| 761 | return scsw_tm_is_valid_key(scsw); | ||
| 762 | else | ||
| 763 | return scsw_cmd_is_valid_key(scsw); | ||
| 764 | } | ||
| 765 | EXPORT_SYMBOL(scsw_is_valid_key); | ||
| 766 | |||
| 767 | /** | ||
| 768 | * scsw_is_valid_pno - check pno field validity | ||
| 769 | * @scsw: pointer to scsw | ||
| 770 | * | ||
| 771 | * Return non-zero if the pno field of the specified scsw is valid, | ||
| 772 | * regardless of whether it is a transport mode or command mode scsw. | ||
| 773 | * Return zero if the field does not contain a valid value. | ||
| 774 | */ | ||
| 775 | int scsw_is_valid_pno(union scsw *scsw) | ||
| 776 | { | ||
| 777 | if (scsw_is_tm(scsw)) | ||
| 778 | return scsw_tm_is_valid_pno(scsw); | ||
| 779 | else | ||
| 780 | return scsw_cmd_is_valid_pno(scsw); | ||
| 781 | } | ||
| 782 | EXPORT_SYMBOL(scsw_is_valid_pno); | ||
| 783 | |||
| 784 | /** | ||
| 785 | * scsw_is_valid_stctl - check stctl field validity | ||
| 786 | * @scsw: pointer to scsw | ||
| 787 | * | ||
| 788 | * Return non-zero if the stctl field of the specified scsw is valid, | ||
| 789 | * regardless of whether it is a transport mode or command mode scsw. | ||
| 790 | * Return zero if the field does not contain a valid value. | ||
| 791 | */ | ||
| 792 | int scsw_is_valid_stctl(union scsw *scsw) | ||
| 793 | { | ||
| 794 | if (scsw_is_tm(scsw)) | ||
| 795 | return scsw_tm_is_valid_stctl(scsw); | ||
| 796 | else | ||
| 797 | return scsw_cmd_is_valid_stctl(scsw); | ||
| 798 | } | ||
| 799 | EXPORT_SYMBOL(scsw_is_valid_stctl); | ||
| 800 | |||
| 801 | /** | ||
| 802 | * scsw_cmd_is_solicited - check for solicited scsw | ||
| 803 | * @scsw: pointer to scsw | ||
| 804 | * | ||
| 805 | * Return non-zero if the command mode scsw indicates that the associated | ||
| 806 | * status condition is solicited, zero if it is unsolicited. | ||
| 807 | */ | ||
| 808 | int scsw_cmd_is_solicited(union scsw *scsw) | ||
| 809 | { | ||
| 810 | return (scsw->cmd.cc != 0) || (scsw->cmd.stctl != | ||
| 811 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); | ||
| 812 | } | ||
| 813 | EXPORT_SYMBOL(scsw_cmd_is_solicited); | ||
| 814 | |||
| 815 | /** | ||
| 816 | * scsw_tm_is_solicited - check for solicited scsw | ||
| 817 | * @scsw: pointer to scsw | ||
| 818 | * | ||
| 819 | * Return non-zero if the transport mode scsw indicates that the associated | ||
| 820 | * status condition is solicited, zero if it is unsolicited. | ||
| 821 | */ | ||
| 822 | int scsw_tm_is_solicited(union scsw *scsw) | ||
| 823 | { | ||
| 824 | return (scsw->tm.cc != 0) || (scsw->tm.stctl != | ||
| 825 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); | ||
| 826 | } | ||
| 827 | EXPORT_SYMBOL(scsw_tm_is_solicited); | ||
| 828 | |||
| 829 | /** | ||
| 830 | * scsw_is_solicited - check for solicited scsw | ||
| 831 | * @scsw: pointer to scsw | ||
| 832 | * | ||
| 833 | * Return non-zero if the transport or command mode scsw indicates that the | ||
| 834 | * associated status condition is solicited, zero if it is unsolicited. | ||
| 835 | */ | ||
| 836 | int scsw_is_solicited(union scsw *scsw) | ||
| 837 | { | ||
| 838 | if (scsw_is_tm(scsw)) | ||
| 839 | return scsw_tm_is_solicited(scsw); | ||
| 840 | else | ||
| 841 | return scsw_cmd_is_solicited(scsw); | ||
| 842 | } | ||
| 843 | EXPORT_SYMBOL(scsw_is_solicited); | ||
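The accessor and validity helpers are meant to be used in pairs: a caller first checks that a field carries meaningful information for the current status before consuming it. A minimal hedged sketch (DEV_STAT_DEV_END comes from asm/cio.h; the function name is illustrative):

/* Sketch only: report device end, but only when dstat is actually valid. */
static int example_device_end_pending(union scsw *scsw)
{
	if (!scsw_is_valid_dstat(scsw))
		return 0;	/* no status pending, or cc == 3 */
	return (scsw_dstat(scsw) & DEV_STAT_DEV_END) != 0;
}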
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index a1ab3e3efd11..62b6b55230d0 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
| @@ -34,13 +34,15 @@ | |||
| 34 | #include <linux/mutex.h> | 34 | #include <linux/mutex.h> |
| 35 | #include <asm/s390_rdev.h> | 35 | #include <asm/s390_rdev.h> |
| 36 | #include <asm/reset.h> | 36 | #include <asm/reset.h> |
| 37 | #include <linux/hrtimer.h> | ||
| 38 | #include <linux/ktime.h> | ||
| 37 | 39 | ||
| 38 | #include "ap_bus.h" | 40 | #include "ap_bus.h" |
| 39 | 41 | ||
| 40 | /* Some prototypes. */ | 42 | /* Some prototypes. */ |
| 41 | static void ap_scan_bus(struct work_struct *); | 43 | static void ap_scan_bus(struct work_struct *); |
| 42 | static void ap_poll_all(unsigned long); | 44 | static void ap_poll_all(unsigned long); |
| 43 | static void ap_poll_timeout(unsigned long); | 45 | static enum hrtimer_restart ap_poll_timeout(struct hrtimer *); |
| 44 | static int ap_poll_thread_start(void); | 46 | static int ap_poll_thread_start(void); |
| 45 | static void ap_poll_thread_stop(void); | 47 | static void ap_poll_thread_stop(void); |
| 46 | static void ap_request_timeout(unsigned long); | 48 | static void ap_request_timeout(unsigned long); |
| @@ -80,12 +82,15 @@ static DECLARE_WORK(ap_config_work, ap_scan_bus); | |||
| 80 | /* | 82 | /* |
| 81 | * Tasklet & timer for AP request polling. | 83 | * Tasklet & timer for AP request polling. |
| 82 | */ | 84 | */ |
| 83 | static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0); | ||
| 84 | static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); | 85 | static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); |
| 85 | static atomic_t ap_poll_requests = ATOMIC_INIT(0); | 86 | static atomic_t ap_poll_requests = ATOMIC_INIT(0); |
| 86 | static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); | 87 | static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); |
| 87 | static struct task_struct *ap_poll_kthread = NULL; | 88 | static struct task_struct *ap_poll_kthread = NULL; |
| 88 | static DEFINE_MUTEX(ap_poll_thread_mutex); | 89 | static DEFINE_MUTEX(ap_poll_thread_mutex); |
| 90 | static struct hrtimer ap_poll_timer; | ||
| 91 | /* In LPAR, poll with 4kHz frequency, i.e. every 250000 nanoseconds. | ||
| 92 | * When running under z/VM, use 1500000 nanoseconds to match z/VM polling. */ | ||
| 93 | static unsigned long long poll_timeout = 250000; | ||
| 89 | 94 | ||
| 90 | /** | 95 | /** |
| 91 | * ap_intructions_available() - Test if AP instructions are available. | 96 | * ap_intructions_available() - Test if AP instructions are available. |
| @@ -636,11 +641,39 @@ static ssize_t ap_poll_thread_store(struct bus_type *bus, | |||
| 636 | 641 | ||
| 637 | static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store); | 642 | static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store); |
| 638 | 643 | ||
| 644 | static ssize_t poll_timeout_show(struct bus_type *bus, char *buf) | ||
| 645 | { | ||
| 646 | return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout); | ||
| 647 | } | ||
| 648 | |||
| 649 | static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf, | ||
| 650 | size_t count) | ||
| 651 | { | ||
| 652 | unsigned long long time; | ||
| 653 | ktime_t hr_time; | ||
| 654 | |||
| 655 | /* 120 seconds = maximum poll interval */ | ||
| 656 | if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || time > 120000000000) | ||
| 657 | return -EINVAL; | ||
| 658 | poll_timeout = time; | ||
| 659 | hr_time = ktime_set(0, poll_timeout); | ||
| 660 | |||
| 661 | if (!hrtimer_is_queued(&ap_poll_timer) || | ||
| 662 | !hrtimer_forward(&ap_poll_timer, ap_poll_timer.expires, hr_time)) { | ||
| 663 | ap_poll_timer.expires = hr_time; | ||
| 664 | hrtimer_start(&ap_poll_timer, hr_time, HRTIMER_MODE_ABS); | ||
| 665 | } | ||
| 666 | return count; | ||
| 667 | } | ||
| 668 | |||
| 669 | static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store); | ||
| 670 | |||
| 639 | static struct bus_attribute *const ap_bus_attrs[] = { | 671 | static struct bus_attribute *const ap_bus_attrs[] = { |
| 640 | &bus_attr_ap_domain, | 672 | &bus_attr_ap_domain, |
| 641 | &bus_attr_config_time, | 673 | &bus_attr_config_time, |
| 642 | &bus_attr_poll_thread, | 674 | &bus_attr_poll_thread, |
| 643 | NULL | 675 | &bus_attr_poll_timeout, |
| 676 | NULL, | ||
| 644 | }; | 677 | }; |
| 645 | 678 | ||
| 646 | /** | 679 | /** |
| @@ -895,9 +928,10 @@ ap_config_timeout(unsigned long ptr) | |||
| 895 | */ | 928 | */ |
| 896 | static inline void ap_schedule_poll_timer(void) | 929 | static inline void ap_schedule_poll_timer(void) |
| 897 | { | 930 | { |
| 898 | if (timer_pending(&ap_poll_timer)) | 931 | if (hrtimer_is_queued(&ap_poll_timer)) |
| 899 | return; | 932 | return; |
| 900 | mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME); | 933 | hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), |
| 934 | HRTIMER_MODE_ABS); | ||
| 901 | } | 935 | } |
| 902 | 936 | ||
| 903 | /** | 937 | /** |
| @@ -1115,13 +1149,14 @@ EXPORT_SYMBOL(ap_cancel_message); | |||
| 1115 | 1149 | ||
| 1116 | /** | 1150 | /** |
| 1117 | * ap_poll_timeout(): AP receive polling for finished AP requests. | 1151 | * ap_poll_timeout(): AP receive polling for finished AP requests. |
| 1118 | * @unused: Unused variable. | 1152 | * @unused: Unused pointer. |
| 1119 | * | 1153 | * |
| 1120 | * Schedules the AP tasklet. | 1154 | * Schedules the AP tasklet using a high resolution timer. |
| 1121 | */ | 1155 | */ |
| 1122 | static void ap_poll_timeout(unsigned long unused) | 1156 | static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused) |
| 1123 | { | 1157 | { |
| 1124 | tasklet_schedule(&ap_tasklet); | 1158 | tasklet_schedule(&ap_tasklet); |
| 1159 | return HRTIMER_NORESTART; | ||
| 1125 | } | 1160 | } |
| 1126 | 1161 | ||
| 1127 | /** | 1162 | /** |
| @@ -1344,6 +1379,14 @@ int __init ap_module_init(void) | |||
| 1344 | ap_config_timer.expires = jiffies + ap_config_time * HZ; | 1379 | ap_config_timer.expires = jiffies + ap_config_time * HZ; |
| 1345 | add_timer(&ap_config_timer); | 1380 | add_timer(&ap_config_timer); |
| 1346 | 1381 | ||
| 1382 | /* Set up the high-resolution poll timer. | ||
| 1383 | * If we are running under z/VM, adjust polling to the z/VM polling rate. | ||
| 1384 | */ | ||
| 1385 | if (MACHINE_IS_VM) | ||
| 1386 | poll_timeout = 1500000; | ||
| 1387 | hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
| 1388 | ap_poll_timer.function = ap_poll_timeout; | ||
| 1389 | |||
| 1347 | /* Start the low priority AP bus poll thread. */ | 1390 | /* Start the low priority AP bus poll thread. */ |
| 1348 | if (ap_thread_flag) { | 1391 | if (ap_thread_flag) { |
| 1349 | rc = ap_poll_thread_start(); | 1392 | rc = ap_poll_thread_start(); |
| @@ -1355,7 +1398,7 @@ int __init ap_module_init(void) | |||
| 1355 | 1398 | ||
| 1356 | out_work: | 1399 | out_work: |
| 1357 | del_timer_sync(&ap_config_timer); | 1400 | del_timer_sync(&ap_config_timer); |
| 1358 | del_timer_sync(&ap_poll_timer); | 1401 | hrtimer_cancel(&ap_poll_timer); |
| 1359 | destroy_workqueue(ap_work_queue); | 1402 | destroy_workqueue(ap_work_queue); |
| 1360 | out_root: | 1403 | out_root: |
| 1361 | s390_root_dev_unregister(ap_root_device); | 1404 | s390_root_dev_unregister(ap_root_device); |
| @@ -1386,7 +1429,7 @@ void ap_module_exit(void) | |||
| 1386 | ap_reset_domain(); | 1429 | ap_reset_domain(); |
| 1387 | ap_poll_thread_stop(); | 1430 | ap_poll_thread_stop(); |
| 1388 | del_timer_sync(&ap_config_timer); | 1431 | del_timer_sync(&ap_config_timer); |
| 1389 | del_timer_sync(&ap_poll_timer); | 1432 | hrtimer_cancel(&ap_poll_timer); |
| 1390 | destroy_workqueue(ap_work_queue); | 1433 | destroy_workqueue(ap_work_queue); |
| 1391 | tasklet_kill(&ap_tasklet); | 1434 | tasklet_kill(&ap_tasklet); |
| 1392 | s390_root_dev_unregister(ap_root_device); | 1435 | s390_root_dev_unregister(ap_root_device); |
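A minimal sketch of the hrtimer-based polling pattern introduced in the ap_bus.c hunks above, built only from the calls visible in the diff (hrtimer_init, hrtimer_start, hrtimer_is_queued, hrtimer_cancel, tasklet_schedule); all demo_* identifiers and the 250000 ns interval are illustrative assumptions, not names taken from the driver:

#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/interrupt.h>

static unsigned long long demo_poll_ns = 250000;	/* poll interval in ns (assumed default) */
static struct hrtimer demo_poll_timer;

/* bottom half doing the actual poll work */
static void demo_poll(unsigned long unused)
{
	/* a real driver would walk its device queues here */
}
static DECLARE_TASKLET(demo_tasklet, demo_poll, 0);

/* hrtimer callback: only kicks the tasklet, never rearms itself */
static enum hrtimer_restart demo_poll_timeout(struct hrtimer *unused)
{
	tasklet_schedule(&demo_tasklet);
	return HRTIMER_NORESTART;
}

/* arm the poll timer unless it is already queued */
static void demo_schedule_poll_timer(void)
{
	if (hrtimer_is_queued(&demo_poll_timer))
		return;
	hrtimer_start(&demo_poll_timer, ktime_set(0, demo_poll_ns),
		      HRTIMER_MODE_ABS);
}

static int __init demo_init(void)
{
	hrtimer_init(&demo_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	demo_poll_timer.function = demo_poll_timeout;
	demo_schedule_poll_timer();
	return 0;
}

static void __exit demo_exit(void)
{
	hrtimer_cancel(&demo_poll_timer);
	tasklet_kill(&demo_tasklet);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The new poll_timeout attribute accepts a nanosecond value between 1 and 120000000000 (120 seconds). Since it is registered as a bus attribute rather than a device attribute, it presumably shows up as a single sysfs file on the AP bus (e.g. /sys/bus/ap/poll_timeout; the exact path is not part of this diff).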
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index c1e1200c43fc..446378b308fc 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h | |||
| @@ -92,6 +92,8 @@ struct ap_queue_status { | |||
| 92 | #define AP_DEVICE_TYPE_PCIXCC 5 | 92 | #define AP_DEVICE_TYPE_PCIXCC 5 |
| 93 | #define AP_DEVICE_TYPE_CEX2A 6 | 93 | #define AP_DEVICE_TYPE_CEX2A 6 |
| 94 | #define AP_DEVICE_TYPE_CEX2C 7 | 94 | #define AP_DEVICE_TYPE_CEX2C 7 |
| 95 | #define AP_DEVICE_TYPE_CEX2A2 8 | ||
| 96 | #define AP_DEVICE_TYPE_CEX2C2 9 | ||
| 95 | 97 | ||
| 96 | /* | 98 | /* |
| 97 | * AP reset flag states | 99 | * AP reset flag states |
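The two new type codes are picked up simply by extending the ap_device_id match tables of the existing CEX2A and PCIXCC/CEX2C drivers (see the zcrypt_cex2a.c and zcrypt_pcixcc.c hunks below). For reference, a sketch of the resulting CEX2A table; the MODULE_DEVICE_TABLE line is assumed from common driver practice and is not part of this diff:

static struct ap_device_id zcrypt_cex2a_ids[] = {
	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) },	/* new CEX2A variant */
	{ /* end of list */ },
};
/* assumed, not shown in this diff: exports the ids for module autoloading */
MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);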
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 4d36e805a234..8a4964f3584b 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c | |||
| @@ -1068,10 +1068,8 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer, | |||
| 1068 | 1068 | ||
| 1069 | #define LBUFSIZE 1200UL | 1069 | #define LBUFSIZE 1200UL |
| 1070 | lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); | 1070 | lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); |
| 1071 | if (!lbuf) { | 1071 | if (!lbuf) |
| 1072 | PRINTK("kmalloc failed!\n"); | ||
| 1073 | return 0; | 1072 | return 0; |
| 1074 | } | ||
| 1075 | 1073 | ||
| 1076 | local_count = min(LBUFSIZE - 1, count); | 1074 | local_count = min(LBUFSIZE - 1, count); |
| 1077 | if (copy_from_user(lbuf, buffer, local_count) != 0) { | 1075 | if (copy_from_user(lbuf, buffer, local_count) != 0) { |
| @@ -1081,23 +1079,15 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer, | |||
| 1081 | lbuf[local_count] = '\0'; | 1079 | lbuf[local_count] = '\0'; |
| 1082 | 1080 | ||
| 1083 | ptr = strstr(lbuf, "Online devices"); | 1081 | ptr = strstr(lbuf, "Online devices"); |
| 1084 | if (!ptr) { | 1082 | if (!ptr) |
| 1085 | PRINTK("Unable to parse data (missing \"Online devices\")\n"); | ||
| 1086 | goto out; | 1083 | goto out; |
| 1087 | } | ||
| 1088 | ptr = strstr(ptr, "\n"); | 1084 | ptr = strstr(ptr, "\n"); |
| 1089 | if (!ptr) { | 1085 | if (!ptr) |
| 1090 | PRINTK("Unable to parse data (missing newline " | ||
| 1091 | "after \"Online devices\")\n"); | ||
| 1092 | goto out; | 1086 | goto out; |
| 1093 | } | ||
| 1094 | ptr++; | 1087 | ptr++; |
| 1095 | 1088 | ||
| 1096 | if (strstr(ptr, "Waiting work element counts") == NULL) { | 1089 | if (strstr(ptr, "Waiting work element counts") == NULL) |
| 1097 | PRINTK("Unable to parse data (missing " | ||
| 1098 | "\"Waiting work element counts\")\n"); | ||
| 1099 | goto out; | 1090 | goto out; |
| 1100 | } | ||
| 1101 | 1091 | ||
| 1102 | for (j = 0; j < 64 && *ptr; ptr++) { | 1092 | for (j = 0; j < 64 && *ptr; ptr++) { |
| 1103 | /* | 1093 | /* |
| @@ -1197,16 +1187,12 @@ int __init zcrypt_api_init(void) | |||
| 1197 | 1187 | ||
| 1198 | /* Register the request sprayer. */ | 1188 | /* Register the request sprayer. */ |
| 1199 | rc = misc_register(&zcrypt_misc_device); | 1189 | rc = misc_register(&zcrypt_misc_device); |
| 1200 | if (rc < 0) { | 1190 | if (rc < 0) |
| 1201 | PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n", | ||
| 1202 | zcrypt_misc_device.minor, rc); | ||
| 1203 | goto out; | 1191 | goto out; |
| 1204 | } | ||
| 1205 | 1192 | ||
| 1206 | /* Set up the proc file system */ | 1193 | /* Set up the proc file system */ |
| 1207 | zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); | 1194 | zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); |
| 1208 | if (!zcrypt_entry) { | 1195 | if (!zcrypt_entry) { |
| 1209 | PRINTK("Couldn't create z90crypt proc entry\n"); | ||
| 1210 | rc = -ENOMEM; | 1196 | rc = -ENOMEM; |
| 1211 | goto out_misc; | 1197 | goto out_misc; |
| 1212 | } | 1198 | } |
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 5c6e222b2ac4..1d1ec74dadb2 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h | |||
| @@ -30,34 +30,6 @@ | |||
| 30 | #ifndef _ZCRYPT_API_H_ | 30 | #ifndef _ZCRYPT_API_H_ |
| 31 | #define _ZCRYPT_API_H_ | 31 | #define _ZCRYPT_API_H_ |
| 32 | 32 | ||
| 33 | /** | ||
| 34 | * Macro definitions | ||
| 35 | * | ||
| 36 | * PDEBUG debugs in the form "zcrypt: function_name -> message" | ||
| 37 | * | ||
| 38 | * PRINTK is like PDEBUG, except that it is always enabled | ||
| 39 | * PRINTKN is like PRINTK, except that it does not include the function name | ||
| 40 | * PRINTKW is like PRINTK, except that it uses KERN_WARNING | ||
| 41 | * PRINTKC is like PRINTK, except that it uses KERN_CRIT | ||
| 42 | */ | ||
| 43 | #define DEV_NAME "zcrypt" | ||
| 44 | |||
| 45 | #define PRINTK(fmt, args...) \ | ||
| 46 | printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args) | ||
| 47 | #define PRINTKN(fmt, args...) \ | ||
| 48 | printk(KERN_DEBUG DEV_NAME ": " fmt, ## args) | ||
| 49 | #define PRINTKW(fmt, args...) \ | ||
| 50 | printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __func__ , ## args) | ||
| 51 | #define PRINTKC(fmt, args...) \ | ||
| 52 | printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __func__ , ## args) | ||
| 53 | |||
| 54 | #ifdef ZCRYPT_DEBUG | ||
| 55 | #define PDEBUG(fmt, args...) \ | ||
| 56 | printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args) | ||
| 57 | #else | ||
| 58 | #define PDEBUG(fmt, args...) do {} while (0) | ||
| 59 | #endif | ||
| 60 | |||
| 61 | #include "ap_bus.h" | 33 | #include "ap_bus.h" |
| 62 | #include <asm/zcrypt.h> | 34 | #include <asm/zcrypt.h> |
| 63 | 35 | ||
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index 08657f604b8c..54f4cbc3be9e 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c | |||
| @@ -49,6 +49,7 @@ | |||
| 49 | 49 | ||
| 50 | static struct ap_device_id zcrypt_cex2a_ids[] = { | 50 | static struct ap_device_id zcrypt_cex2a_ids[] = { |
| 51 | { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) }, | 51 | { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) }, |
| 52 | { AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) }, | ||
| 52 | { /* end of list */ }, | 53 | { /* end of list */ }, |
| 53 | }; | 54 | }; |
| 54 | 55 | ||
| @@ -242,9 +243,6 @@ static int convert_response(struct zcrypt_device *zdev, | |||
| 242 | return convert_type80(zdev, reply, | 243 | return convert_type80(zdev, reply, |
| 243 | outputdata, outputdatalength); | 244 | outputdata, outputdatalength); |
| 244 | default: /* Unknown response type, this should NEVER EVER happen */ | 245 | default: /* Unknown response type, this should NEVER EVER happen */ |
| 245 | PRINTK("Unrecognized Message Header: %08x%08x\n", | ||
| 246 | *(unsigned int *) reply->message, | ||
| 247 | *(unsigned int *) (reply->message+4)); | ||
| 248 | zdev->online = 0; | 246 | zdev->online = 0; |
| 249 | return -EAGAIN; /* repeat the request on a different device. */ | 247 | return -EAGAIN; /* repeat the request on a different device. */ |
| 250 | } | 248 | } |
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h index 3e27fe77d207..03ba27f05f92 100644 --- a/drivers/s390/crypto/zcrypt_error.h +++ b/drivers/s390/crypto/zcrypt_error.h | |||
| @@ -92,10 +92,6 @@ static inline int convert_error(struct zcrypt_device *zdev, | |||
| 92 | { | 92 | { |
| 93 | struct error_hdr *ehdr = reply->message; | 93 | struct error_hdr *ehdr = reply->message; |
| 94 | 94 | ||
| 95 | PRINTK("Hardware error : Type %02x Message Header: %08x%08x\n", | ||
| 96 | ehdr->type, *(unsigned int *) reply->message, | ||
| 97 | *(unsigned int *) (reply->message + 4)); | ||
| 98 | |||
| 99 | switch (ehdr->reply_code) { | 95 | switch (ehdr->reply_code) { |
| 100 | case REP82_ERROR_OPERAND_INVALID: | 96 | case REP82_ERROR_OPERAND_INVALID: |
| 101 | case REP82_ERROR_OPERAND_SIZE: | 97 | case REP82_ERROR_OPERAND_SIZE: |
| @@ -123,8 +119,6 @@ static inline int convert_error(struct zcrypt_device *zdev, | |||
| 123 | zdev->online = 0; | 119 | zdev->online = 0; |
| 124 | return -EAGAIN; | 120 | return -EAGAIN; |
| 125 | default: | 121 | default: |
| 126 | PRINTKW("unknown type %02x reply code = %d\n", | ||
| 127 | ehdr->type, ehdr->reply_code); | ||
| 128 | zdev->online = 0; | 122 | zdev->online = 0; |
| 129 | return -EAGAIN; /* repeat the request on a different device. */ | 123 | return -EAGAIN; /* repeat the request on a different device. */ |
| 130 | } | 124 | } |
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c index 6e93b4751782..12da4815ba8e 100644 --- a/drivers/s390/crypto/zcrypt_pcica.c +++ b/drivers/s390/crypto/zcrypt_pcica.c | |||
| @@ -226,9 +226,6 @@ static int convert_response(struct zcrypt_device *zdev, | |||
| 226 | return convert_type84(zdev, reply, | 226 | return convert_type84(zdev, reply, |
| 227 | outputdata, outputdatalength); | 227 | outputdata, outputdatalength); |
| 228 | default: /* Unknown response type, this should NEVER EVER happen */ | 228 | default: /* Unknown response type, this should NEVER EVER happen */ |
| 229 | PRINTK("Unrecognized Message Header: %08x%08x\n", | ||
| 230 | *(unsigned int *) reply->message, | ||
| 231 | *(unsigned int *) (reply->message+4)); | ||
| 232 | zdev->online = 0; | 229 | zdev->online = 0; |
| 233 | return -EAGAIN; /* repeat the request on a different device. */ | 230 | return -EAGAIN; /* repeat the request on a different device. */ |
| 234 | } | 231 | } |
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c index 17ea56ce1c11..779952cb19fc 100644 --- a/drivers/s390/crypto/zcrypt_pcicc.c +++ b/drivers/s390/crypto/zcrypt_pcicc.c | |||
| @@ -361,26 +361,18 @@ static int convert_type86(struct zcrypt_device *zdev, | |||
| 361 | service_rc = le16_to_cpu(msg->cprb.ccp_rtcode); | 361 | service_rc = le16_to_cpu(msg->cprb.ccp_rtcode); |
| 362 | if (unlikely(service_rc != 0)) { | 362 | if (unlikely(service_rc != 0)) { |
| 363 | service_rs = le16_to_cpu(msg->cprb.ccp_rscode); | 363 | service_rs = le16_to_cpu(msg->cprb.ccp_rscode); |
| 364 | if (service_rc == 8 && service_rs == 66) { | 364 | if (service_rc == 8 && service_rs == 66) |
| 365 | PDEBUG("Bad block format on PCICC\n"); | ||
| 366 | return -EINVAL; | 365 | return -EINVAL; |
| 367 | } | 366 | if (service_rc == 8 && service_rs == 65) |
| 368 | if (service_rc == 8 && service_rs == 65) { | ||
| 369 | PDEBUG("Probably an even modulus on PCICC\n"); | ||
| 370 | return -EINVAL; | 367 | return -EINVAL; |
| 371 | } | ||
| 372 | if (service_rc == 8 && service_rs == 770) { | 368 | if (service_rc == 8 && service_rs == 770) { |
| 373 | PDEBUG("Invalid key length on PCICC\n"); | ||
| 374 | zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; | 369 | zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; |
| 375 | return -EAGAIN; | 370 | return -EAGAIN; |
| 376 | } | 371 | } |
| 377 | if (service_rc == 8 && service_rs == 783) { | 372 | if (service_rc == 8 && service_rs == 783) { |
| 378 | PDEBUG("Extended bitlengths not enabled on PCICC\n"); | ||
| 379 | zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; | 373 | zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; |
| 380 | return -EAGAIN; | 374 | return -EAGAIN; |
| 381 | } | 375 | } |
| 382 | PRINTK("Unknown service rc/rs (PCICC): %d/%d\n", | ||
| 383 | service_rc, service_rs); | ||
| 384 | zdev->online = 0; | 376 | zdev->online = 0; |
| 385 | return -EAGAIN; /* repeat the request on a different device. */ | 377 | return -EAGAIN; /* repeat the request on a different device. */ |
| 386 | } | 378 | } |
| @@ -434,9 +426,6 @@ static int convert_response(struct zcrypt_device *zdev, | |||
| 434 | outputdata, outputdatalength); | 426 | outputdata, outputdatalength); |
| 435 | /* no break, incorrect cprb version is an unknown response */ | 427 | /* no break, incorrect cprb version is an unknown response */ |
| 436 | default: /* Unknown response type, this should NEVER EVER happen */ | 428 | default: /* Unknown response type, this should NEVER EVER happen */ |
| 437 | PRINTK("Unrecognized Message Header: %08x%08x\n", | ||
| 438 | *(unsigned int *) reply->message, | ||
| 439 | *(unsigned int *) (reply->message+4)); | ||
| 440 | zdev->online = 0; | 429 | zdev->online = 0; |
| 441 | return -EAGAIN; /* repeat the request on a different device. */ | 430 | return -EAGAIN; /* repeat the request on a different device. */ |
| 442 | } | 431 | } |
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index 0bc9b3188e64..d8ad36f81540 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c | |||
| @@ -72,6 +72,7 @@ struct response_type { | |||
| 72 | static struct ap_device_id zcrypt_pcixcc_ids[] = { | 72 | static struct ap_device_id zcrypt_pcixcc_ids[] = { |
| 73 | { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) }, | 73 | { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) }, |
| 74 | { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) }, | 74 | { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) }, |
| 75 | { AP_DEVICE(AP_DEVICE_TYPE_CEX2C2) }, | ||
| 75 | { /* end of list */ }, | 76 | { /* end of list */ }, |
| 76 | }; | 77 | }; |
| 77 | 78 | ||
| @@ -289,38 +290,19 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, | |||
| 289 | ap_msg->length = sizeof(struct type6_hdr) + | 290 | ap_msg->length = sizeof(struct type6_hdr) + |
| 290 | CEIL4(xcRB->request_control_blk_length) + | 291 | CEIL4(xcRB->request_control_blk_length) + |
| 291 | xcRB->request_data_length; | 292 | xcRB->request_data_length; |
| 292 | if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) { | 293 | if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) |
| 293 | PRINTK("Combined message is too large (%ld/%d/%d).\n", | ||
| 294 | sizeof(struct type6_hdr), | ||
| 295 | xcRB->request_control_blk_length, | ||
| 296 | xcRB->request_data_length); | ||
| 297 | return -EFAULT; | 294 | return -EFAULT; |
| 298 | } | 295 | if (CEIL4(xcRB->reply_control_blk_length) > PCIXCC_MAX_XCRB_REPLY_SIZE) |
| 299 | if (CEIL4(xcRB->reply_control_blk_length) > | ||
| 300 | PCIXCC_MAX_XCRB_REPLY_SIZE) { | ||
| 301 | PDEBUG("Reply CPRB length is too large (%d).\n", | ||
| 302 | xcRB->request_control_blk_length); | ||
| 303 | return -EFAULT; | 296 | return -EFAULT; |
| 304 | } | 297 | if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) |
| 305 | if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) { | ||
| 306 | PDEBUG("Reply data block length is too large (%d).\n", | ||
| 307 | xcRB->reply_data_length); | ||
| 308 | return -EFAULT; | 298 | return -EFAULT; |
| 309 | } | ||
| 310 | replylen = CEIL4(xcRB->reply_control_blk_length) + | 299 | replylen = CEIL4(xcRB->reply_control_blk_length) + |
| 311 | CEIL4(xcRB->reply_data_length) + | 300 | CEIL4(xcRB->reply_data_length) + |
| 312 | sizeof(struct type86_fmt2_msg); | 301 | sizeof(struct type86_fmt2_msg); |
| 313 | if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) { | 302 | if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) { |
| 314 | PDEBUG("Reply CPRB + data block > PCIXCC_MAX_XCRB_RESPONSE_SIZE" | ||
| 315 | " (%d/%d/%d).\n", | ||
| 316 | sizeof(struct type86_fmt2_msg), | ||
| 317 | xcRB->reply_control_blk_length, | ||
| 318 | xcRB->reply_data_length); | ||
| 319 | xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE - | 303 | xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE - |
| 320 | (sizeof(struct type86_fmt2_msg) + | 304 | (sizeof(struct type86_fmt2_msg) + |
| 321 | CEIL4(xcRB->reply_data_length)); | 305 | CEIL4(xcRB->reply_data_length)); |
| 322 | PDEBUG("Capping Reply CPRB length at %d\n", | ||
| 323 | xcRB->reply_control_blk_length); | ||
| 324 | } | 306 | } |
| 325 | 307 | ||
| 326 | /* prepare type6 header */ | 308 | /* prepare type6 header */ |
| @@ -339,11 +321,8 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, | |||
| 339 | xcRB->request_control_blk_length)) | 321 | xcRB->request_control_blk_length)) |
| 340 | return -EFAULT; | 322 | return -EFAULT; |
| 341 | if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > | 323 | if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > |
| 342 | xcRB->request_control_blk_length) { | 324 | xcRB->request_control_blk_length) |
| 343 | PDEBUG("cprb_len too large (%d/%d)\n", msg->cprbx.cprb_len, | ||
| 344 | xcRB->request_control_blk_length); | ||
| 345 | return -EFAULT; | 325 | return -EFAULT; |
| 346 | } | ||
| 347 | function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; | 326 | function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; |
| 348 | memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code)); | 327 | memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code)); |
| 349 | 328 | ||
| @@ -471,29 +450,18 @@ static int convert_type86_ica(struct zcrypt_device *zdev, | |||
| 471 | service_rc = msg->cprbx.ccp_rtcode; | 450 | service_rc = msg->cprbx.ccp_rtcode; |
| 472 | if (unlikely(service_rc != 0)) { | 451 | if (unlikely(service_rc != 0)) { |
| 473 | service_rs = msg->cprbx.ccp_rscode; | 452 | service_rs = msg->cprbx.ccp_rscode; |
| 474 | if (service_rc == 8 && service_rs == 66) { | 453 | if (service_rc == 8 && service_rs == 66) |
| 475 | PDEBUG("Bad block format on PCIXCC/CEX2C\n"); | ||
| 476 | return -EINVAL; | 454 | return -EINVAL; |
| 477 | } | 455 | if (service_rc == 8 && service_rs == 65) |
| 478 | if (service_rc == 8 && service_rs == 65) { | ||
| 479 | PDEBUG("Probably an even modulus on PCIXCC/CEX2C\n"); | ||
| 480 | return -EINVAL; | 456 | return -EINVAL; |
| 481 | } | 457 | if (service_rc == 8 && service_rs == 770) |
| 482 | if (service_rc == 8 && service_rs == 770) { | ||
| 483 | PDEBUG("Invalid key length on PCIXCC/CEX2C\n"); | ||
| 484 | return -EINVAL; | 458 | return -EINVAL; |
| 485 | } | ||
| 486 | if (service_rc == 8 && service_rs == 783) { | 459 | if (service_rc == 8 && service_rs == 783) { |
| 487 | PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n"); | ||
| 488 | zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; | 460 | zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; |
| 489 | return -EAGAIN; | 461 | return -EAGAIN; |
| 490 | } | 462 | } |
| 491 | if (service_rc == 12 && service_rs == 769) { | 463 | if (service_rc == 12 && service_rs == 769) |
| 492 | PDEBUG("Invalid key on PCIXCC/CEX2C\n"); | ||
| 493 | return -EINVAL; | 464 | return -EINVAL; |
| 494 | } | ||
| 495 | PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n", | ||
| 496 | service_rc, service_rs); | ||
| 497 | zdev->online = 0; | 465 | zdev->online = 0; |
| 498 | return -EAGAIN; /* repeat the request on a different device. */ | 466 | return -EAGAIN; /* repeat the request on a different device. */ |
| 499 | } | 467 | } |
| @@ -569,11 +537,8 @@ static int convert_type86_rng(struct zcrypt_device *zdev, | |||
| 569 | } __attribute__((packed)) *msg = reply->message; | 537 | } __attribute__((packed)) *msg = reply->message; |
| 570 | char *data = reply->message; | 538 | char *data = reply->message; |
| 571 | 539 | ||
| 572 | if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) { | 540 | if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) |
| 573 | PDEBUG("RNG response error on PCIXCC/CEX2C rc=%hu/rs=%hu\n", | ||
| 574 | rc, rs); | ||
| 575 | return -EINVAL; | 541 | return -EINVAL; |
| 576 | } | ||
| 577 | memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2); | 542 | memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2); |
| 578 | return msg->fmt2.count2; | 543 | return msg->fmt2.count2; |
| 579 | } | 544 | } |
| @@ -598,9 +563,6 @@ static int convert_response_ica(struct zcrypt_device *zdev, | |||
| 598 | outputdata, outputdatalength); | 563 | outputdata, outputdatalength); |
| 599 | /* no break, incorrect cprb version is an unknown response */ | 564 | /* no break, incorrect cprb version is an unknown response */ |
| 600 | default: /* Unknown response type, this should NEVER EVER happen */ | 565 | default: /* Unknown response type, this should NEVER EVER happen */ |
| 601 | PRINTK("Unrecognized Message Header: %08x%08x\n", | ||
| 602 | *(unsigned int *) reply->message, | ||
| 603 | *(unsigned int *) (reply->message+4)); | ||
| 604 | zdev->online = 0; | 566 | zdev->online = 0; |
| 605 | return -EAGAIN; /* repeat the request on a different device. */ | 567 | return -EAGAIN; /* repeat the request on a different device. */ |
| 606 | } | 568 | } |
| @@ -627,9 +589,6 @@ static int convert_response_xcrb(struct zcrypt_device *zdev, | |||
| 627 | return convert_type86_xcrb(zdev, reply, xcRB); | 589 | return convert_type86_xcrb(zdev, reply, xcRB); |
| 628 | /* no break, incorrect cprb version is an unknown response */ | 590 | /* no break, incorrect cprb version is an unknown response */ |
| 629 | default: /* Unknown response type, this should NEVER EVER happen */ | 591 | default: /* Unknown response type, this should NEVER EVER happen */ |
| 630 | PRINTK("Unrecognized Message Header: %08x%08x\n", | ||
| 631 | *(unsigned int *) reply->message, | ||
| 632 | *(unsigned int *) (reply->message+4)); | ||
| 633 | xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ | 592 | xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ |
| 634 | zdev->online = 0; | 593 | zdev->online = 0; |
| 635 | return -EAGAIN; /* repeat the request on a different device. */ | 594 | return -EAGAIN; /* repeat the request on a different device. */ |
| @@ -653,9 +612,6 @@ static int convert_response_rng(struct zcrypt_device *zdev, | |||
| 653 | return convert_type86_rng(zdev, reply, data); | 612 | return convert_type86_rng(zdev, reply, data); |
| 654 | /* no break, incorrect cprb version is an unknown response */ | 613 | /* no break, incorrect cprb version is an unknown response */ |
| 655 | default: /* Unknown response type, this should NEVER EVER happen */ | 614 | default: /* Unknown response type, this should NEVER EVER happen */ |
| 656 | PRINTK("Unrecognized Message Header: %08x%08x\n", | ||
| 657 | *(unsigned int *) reply->message, | ||
| 658 | *(unsigned int *) (reply->message+4)); | ||
| 659 | zdev->online = 0; | 615 | zdev->online = 0; |
| 660 | return -EAGAIN; /* repeat the request on a different device. */ | 616 | return -EAGAIN; /* repeat the request on a different device. */ |
| 661 | } | 617 | } |
| @@ -700,10 +656,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev, | |||
| 700 | memcpy(msg->message, reply->message, length); | 656 | memcpy(msg->message, reply->message, length); |
| 701 | break; | 657 | break; |
| 702 | default: | 658 | default: |
| 703 | PRINTK("Invalid internal response type: %i\n", | 659 | memcpy(msg->message, &error_reply, sizeof error_reply); |
| 704 | resp_type->type); | ||
| 705 | memcpy(msg->message, &error_reply, | ||
| 706 | sizeof error_reply); | ||
| 707 | } | 660 | } |
| 708 | } else | 661 | } else |
| 709 | memcpy(msg->message, reply->message, sizeof error_reply); | 662 | memcpy(msg->message, reply->message, sizeof error_reply); |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 04a1d7bf678c..c644669a75c2 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
| @@ -703,7 +703,8 @@ claw_irq_handler(struct ccw_device *cdev, | |||
| 703 | if (!cdev->dev.driver_data) { | 703 | if (!cdev->dev.driver_data) { |
| 704 | printk(KERN_WARNING "claw: unsolicited interrupt for device:" | 704 | printk(KERN_WARNING "claw: unsolicited interrupt for device:" |
| 705 | "%s received c-%02x d-%02x\n", | 705 | "%s received c-%02x d-%02x\n", |
| 706 | cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat); | 706 | cdev->dev.bus_id, irb->scsw.cmd.cstat, |
| 707 | irb->scsw.cmd.dstat); | ||
| 707 | #ifdef FUNCTRACE | 708 | #ifdef FUNCTRACE |
| 708 | printk(KERN_INFO "claw: %s() " | 709 | printk(KERN_INFO "claw: %s() " |
| 709 | "exit on line %d\n",__func__,__LINE__); | 710 | "exit on line %d\n",__func__,__LINE__); |
| @@ -732,22 +733,23 @@ claw_irq_handler(struct ccw_device *cdev, | |||
| 732 | #ifdef IOTRACE | 733 | #ifdef IOTRACE |
| 733 | printk(KERN_INFO "%s: interrupt for device: %04x " | 734 | printk(KERN_INFO "%s: interrupt for device: %04x " |
| 734 | "received c-%02x d-%02x state-%02x\n", | 735 | "received c-%02x d-%02x state-%02x\n", |
| 735 | dev->name, p_ch->devno, irb->scsw.cstat, | 736 | dev->name, p_ch->devno, irb->scsw.cmd.cstat, |
| 736 | irb->scsw.dstat, p_ch->claw_state); | 737 | irb->scsw.cmd.dstat, p_ch->claw_state); |
| 737 | #endif | 738 | #endif |
| 738 | 739 | ||
| 739 | /* Copy interruption response block. */ | 740 | /* Copy interruption response block. */ |
| 740 | memcpy(p_ch->irb, irb, sizeof(struct irb)); | 741 | memcpy(p_ch->irb, irb, sizeof(struct irb)); |
| 741 | 742 | ||
| 742 | /* Check for good subchannel return code, otherwise error message */ | 743 | /* Check for good subchannel return code, otherwise error message */ |
| 743 | if (irb->scsw.cstat && !(irb->scsw.cstat & SCHN_STAT_PCI)) { | 744 | if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) { |
| 744 | printk(KERN_INFO "%s: subchannel check for device: %04x -" | 745 | printk(KERN_INFO "%s: subchannel check for device: %04x -" |
| 745 | " Sch Stat %02x Dev Stat %02x CPA - %04x\n", | 746 | " Sch Stat %02x Dev Stat %02x CPA - %04x\n", |
| 746 | dev->name, p_ch->devno, | 747 | dev->name, p_ch->devno, |
| 747 | irb->scsw.cstat, irb->scsw.dstat,irb->scsw.cpa); | 748 | irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, |
| 749 | irb->scsw.cmd.cpa); | ||
| 748 | #ifdef IOTRACE | 750 | #ifdef IOTRACE |
| 749 | dumpit((char *)irb,sizeof(struct irb)); | 751 | dumpit((char *)irb,sizeof(struct irb)); |
| 750 | dumpit((char *)(unsigned long)irb->scsw.cpa, | 752 | dumpit((char *)(unsigned long)irb->scsw.cmd.cpa, |
| 751 | sizeof(struct ccw1)); | 753 | sizeof(struct ccw1)); |
| 752 | #endif | 754 | #endif |
| 753 | #ifdef FUNCTRACE | 755 | #ifdef FUNCTRACE |
| @@ -759,22 +761,24 @@ claw_irq_handler(struct ccw_device *cdev, | |||
| 759 | } | 761 | } |
| 760 | 762 | ||
| 761 | /* Check the reason-code of a unit check */ | 763 | /* Check the reason-code of a unit check */ |
| 762 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { | 764 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) |
| 763 | ccw_check_unit_check(p_ch, irb->ecw[0]); | 765 | ccw_check_unit_check(p_ch, irb->ecw[0]); |
| 764 | } | ||
| 765 | 766 | ||
| 766 | /* State machine to bring the connection up, down and to restart */ | 767 | /* State machine to bring the connection up, down and to restart */ |
| 767 | p_ch->last_dstat = irb->scsw.dstat; | 768 | p_ch->last_dstat = irb->scsw.cmd.dstat; |
| 768 | 769 | ||
| 769 | switch (p_ch->claw_state) { | 770 | switch (p_ch->claw_state) { |
| 770 | case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */ | 771 | case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */ |
| 771 | #ifdef DEBUGMSG | 772 | #ifdef DEBUGMSG |
| 772 | printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name); | 773 | printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name); |
| 773 | #endif | 774 | #endif |
| 774 | if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || | 775 | if (!((p_ch->irb->scsw.cmd.stctl & |
| 775 | (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || | 776 | SCSW_STCTL_SEC_STATUS) || |
| 776 | (p_ch->irb->scsw.stctl == | 777 | (p_ch->irb->scsw.cmd.stctl == |
| 777 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { | 778 | SCSW_STCTL_STATUS_PEND) || |
| 779 | (p_ch->irb->scsw.cmd.stctl == | ||
| 780 | (SCSW_STCTL_ALERT_STATUS | | ||
| 781 | SCSW_STCTL_STATUS_PEND)))) { | ||
| 778 | #ifdef FUNCTRACE | 782 | #ifdef FUNCTRACE |
| 779 | printk(KERN_INFO "%s:%s Exit on line %d\n", | 783 | printk(KERN_INFO "%s:%s Exit on line %d\n", |
| 780 | dev->name,__func__,__LINE__); | 784 | dev->name,__func__,__LINE__); |
| @@ -798,10 +802,13 @@ claw_irq_handler(struct ccw_device *cdev, | |||
| 798 | printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n", | 802 | printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n", |
| 799 | dev->name); | 803 | dev->name); |
| 800 | #endif | 804 | #endif |
| 801 | if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || | 805 | if (!((p_ch->irb->scsw.cmd.stctl & |
| 802 | (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || | 806 | SCSW_STCTL_SEC_STATUS) || |
| 803 | (p_ch->irb->scsw.stctl == | 807 | (p_ch->irb->scsw.cmd.stctl == |
| 804 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { | 808 | SCSW_STCTL_STATUS_PEND) || |
| 809 | (p_ch->irb->scsw.cmd.stctl == | ||
| 810 | (SCSW_STCTL_ALERT_STATUS | | ||
| 811 | SCSW_STCTL_STATUS_PEND)))) { | ||
| 805 | #ifdef FUNCTRACE | 812 | #ifdef FUNCTRACE |
| 806 | printk(KERN_INFO "%s:%s Exit on line %d\n", | 813 | printk(KERN_INFO "%s:%s Exit on line %d\n", |
| 807 | dev->name,__func__,__LINE__); | 814 | dev->name,__func__,__LINE__); |
| @@ -828,8 +835,8 @@ claw_irq_handler(struct ccw_device *cdev, | |||
| 828 | "interrupt for device:" | 835 | "interrupt for device:" |
| 829 | "%s received c-%02x d-%02x\n", | 836 | "%s received c-%02x d-%02x\n", |
| 830 | cdev->dev.bus_id, | 837 | cdev->dev.bus_id, |
| 831 | irb->scsw.cstat, | 838 | irb->scsw.cmd.cstat, |
| 832 | irb->scsw.dstat); | 839 | irb->scsw.cmd.dstat); |
| 833 | return; | 840 | return; |
| 834 | } | 841 | } |
| 835 | #ifdef DEBUGMSG | 842 | #ifdef DEBUGMSG |
| @@ -844,7 +851,7 @@ claw_irq_handler(struct ccw_device *cdev, | |||
| 844 | return; | 851 | return; |
| 845 | case CLAW_START_READ: | 852 | case CLAW_START_READ: |
| 846 | CLAW_DBF_TEXT(4,trace,"ReadIRQ"); | 853 | CLAW_DBF_TEXT(4,trace,"ReadIRQ"); |
| 847 | if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { | 854 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
| 848 | clear_bit(0, (void *)&p_ch->IO_active); | 855 | clear_bit(0, (void *)&p_ch->IO_active); |
| 849 | if ((p_ch->irb->ecw[0] & 0x41) == 0x41 || | 856 | if ((p_ch->irb->ecw[0] & 0x41) == 0x41 || |
| 850 | (p_ch->irb->ecw[0] & 0x40) == 0x40 || | 857 | (p_ch->irb->ecw[0] & 0x40) == 0x40 || |
| @@ -863,8 +870,8 @@ claw_irq_handler(struct ccw_device *cdev, | |||
| 863 | CLAW_DBF_TEXT(4,trace,"notrdy"); | 870 | CLAW_DBF_TEXT(4,trace,"notrdy"); |
| 864 | return; | 871 | return; |
| 865 | } | 872 | } |
| 866 | if ((p_ch->irb->scsw.cstat & SCHN_STAT_PCI) && | 873 | if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) && |
| 867 | (p_ch->irb->scsw.dstat==0)) { | 874 | (p_ch->irb->scsw.cmd.dstat == 0)) { |
| 868 | if (test_and_set_bit(CLAW_BH_ACTIVE, | 875 | if (test_and_set_bit(CLAW_BH_ACTIVE, |
| 869 | (void *)&p_ch->flag_a) == 0) { | 876 | (void *)&p_ch->flag_a) == 0) { |
| 870 | tasklet_schedule(&p_ch->tasklet); | 877 | tasklet_schedule(&p_ch->tasklet); |
| @@ -879,10 +886,13 @@ claw_irq_handler(struct ccw_device *cdev, | |||
| 879 | CLAW_DBF_TEXT(4,trace,"PCI_read"); | 886 | CLAW_DBF_TEXT(4,trace,"PCI_read"); |
| 880 | return; | 887 | return; |
| 881 | } | 888 | } |
| 882 | if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || | 889 | if (!((p_ch->irb->scsw.cmd.stctl & |
| 883 | (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || | 890 | SCSW_STCTL_SEC_STATUS) || |
| 884 | (p_ch->irb->scsw.stctl == | 891 | (p_ch->irb->scsw.cmd.stctl == |
| 885 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { | 892 | SCSW_STCTL_STATUS_PEND) || |
| 893 | (p_ch->irb->scsw.cmd.stctl == | ||
| 894 | (SCSW_STCTL_ALERT_STATUS | | ||
| 895 | SCSW_STCTL_STATUS_PEND)))) { | ||
| 886 | #ifdef FUNCTRACE | 896 | #ifdef FUNCTRACE |
| 887 | printk(KERN_INFO "%s:%s Exit on line %d\n", | 897 | printk(KERN_INFO "%s:%s Exit on line %d\n", |
| 888 | dev->name,__func__,__LINE__); | 898 | dev->name,__func__,__LINE__); |
| @@ -911,7 +921,7 @@ claw_irq_handler(struct ccw_device *cdev, | |||
| 911 | CLAW_DBF_TEXT(4,trace,"RdIRQXit"); | 921 | CLAW_DBF_TEXT(4,trace,"RdIRQXit"); |
| 912 | return; | 922 | return; |
| 913 | case CLAW_START_WRITE: | 923 | case CLAW_START_WRITE: |
| 914 | if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { | 924 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
| 915 | printk(KERN_INFO "%s: Unit Check Occured in " | 925 | printk(KERN_INFO "%s: Unit Check Occured in " |
| 916 | "write channel\n",dev->name); | 926 | "write channel\n",dev->name); |
| 917 | clear_bit(0, (void *)&p_ch->IO_active); | 927 | clear_bit(0, (void *)&p_ch->IO_active); |
| @@ -934,16 +944,19 @@ claw_irq_handler(struct ccw_device *cdev, | |||
| 934 | CLAW_DBF_TEXT(4,trace,"rstrtwrt"); | 944 | CLAW_DBF_TEXT(4,trace,"rstrtwrt"); |
| 935 | return; | 945 | return; |
| 936 | } | 946 | } |
| 937 | if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) { | 947 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { |
| 938 | clear_bit(0, (void *)&p_ch->IO_active); | 948 | clear_bit(0, (void *)&p_ch->IO_active); |
| 939 | printk(KERN_INFO "%s: Unit Exception " | 949 | printk(KERN_INFO "%s: Unit Exception " |
| 940 | "Occured in write channel\n", | 950 | "Occured in write channel\n", |
| 941 | dev->name); | 951 | dev->name); |
| 942 | } | 952 | } |
| 943 | if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || | 953 | if (!((p_ch->irb->scsw.cmd.stctl & |
| 944 | (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || | 954 | SCSW_STCTL_SEC_STATUS) || |
| 945 | (p_ch->irb->scsw.stctl == | 955 | (p_ch->irb->scsw.cmd.stctl == |
| 946 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { | 956 | SCSW_STCTL_STATUS_PEND) || |
| 957 | (p_ch->irb->scsw.cmd.stctl == | ||
| 958 | (SCSW_STCTL_ALERT_STATUS | | ||
| 959 | SCSW_STCTL_STATUS_PEND)))) { | ||
| 947 | #ifdef FUNCTRACE | 960 | #ifdef FUNCTRACE |
| 948 | printk(KERN_INFO "%s:%s Exit on line %d\n", | 961 | printk(KERN_INFO "%s:%s Exit on line %d\n", |
| 949 | dev->name,__func__,__LINE__); | 962 | dev->name,__func__,__LINE__); |
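From here on, the s390 network drivers (claw, ctcm, lcs) are converted from the flat scsw fields (irb->scsw.cstat, irb->scsw.dstat, ...) to the command-mode accessors grouped under a cmd member (irb->scsw.cmd.cstat, ...). A small sketch of the new access pattern in an interrupt handler, using only fields and constants that appear in these hunks; demo_irq_handler is a made-up name and the header locations are assumptions:

#include <asm/ccwdev.h>	/* struct ccw_device (assumed location) */
#include <asm/cio.h>	/* struct irb, DEV_STAT_*, SCHN_STAT_*, SCSW_STCTL_* (assumed location) */

static void demo_irq_handler(struct ccw_device *cdev, unsigned long intparm,
			     struct irb *irb)
{
	/* command-mode status words now live under scsw.cmd */
	unsigned int cstat = irb->scsw.cmd.cstat;	/* subchannel status */
	unsigned int dstat = irb->scsw.cmd.dstat;	/* device status */

	if (cstat && !(cstat & SCHN_STAT_PCI))
		return;				/* subchannel check */
	if (dstat & DEV_STAT_UNIT_CHECK)
		return;				/* sense data is in irb->ecw[0] */
	if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
	    (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
	    (irb->scsw.cmd.stctl ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
		/* final/alert status pending: the channel program has ended */
	}
}

The same mechanical substitution, plus re-wrapping to stay within 80 columns, accounts for the bulk of the claw.c changes above and the ctcm and lcs changes that follow.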
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 2a106f3a076d..7e6bd387f4d8 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c | |||
| @@ -257,9 +257,9 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg) | |||
| 257 | if (duration > ch->prof.tx_time) | 257 | if (duration > ch->prof.tx_time) |
| 258 | ch->prof.tx_time = duration; | 258 | ch->prof.tx_time = duration; |
| 259 | 259 | ||
| 260 | if (ch->irb->scsw.count != 0) | 260 | if (ch->irb->scsw.cmd.count != 0) |
| 261 | ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", | 261 | ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", |
| 262 | dev->name, ch->irb->scsw.count); | 262 | dev->name, ch->irb->scsw.cmd.count); |
| 263 | fsm_deltimer(&ch->timer); | 263 | fsm_deltimer(&ch->timer); |
| 264 | while ((skb = skb_dequeue(&ch->io_queue))) { | 264 | while ((skb = skb_dequeue(&ch->io_queue))) { |
| 265 | priv->stats.tx_packets++; | 265 | priv->stats.tx_packets++; |
| @@ -353,7 +353,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg) | |||
| 353 | struct channel *ch = arg; | 353 | struct channel *ch = arg; |
| 354 | struct net_device *dev = ch->netdev; | 354 | struct net_device *dev = ch->netdev; |
| 355 | struct ctcm_priv *priv = dev->priv; | 355 | struct ctcm_priv *priv = dev->priv; |
| 356 | int len = ch->max_bufsize - ch->irb->scsw.count; | 356 | int len = ch->max_bufsize - ch->irb->scsw.cmd.count; |
| 357 | struct sk_buff *skb = ch->trans_skb; | 357 | struct sk_buff *skb = ch->trans_skb; |
| 358 | __u16 block_len = *((__u16 *)skb->data); | 358 | __u16 block_len = *((__u16 *)skb->data); |
| 359 | int check_len; | 359 | int check_len; |
| @@ -1234,9 +1234,9 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) | |||
| 1234 | if (duration > ch->prof.tx_time) | 1234 | if (duration > ch->prof.tx_time) |
| 1235 | ch->prof.tx_time = duration; | 1235 | ch->prof.tx_time = duration; |
| 1236 | 1236 | ||
| 1237 | if (ch->irb->scsw.count != 0) | 1237 | if (ch->irb->scsw.cmd.count != 0) |
| 1238 | ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", | 1238 | ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", |
| 1239 | dev->name, ch->irb->scsw.count); | 1239 | dev->name, ch->irb->scsw.cmd.count); |
| 1240 | fsm_deltimer(&ch->timer); | 1240 | fsm_deltimer(&ch->timer); |
| 1241 | while ((skb = skb_dequeue(&ch->io_queue))) { | 1241 | while ((skb = skb_dequeue(&ch->io_queue))) { |
| 1242 | priv->stats.tx_packets++; | 1242 | priv->stats.tx_packets++; |
| @@ -1394,7 +1394,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg) | |||
| 1394 | struct sk_buff *skb = ch->trans_skb; | 1394 | struct sk_buff *skb = ch->trans_skb; |
| 1395 | struct sk_buff *new_skb; | 1395 | struct sk_buff *new_skb; |
| 1396 | unsigned long saveflags = 0; /* avoids compiler warning */ | 1396 | unsigned long saveflags = 0; /* avoids compiler warning */ |
| 1397 | int len = ch->max_bufsize - ch->irb->scsw.count; | 1397 | int len = ch->max_bufsize - ch->irb->scsw.cmd.count; |
| 1398 | 1398 | ||
| 1399 | if (do_debug_data) { | 1399 | if (do_debug_data) { |
| 1400 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n", | 1400 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n", |
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index d52843da4f55..6b13c1c1beb8 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c | |||
| @@ -1236,8 +1236,8 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
| 1236 | /* Check for unsolicited interrupts. */ | 1236 | /* Check for unsolicited interrupts. */ |
| 1237 | if (cgdev == NULL) { | 1237 | if (cgdev == NULL) { |
| 1238 | ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n", | 1238 | ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n", |
| 1239 | cdev->dev.bus_id, irb->scsw.cstat, | 1239 | cdev->dev.bus_id, irb->scsw.cmd.cstat, |
| 1240 | irb->scsw.dstat); | 1240 | irb->scsw.cmd.dstat); |
| 1241 | return; | 1241 | return; |
| 1242 | } | 1242 | } |
| 1243 | 1243 | ||
| @@ -1266,40 +1266,40 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
| 1266 | "received c-%02x d-%02x\n", | 1266 | "received c-%02x d-%02x\n", |
| 1267 | dev->name, | 1267 | dev->name, |
| 1268 | ch->id, | 1268 | ch->id, |
| 1269 | irb->scsw.cstat, | 1269 | irb->scsw.cmd.cstat, |
| 1270 | irb->scsw.dstat); | 1270 | irb->scsw.cmd.dstat); |
| 1271 | 1271 | ||
| 1272 | /* Copy interruption response block. */ | 1272 | /* Copy interruption response block. */ |
| 1273 | memcpy(ch->irb, irb, sizeof(struct irb)); | 1273 | memcpy(ch->irb, irb, sizeof(struct irb)); |
| 1274 | 1274 | ||
| 1275 | /* Check for good subchannel return code, otherwise error message */ | 1275 | /* Check for good subchannel return code, otherwise error message */ |
| 1276 | if (irb->scsw.cstat) { | 1276 | if (irb->scsw.cmd.cstat) { |
| 1277 | fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); | 1277 | fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); |
| 1278 | ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n", | 1278 | ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n", |
| 1279 | dev->name, ch->id, irb->scsw.cstat, | 1279 | dev->name, ch->id, irb->scsw.cmd.cstat, |
| 1280 | irb->scsw.dstat); | 1280 | irb->scsw.cmd.dstat); |
| 1281 | return; | 1281 | return; |
| 1282 | } | 1282 | } |
| 1283 | 1283 | ||
| 1284 | /* Check the reason-code of a unit check */ | 1284 | /* Check the reason-code of a unit check */ |
| 1285 | if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { | 1285 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
| 1286 | ccw_unit_check(ch, irb->ecw[0]); | 1286 | ccw_unit_check(ch, irb->ecw[0]); |
| 1287 | return; | 1287 | return; |
| 1288 | } | 1288 | } |
| 1289 | if (irb->scsw.dstat & DEV_STAT_BUSY) { | 1289 | if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) { |
| 1290 | if (irb->scsw.dstat & DEV_STAT_ATTENTION) | 1290 | if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) |
| 1291 | fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch); | 1291 | fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch); |
| 1292 | else | 1292 | else |
| 1293 | fsm_event(ch->fsm, CTC_EVENT_BUSY, ch); | 1293 | fsm_event(ch->fsm, CTC_EVENT_BUSY, ch); |
| 1294 | return; | 1294 | return; |
| 1295 | } | 1295 | } |
| 1296 | if (irb->scsw.dstat & DEV_STAT_ATTENTION) { | 1296 | if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { |
| 1297 | fsm_event(ch->fsm, CTC_EVENT_ATTN, ch); | 1297 | fsm_event(ch->fsm, CTC_EVENT_ATTN, ch); |
| 1298 | return; | 1298 | return; |
| 1299 | } | 1299 | } |
| 1300 | if ((irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || | 1300 | if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || |
| 1301 | (irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || | 1301 | (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || |
| 1302 | (irb->scsw.stctl == | 1302 | (irb->scsw.cmd.stctl == |
| 1303 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) | 1303 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) |
| 1304 | fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch); | 1304 | fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch); |
| 1305 | else | 1305 | else |
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c index 8e7697305a4c..f4a32375c037 100644 --- a/drivers/s390/net/cu3088.c +++ b/drivers/s390/net/cu3088.c | |||
| @@ -36,7 +36,6 @@ const char *cu3088_type[] = { | |||
| 36 | "CTC/A", | 36 | "CTC/A", |
| 37 | "ESCON channel", | 37 | "ESCON channel", |
| 38 | "FICON channel", | 38 | "FICON channel", |
| 39 | "P390 LCS card", | ||
| 40 | "OSA LCS card", | 39 | "OSA LCS card", |
| 41 | "CLAW channel device", | 40 | "CLAW channel device", |
| 42 | "unknown channel type", | 41 | "unknown channel type", |
| @@ -49,7 +48,6 @@ static struct ccw_device_id cu3088_ids[] = { | |||
| 49 | { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel }, | 48 | { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel }, |
| 50 | { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon }, | 49 | { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon }, |
| 51 | { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon }, | 50 | { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon }, |
| 52 | { CCW_DEVICE(0x3088, 0x01), .driver_info = channel_type_p390 }, | ||
| 53 | { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 }, | 51 | { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 }, |
| 54 | { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw }, | 52 | { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw }, |
| 55 | { /* end of list */ } | 53 | { /* end of list */ } |
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h index 1753661f702a..d8558a7105a5 100644 --- a/drivers/s390/net/cu3088.h +++ b/drivers/s390/net/cu3088.h | |||
| @@ -17,9 +17,6 @@ enum channel_types { | |||
| 17 | /* Device is a FICON channel */ | 17 | /* Device is a FICON channel */ |
| 18 | channel_type_ficon, | 18 | channel_type_ficon, |
| 19 | 19 | ||
| 20 | /* Device is a P390 LCS card */ | ||
| 21 | channel_type_p390, | ||
| 22 | |||
| 23 | /* Device is a OSA2 card */ | 20 | /* Device is a OSA2 card */ |
| 24 | channel_type_osa2, | 21 | channel_type_osa2, |
| 25 | 22 | ||
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index dd22f4b37037..6de28385b354 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
| @@ -1327,8 +1327,8 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb) | |||
| 1327 | char *sense; | 1327 | char *sense; |
| 1328 | 1328 | ||
| 1329 | sense = (char *) irb->ecw; | 1329 | sense = (char *) irb->ecw; |
| 1330 | cstat = irb->scsw.cstat; | 1330 | cstat = irb->scsw.cmd.cstat; |
| 1331 | dstat = irb->scsw.dstat; | 1331 | dstat = irb->scsw.cmd.dstat; |
| 1332 | 1332 | ||
| 1333 | if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | | 1333 | if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | |
| 1334 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | | 1334 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | |
| @@ -1388,11 +1388,13 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 1388 | else | 1388 | else |
| 1389 | channel = &card->write; | 1389 | channel = &card->write; |
| 1390 | 1390 | ||
| 1391 | cstat = irb->scsw.cstat; | 1391 | cstat = irb->scsw.cmd.cstat; |
| 1392 | dstat = irb->scsw.dstat; | 1392 | dstat = irb->scsw.cmd.dstat; |
| 1393 | LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id); | 1393 | LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id); |
| 1394 | LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.cstat, irb->scsw.dstat); | 1394 | LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat, |
| 1395 | LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.fctl, irb->scsw.actl); | 1395 | irb->scsw.cmd.dstat); |
| 1396 | LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl, | ||
| 1397 | irb->scsw.cmd.actl); | ||
| 1396 | 1398 | ||
| 1397 | /* Check for channel and device errors presented */ | 1399 | /* Check for channel and device errors presented */ |
| 1398 | rc = lcs_get_problem(cdev, irb); | 1400 | rc = lcs_get_problem(cdev, irb); |
| @@ -1410,11 +1412,11 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 1410 | } | 1412 | } |
| 1411 | /* How far in the ccw chain have we processed? */ | 1413 | /* How far in the ccw chain have we processed? */ |
| 1412 | if ((channel->state != LCS_CH_STATE_INIT) && | 1414 | if ((channel->state != LCS_CH_STATE_INIT) && |
| 1413 | (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) { | 1415 | (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC)) { |
| 1414 | index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) | 1416 | index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa) |
| 1415 | - channel->ccws; | 1417 | - channel->ccws; |
| 1416 | if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) || | 1418 | if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) || |
| 1417 | (irb->scsw.cstat & SCHN_STAT_PCI)) | 1419 | (irb->scsw.cmd.cstat & SCHN_STAT_PCI)) |
| 1418 | /* Bloody io subsystem tells us lies about cpa... */ | 1420 | /* Bloody io subsystem tells us lies about cpa... */ |
| 1419 | index = (index - 1) & (LCS_NUM_BUFFS - 1); | 1421 | index = (index - 1) & (LCS_NUM_BUFFS - 1); |
| 1420 | while (channel->io_idx != index) { | 1422 | while (channel->io_idx != index) { |
| @@ -1425,25 +1427,24 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 1425 | } | 1427 | } |
| 1426 | } | 1428 | } |
| 1427 | 1429 | ||
| 1428 | if ((irb->scsw.dstat & DEV_STAT_DEV_END) || | 1430 | if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) || |
| 1429 | (irb->scsw.dstat & DEV_STAT_CHN_END) || | 1431 | (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) || |
| 1430 | (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) | 1432 | (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) |
| 1431 | /* Mark channel as stopped. */ | 1433 | /* Mark channel as stopped. */ |
| 1432 | channel->state = LCS_CH_STATE_STOPPED; | 1434 | channel->state = LCS_CH_STATE_STOPPED; |
| 1433 | else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED) | 1435 | else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) |
| 1434 | /* CCW execution stopped on a suspend bit. */ | 1436 | /* CCW execution stopped on a suspend bit. */ |
| 1435 | channel->state = LCS_CH_STATE_SUSPENDED; | 1437 | channel->state = LCS_CH_STATE_SUSPENDED; |
| 1436 | if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { | 1438 | if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { |
| 1437 | if (irb->scsw.cc != 0) { | 1439 | if (irb->scsw.cmd.cc != 0) { |
| 1438 | ccw_device_halt(channel->ccwdev, (addr_t) channel); | 1440 | ccw_device_halt(channel->ccwdev, (addr_t) channel); |
| 1439 | return; | 1441 | return; |
| 1440 | } | 1442 | } |
| 1441 | /* The channel has been stopped by halt_IO. */ | 1443 | /* The channel has been stopped by halt_IO. */ |
| 1442 | channel->state = LCS_CH_STATE_HALTED; | 1444 | channel->state = LCS_CH_STATE_HALTED; |
| 1443 | } | 1445 | } |
| 1444 | if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { | 1446 | if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) |
| 1445 | channel->state = LCS_CH_STATE_CLEARED; | 1447 | channel->state = LCS_CH_STATE_CLEARED; |
| 1446 | } | ||
| 1447 | /* Do the rest in the tasklet. */ | 1448 | /* Do the rest in the tasklet. */ |
| 1448 | tasklet_schedule(&channel->irq_tasklet); | 1449 | tasklet_schedule(&channel->irq_tasklet); |
| 1449 | } | 1450 | } |
| @@ -1761,7 +1762,7 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) | |||
| 1761 | netif_carrier_off(card->dev); | 1762 | netif_carrier_off(card->dev); |
| 1762 | break; | 1763 | break; |
| 1763 | default: | 1764 | default: |
| 1764 | PRINT_INFO("UNRECOGNIZED LGW COMMAND\n"); | 1765 | LCS_DBF_TEXT(5, trace, "noLGWcmd"); |
| 1765 | break; | 1766 | break; |
| 1766 | } | 1767 | } |
| 1767 | } else | 1768 | } else |
| @@ -2042,13 +2043,12 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev) | |||
| 2042 | LCS_DBF_TEXT(2, setup, "add_dev"); | 2043 | LCS_DBF_TEXT(2, setup, "add_dev"); |
| 2043 | card = lcs_alloc_card(); | 2044 | card = lcs_alloc_card(); |
| 2044 | if (!card) { | 2045 | if (!card) { |
| 2045 | PRINT_ERR("Allocation of lcs card failed\n"); | 2046 | LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM); |
| 2046 | put_device(&ccwgdev->dev); | 2047 | put_device(&ccwgdev->dev); |
| 2047 | return -ENOMEM; | 2048 | return -ENOMEM; |
| 2048 | } | 2049 | } |
| 2049 | ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group); | 2050 | ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group); |
| 2050 | if (ret) { | 2051 | if (ret) { |
| 2051 | PRINT_ERR("Creating attributes failed"); | ||
| 2052 | lcs_free_card(card); | 2052 | lcs_free_card(card); |
| 2053 | put_device(&ccwgdev->dev); | 2053 | put_device(&ccwgdev->dev); |
| 2054 | return ret; | 2054 | return ret; |
| @@ -2140,7 +2140,6 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) | |||
| 2140 | default: | 2140 | default: |
| 2141 | LCS_DBF_TEXT(3, setup, "errinit"); | 2141 | LCS_DBF_TEXT(3, setup, "errinit"); |
| 2142 | PRINT_ERR("LCS: Initialization failed\n"); | 2142 | PRINT_ERR("LCS: Initialization failed\n"); |
| 2143 | PRINT_ERR("LCS: No device found!\n"); | ||
| 2144 | goto out; | 2143 | goto out; |
| 2145 | } | 2144 | } |
| 2146 | if (!dev) | 2145 | if (!dev) |
| @@ -2269,7 +2268,6 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev) | |||
| 2269 | if (!card) | 2268 | if (!card) |
| 2270 | return; | 2269 | return; |
| 2271 | 2270 | ||
| 2272 | PRINT_INFO("Removing lcs group device ....\n"); | ||
| 2273 | LCS_DBF_TEXT(3, setup, "remdev"); | 2271 | LCS_DBF_TEXT(3, setup, "remdev"); |
| 2274 | LCS_DBF_HEX(3, setup, &card, sizeof(void*)); | 2272 | LCS_DBF_HEX(3, setup, &card, sizeof(void*)); |
| 2275 | if (ccwgdev->state == CCWGROUP_ONLINE) { | 2273 | if (ccwgdev->state == CCWGROUP_ONLINE) { |
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index e4ba6a0372ac..9242b5acc66b 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
| @@ -625,9 +625,6 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, | |||
| 625 | offset += header->next; | 625 | offset += header->next; |
| 626 | header->next -= NETIUCV_HDRLEN; | 626 | header->next -= NETIUCV_HDRLEN; |
| 627 | if (skb_tailroom(pskb) < header->next) { | 627 | if (skb_tailroom(pskb) < header->next) { |
| 628 | PRINT_WARN("%s: Illegal next field in iucv header: " | ||
| 629 | "%d > %d\n", | ||
| 630 | dev->name, header->next, skb_tailroom(pskb)); | ||
| 631 | IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n", | 628 | IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n", |
| 632 | header->next, skb_tailroom(pskb)); | 629 | header->next, skb_tailroom(pskb)); |
| 633 | return; | 630 | return; |
| @@ -636,8 +633,6 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, | |||
| 636 | skb_reset_mac_header(pskb); | 633 | skb_reset_mac_header(pskb); |
| 637 | skb = dev_alloc_skb(pskb->len); | 634 | skb = dev_alloc_skb(pskb->len); |
| 638 | if (!skb) { | 635 | if (!skb) { |
| 639 | PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n", | ||
| 640 | dev->name); | ||
| 641 | IUCV_DBF_TEXT(data, 2, | 636 | IUCV_DBF_TEXT(data, 2, |
| 642 | "Out of memory in netiucv_unpack_skb\n"); | 637 | "Out of memory in netiucv_unpack_skb\n"); |
| 643 | privptr->stats.rx_dropped++; | 638 | privptr->stats.rx_dropped++; |
| @@ -674,7 +669,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg) | |||
| 674 | 669 | ||
| 675 | if (!conn->netdev) { | 670 | if (!conn->netdev) { |
| 676 | iucv_message_reject(conn->path, msg); | 671 | iucv_message_reject(conn->path, msg); |
| 677 | PRINT_WARN("Received data for unlinked connection\n"); | ||
| 678 | IUCV_DBF_TEXT(data, 2, | 672 | IUCV_DBF_TEXT(data, 2, |
| 679 | "Received data for unlinked connection\n"); | 673 | "Received data for unlinked connection\n"); |
| 680 | return; | 674 | return; |
| @@ -682,8 +676,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg) | |||
| 682 | if (msg->length > conn->max_buffsize) { | 676 | if (msg->length > conn->max_buffsize) { |
| 683 | iucv_message_reject(conn->path, msg); | 677 | iucv_message_reject(conn->path, msg); |
| 684 | privptr->stats.rx_dropped++; | 678 | privptr->stats.rx_dropped++; |
| 685 | PRINT_WARN("msglen %d > max_buffsize %d\n", | ||
| 686 | msg->length, conn->max_buffsize); | ||
| 687 | IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", | 679 | IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", |
| 688 | msg->length, conn->max_buffsize); | 680 | msg->length, conn->max_buffsize); |
| 689 | return; | 681 | return; |
| @@ -695,7 +687,6 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg) | |||
| 695 | msg->length, NULL); | 687 | msg->length, NULL); |
| 696 | if (rc || msg->length < 5) { | 688 | if (rc || msg->length < 5) { |
| 697 | privptr->stats.rx_errors++; | 689 | privptr->stats.rx_errors++; |
| 698 | PRINT_WARN("iucv_receive returned %08x\n", rc); | ||
| 699 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); | 690 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); |
| 700 | return; | 691 | return; |
| 701 | } | 692 | } |
| @@ -778,7 +769,6 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg) | |||
| 778 | fsm_newstate(fi, CONN_STATE_IDLE); | 769 | fsm_newstate(fi, CONN_STATE_IDLE); |
| 779 | if (privptr) | 770 | if (privptr) |
| 780 | privptr->stats.tx_errors += txpackets; | 771 | privptr->stats.tx_errors += txpackets; |
| 781 | PRINT_WARN("iucv_send returned %08x\n", rc); | ||
| 782 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); | 772 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); |
| 783 | } else { | 773 | } else { |
| 784 | if (privptr) { | 774 | if (privptr) { |
| @@ -806,8 +796,6 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) | |||
| 806 | path->flags = 0; | 796 | path->flags = 0; |
| 807 | rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); | 797 | rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); |
| 808 | if (rc) { | 798 | if (rc) { |
| 809 | PRINT_WARN("%s: IUCV accept failed with error %d\n", | ||
| 810 | netdev->name, rc); | ||
| 811 | IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); | 799 | IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); |
| 812 | return; | 800 | return; |
| 813 | } | 801 | } |
| @@ -873,7 +861,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) | |||
| 873 | IUCV_DBF_TEXT(trace, 3, __func__); | 861 | IUCV_DBF_TEXT(trace, 3, __func__); |
| 874 | 862 | ||
| 875 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 863 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
| 876 | PRINT_DEBUG("%s('%s'): connecting ...\n", | 864 | IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n", |
| 877 | conn->netdev->name, conn->userid); | 865 | conn->netdev->name, conn->userid); |
| 878 | 866 | ||
| 879 | /* | 867 | /* |
| @@ -968,8 +956,8 @@ static void conn_action_inval(fsm_instance *fi, int event, void *arg) | |||
| 968 | struct iucv_connection *conn = arg; | 956 | struct iucv_connection *conn = arg; |
| 969 | struct net_device *netdev = conn->netdev; | 957 | struct net_device *netdev = conn->netdev; |
| 970 | 958 | ||
| 971 | PRINT_WARN("%s: Cannot connect without username\n", netdev->name); | 959 | IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n", |
| 972 | IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n"); | 960 | netdev->name, conn->userid); |
| 973 | } | 961 | } |
| 974 | 962 | ||
| 975 | static const fsm_node conn_fsm[] = { | 963 | static const fsm_node conn_fsm[] = { |
| @@ -1077,9 +1065,6 @@ dev_action_connup(fsm_instance *fi, int event, void *arg) | |||
| 1077 | "connection is up and running\n"); | 1065 | "connection is up and running\n"); |
| 1078 | break; | 1066 | break; |
| 1079 | case DEV_STATE_STOPWAIT: | 1067 | case DEV_STATE_STOPWAIT: |
| 1080 | PRINT_INFO( | ||
| 1081 | "%s: got connection UP event during shutdown!\n", | ||
| 1082 | dev->name); | ||
| 1083 | IUCV_DBF_TEXT(data, 2, | 1068 | IUCV_DBF_TEXT(data, 2, |
| 1084 | "dev_action_connup: in DEV_STATE_STOPWAIT\n"); | 1069 | "dev_action_connup: in DEV_STATE_STOPWAIT\n"); |
| 1085 | break; | 1070 | break; |
| @@ -1174,8 +1159,6 @@ static int netiucv_transmit_skb(struct iucv_connection *conn, | |||
| 1174 | nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + | 1159 | nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + |
| 1175 | NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA); | 1160 | NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA); |
| 1176 | if (!nskb) { | 1161 | if (!nskb) { |
| 1177 | PRINT_WARN("%s: Could not allocate tx_skb\n", | ||
| 1178 | conn->netdev->name); | ||
| 1179 | IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n"); | 1162 | IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n"); |
| 1180 | rc = -ENOMEM; | 1163 | rc = -ENOMEM; |
| 1181 | return rc; | 1164 | return rc; |
| @@ -1223,7 +1206,6 @@ static int netiucv_transmit_skb(struct iucv_connection *conn, | |||
| 1223 | skb_pull(skb, NETIUCV_HDRLEN); | 1206 | skb_pull(skb, NETIUCV_HDRLEN); |
| 1224 | skb_trim(skb, skb->len - NETIUCV_HDRLEN); | 1207 | skb_trim(skb, skb->len - NETIUCV_HDRLEN); |
| 1225 | } | 1208 | } |
| 1226 | PRINT_WARN("iucv_send returned %08x\n", rc); | ||
| 1227 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); | 1209 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); |
| 1228 | } else { | 1210 | } else { |
| 1229 | if (copied) | 1211 | if (copied) |
| @@ -1293,14 +1275,11 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) | |||
| 1293 | * Some sanity checks ... | 1275 | * Some sanity checks ... |
| 1294 | */ | 1276 | */ |
| 1295 | if (skb == NULL) { | 1277 | if (skb == NULL) { |
| 1296 | PRINT_WARN("%s: NULL sk_buff passed\n", dev->name); | ||
| 1297 | IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n"); | 1278 | IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n"); |
| 1298 | privptr->stats.tx_dropped++; | 1279 | privptr->stats.tx_dropped++; |
| 1299 | return 0; | 1280 | return 0; |
| 1300 | } | 1281 | } |
| 1301 | if (skb_headroom(skb) < NETIUCV_HDRLEN) { | 1282 | if (skb_headroom(skb) < NETIUCV_HDRLEN) { |
| 1302 | PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n", | ||
| 1303 | dev->name, NETIUCV_HDRLEN); | ||
| 1304 | IUCV_DBF_TEXT(data, 2, | 1283 | IUCV_DBF_TEXT(data, 2, |
| 1305 | "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n"); | 1284 | "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n"); |
| 1306 | dev_kfree_skb(skb); | 1285 | dev_kfree_skb(skb); |
| @@ -1393,7 +1372,6 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr, | |||
| 1393 | 1372 | ||
| 1394 | IUCV_DBF_TEXT(trace, 3, __func__); | 1373 | IUCV_DBF_TEXT(trace, 3, __func__); |
| 1395 | if (count > 9) { | 1374 | if (count > 9) { |
| 1396 | PRINT_WARN("netiucv: username too long (%d)!\n", (int) count); | ||
| 1397 | IUCV_DBF_TEXT_(setup, 2, | 1375 | IUCV_DBF_TEXT_(setup, 2, |
| 1398 | "%d is length of username\n", (int) count); | 1376 | "%d is length of username\n", (int) count); |
| 1399 | return -EINVAL; | 1377 | return -EINVAL; |
| @@ -1409,7 +1387,6 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr, | |||
| 1409 | /* trailing lf, grr */ | 1387 | /* trailing lf, grr */ |
| 1410 | break; | 1388 | break; |
| 1411 | } | 1389 | } |
| 1412 | PRINT_WARN("netiucv: Invalid char %c in username!\n", *p); | ||
| 1413 | IUCV_DBF_TEXT_(setup, 2, | 1390 | IUCV_DBF_TEXT_(setup, 2, |
| 1414 | "username: invalid character %c\n", *p); | 1391 | "username: invalid character %c\n", *p); |
| 1415 | return -EINVAL; | 1392 | return -EINVAL; |
| @@ -1421,18 +1398,15 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr, | |||
| 1421 | if (memcmp(username, priv->conn->userid, 9) && | 1398 | if (memcmp(username, priv->conn->userid, 9) && |
| 1422 | (ndev->flags & (IFF_UP | IFF_RUNNING))) { | 1399 | (ndev->flags & (IFF_UP | IFF_RUNNING))) { |
| 1423 | /* username changed while the interface is active. */ | 1400 | /* username changed while the interface is active. */ |
| 1424 | PRINT_WARN("netiucv: device %s active, connected to %s\n", | ||
| 1425 | dev->bus_id, priv->conn->userid); | ||
| 1426 | PRINT_WARN("netiucv: user cannot be updated\n"); | ||
| 1427 | IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); | 1401 | IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); |
| 1428 | return -EBUSY; | 1402 | return -EPERM; |
| 1429 | } | 1403 | } |
| 1430 | read_lock_bh(&iucv_connection_rwlock); | 1404 | read_lock_bh(&iucv_connection_rwlock); |
| 1431 | list_for_each_entry(cp, &iucv_connection_list, list) { | 1405 | list_for_each_entry(cp, &iucv_connection_list, list) { |
| 1432 | if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { | 1406 | if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { |
| 1433 | read_unlock_bh(&iucv_connection_rwlock); | 1407 | read_unlock_bh(&iucv_connection_rwlock); |
| 1434 | PRINT_WARN("netiucv: Connection to %s already " | 1408 | IUCV_DBF_TEXT_(setup, 2, "user_write: Connection " |
| 1435 | "exists\n", username); | 1409 | "to %s already exists\n", username); |
| 1436 | return -EEXIST; | 1410 | return -EEXIST; |
| 1437 | } | 1411 | } |
| 1438 | } | 1412 | } |
| @@ -1466,13 +1440,10 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, | |||
| 1466 | bs1 = simple_strtoul(buf, &e, 0); | 1440 | bs1 = simple_strtoul(buf, &e, 0); |
| 1467 | 1441 | ||
| 1468 | if (e && (!isspace(*e))) { | 1442 | if (e && (!isspace(*e))) { |
| 1469 | PRINT_WARN("netiucv: Invalid character in buffer!\n"); | ||
| 1470 | IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e); | 1443 | IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e); |
| 1471 | return -EINVAL; | 1444 | return -EINVAL; |
| 1472 | } | 1445 | } |
| 1473 | if (bs1 > NETIUCV_BUFSIZE_MAX) { | 1446 | if (bs1 > NETIUCV_BUFSIZE_MAX) { |
| 1474 | PRINT_WARN("netiucv: Given buffer size %d too large.\n", | ||
| 1475 | bs1); | ||
| 1476 | IUCV_DBF_TEXT_(setup, 2, | 1447 | IUCV_DBF_TEXT_(setup, 2, |
| 1477 | "buffer_write: buffer size %d too large\n", | 1448 | "buffer_write: buffer size %d too large\n", |
| 1478 | bs1); | 1449 | bs1); |
| @@ -1480,16 +1451,12 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, | |||
| 1480 | } | 1451 | } |
| 1481 | if ((ndev->flags & IFF_RUNNING) && | 1452 | if ((ndev->flags & IFF_RUNNING) && |
| 1482 | (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) { | 1453 | (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) { |
| 1483 | PRINT_WARN("netiucv: Given buffer size %d too small.\n", | ||
| 1484 | bs1); | ||
| 1485 | IUCV_DBF_TEXT_(setup, 2, | 1454 | IUCV_DBF_TEXT_(setup, 2, |
| 1486 | "buffer_write: buffer size %d too small\n", | 1455 | "buffer_write: buffer size %d too small\n", |
| 1487 | bs1); | 1456 | bs1); |
| 1488 | return -EINVAL; | 1457 | return -EINVAL; |
| 1489 | } | 1458 | } |
| 1490 | if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) { | 1459 | if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) { |
| 1491 | PRINT_WARN("netiucv: Given buffer size %d too small.\n", | ||
| 1492 | bs1); | ||
| 1493 | IUCV_DBF_TEXT_(setup, 2, | 1460 | IUCV_DBF_TEXT_(setup, 2, |
| 1494 | "buffer_write: buffer size %d too small\n", | 1461 | "buffer_write: buffer size %d too small\n", |
| 1495 | bs1); | 1462 | bs1); |
| @@ -1963,7 +1930,6 @@ static ssize_t conn_write(struct device_driver *drv, | |||
| 1963 | 1930 | ||
| 1964 | IUCV_DBF_TEXT(trace, 3, __func__); | 1931 | IUCV_DBF_TEXT(trace, 3, __func__); |
| 1965 | if (count>9) { | 1932 | if (count>9) { |
| 1966 | PRINT_WARN("netiucv: username too long (%d)!\n", (int)count); | ||
| 1967 | IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); | 1933 | IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); |
| 1968 | return -EINVAL; | 1934 | return -EINVAL; |
| 1969 | } | 1935 | } |
| @@ -1976,7 +1942,6 @@ static ssize_t conn_write(struct device_driver *drv, | |||
| 1976 | if (*p == '\n') | 1942 | if (*p == '\n') |
| 1977 | /* trailing lf, grr */ | 1943 | /* trailing lf, grr */ |
| 1978 | break; | 1944 | break; |
| 1979 | PRINT_WARN("netiucv: Invalid character in username!\n"); | ||
| 1980 | IUCV_DBF_TEXT_(setup, 2, | 1945 | IUCV_DBF_TEXT_(setup, 2, |
| 1981 | "conn_write: invalid character %c\n", *p); | 1946 | "conn_write: invalid character %c\n", *p); |
| 1982 | return -EINVAL; | 1947 | return -EINVAL; |
| @@ -1989,8 +1954,8 @@ static ssize_t conn_write(struct device_driver *drv, | |||
| 1989 | list_for_each_entry(cp, &iucv_connection_list, list) { | 1954 | list_for_each_entry(cp, &iucv_connection_list, list) { |
| 1990 | if (!strncmp(username, cp->userid, 9)) { | 1955 | if (!strncmp(username, cp->userid, 9)) { |
| 1991 | read_unlock_bh(&iucv_connection_rwlock); | 1956 | read_unlock_bh(&iucv_connection_rwlock); |
| 1992 | PRINT_WARN("netiucv: Connection to %s already " | 1957 | IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection " |
| 1993 | "exists\n", username); | 1958 | "to %s already exists\n", username); |
| 1994 | return -EEXIST; | 1959 | return -EEXIST; |
| 1995 | } | 1960 | } |
| 1996 | } | 1961 | } |
| @@ -1998,9 +1963,6 @@ static ssize_t conn_write(struct device_driver *drv, | |||
| 1998 | 1963 | ||
| 1999 | dev = netiucv_init_netdevice(username); | 1964 | dev = netiucv_init_netdevice(username); |
| 2000 | if (!dev) { | 1965 | if (!dev) { |
| 2001 | PRINT_WARN("netiucv: Could not allocate network device " | ||
| 2002 | "structure for user '%s'\n", | ||
| 2003 | netiucv_printname(username)); | ||
| 2004 | IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); | 1966 | IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); |
| 2005 | return -ENODEV; | 1967 | return -ENODEV; |
| 2006 | } | 1968 | } |
| @@ -2020,15 +1982,12 @@ static ssize_t conn_write(struct device_driver *drv, | |||
| 2020 | if (rc) | 1982 | if (rc) |
| 2021 | goto out_unreg; | 1983 | goto out_unreg; |
| 2022 | 1984 | ||
| 2023 | PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username)); | ||
| 2024 | 1985 | ||
| 2025 | return count; | 1986 | return count; |
| 2026 | 1987 | ||
| 2027 | out_unreg: | 1988 | out_unreg: |
| 2028 | netiucv_unregister_device(priv->dev); | 1989 | netiucv_unregister_device(priv->dev); |
| 2029 | out_free_ndev: | 1990 | out_free_ndev: |
| 2030 | PRINT_WARN("netiucv: Could not register '%s'\n", dev->name); | ||
| 2031 | IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n"); | ||
| 2032 | netiucv_free_netdevice(dev); | 1991 | netiucv_free_netdevice(dev); |
| 2033 | return rc; | 1992 | return rc; |
| 2034 | } | 1993 | } |
| @@ -2073,14 +2032,13 @@ static ssize_t remove_write (struct device_driver *drv, | |||
| 2073 | PRINT_WARN("netiucv: %s cannot be removed\n", | 2032 | PRINT_WARN("netiucv: %s cannot be removed\n", |
| 2074 | ndev->name); | 2033 | ndev->name); |
| 2075 | IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); | 2034 | IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); |
| 2076 | return -EBUSY; | 2035 | return -EPERM; |
| 2077 | } | 2036 | } |
| 2078 | unregister_netdev(ndev); | 2037 | unregister_netdev(ndev); |
| 2079 | netiucv_unregister_device(dev); | 2038 | netiucv_unregister_device(dev); |
| 2080 | return count; | 2039 | return count; |
| 2081 | } | 2040 | } |
| 2082 | read_unlock_bh(&iucv_connection_rwlock); | 2041 | read_unlock_bh(&iucv_connection_rwlock); |
| 2083 | PRINT_WARN("netiucv: net device %s unknown\n", name); | ||
| 2084 | IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); | 2042 | IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); |
| 2085 | return -EINVAL; | 2043 | return -EINVAL; |
| 2086 | } | 2044 | } |
| @@ -2148,7 +2106,6 @@ static int __init netiucv_init(void) | |||
| 2148 | netiucv_driver.groups = netiucv_drv_attr_groups; | 2106 | netiucv_driver.groups = netiucv_drv_attr_groups; |
| 2149 | rc = driver_register(&netiucv_driver); | 2107 | rc = driver_register(&netiucv_driver); |
| 2150 | if (rc) { | 2108 | if (rc) { |
| 2151 | PRINT_ERR("NETIUCV: failed to register driver.\n"); | ||
| 2152 | IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); | 2109 | IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); |
| 2153 | goto out_iucv; | 2110 | goto out_iucv; |
| 2154 | } | 2111 | } |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 9a71dae223e8..0ac54dc638c2 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
| @@ -420,7 +420,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, | |||
| 420 | QETH_DBF_TEXT(TRACE, 3, "urla"); | 420 | QETH_DBF_TEXT(TRACE, 3, "urla"); |
| 421 | break; | 421 | break; |
| 422 | default: | 422 | default: |
| 423 | PRINT_WARN("Received data is IPA " | 423 | QETH_DBF_MESSAGE(2, "Received data is IPA " |
| 424 | "but not a reply!\n"); | 424 | "but not a reply!\n"); |
| 425 | break; | 425 | break; |
| 426 | } | 426 | } |
| @@ -735,8 +735,8 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) | |||
| 735 | char *sense; | 735 | char *sense; |
| 736 | 736 | ||
| 737 | sense = (char *) irb->ecw; | 737 | sense = (char *) irb->ecw; |
| 738 | cstat = irb->scsw.cstat; | 738 | cstat = irb->scsw.cmd.cstat; |
| 739 | dstat = irb->scsw.dstat; | 739 | dstat = irb->scsw.cmd.dstat; |
| 740 | 740 | ||
| 741 | if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | | 741 | if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | |
| 742 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | | 742 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | |
| @@ -823,8 +823,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
| 823 | 823 | ||
| 824 | if (__qeth_check_irb_error(cdev, intparm, irb)) | 824 | if (__qeth_check_irb_error(cdev, intparm, irb)) |
| 825 | return; | 825 | return; |
| 826 | cstat = irb->scsw.cstat; | 826 | cstat = irb->scsw.cmd.cstat; |
| 827 | dstat = irb->scsw.dstat; | 827 | dstat = irb->scsw.cmd.dstat; |
| 828 | 828 | ||
| 829 | card = CARD_FROM_CDEV(cdev); | 829 | card = CARD_FROM_CDEV(cdev); |
| 830 | if (!card) | 830 | if (!card) |
| @@ -842,10 +842,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
| 842 | } | 842 | } |
| 843 | atomic_set(&channel->irq_pending, 0); | 843 | atomic_set(&channel->irq_pending, 0); |
| 844 | 844 | ||
| 845 | if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC)) | 845 | if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC)) |
| 846 | channel->state = CH_STATE_STOPPED; | 846 | channel->state = CH_STATE_STOPPED; |
| 847 | 847 | ||
| 848 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC)) | 848 | if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC)) |
| 849 | channel->state = CH_STATE_HALTED; | 849 | channel->state = CH_STATE_HALTED; |
| 850 | 850 | ||
| 851 | /*let's wake up immediately on data channel*/ | 851 | /*let's wake up immediately on data channel*/ |
| @@ -4092,7 +4092,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) | |||
| 4092 | 4092 | ||
| 4093 | rc = qeth_determine_card_type(card); | 4093 | rc = qeth_determine_card_type(card); |
| 4094 | if (rc) { | 4094 | if (rc) { |
| 4095 | PRINT_WARN("%s: not a valid card type\n", __func__); | ||
| 4096 | QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); | 4095 | QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); |
| 4097 | goto err_card; | 4096 | goto err_card; |
| 4098 | } | 4097 | } |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 999552c83bbe..06deaee50f6d 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
| @@ -944,15 +944,8 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, | |||
| 944 | else | 944 | else |
| 945 | rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, | 945 | rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, |
| 946 | addr->del_flags); | 946 | addr->del_flags); |
| 947 | if (rc) { | 947 | if (rc) |
| 948 | QETH_DBF_TEXT(TRACE, 2, "failed"); | 948 | QETH_DBF_TEXT(TRACE, 2, "failed"); |
| 949 | /* TODO: re-activate this warning as soon as we have a | ||
| 950 | * clean mirco code | ||
| 951 | qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); | ||
| 952 | PRINT_WARN("Could not deregister IP address %s (rc=%x)\n", | ||
| 953 | buf, rc); | ||
| 954 | */ | ||
| 955 | } | ||
| 956 | 949 | ||
| 957 | return rc; | 950 | return rc; |
| 958 | } | 951 | } |
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 8735a415a116..164e090c2625 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c | |||
| @@ -156,11 +156,8 @@ static int __init smsg_init(void) | |||
| 156 | if (rc != 0) | 156 | if (rc != 0) |
| 157 | goto out; | 157 | goto out; |
| 158 | rc = iucv_register(&smsg_handler, 1); | 158 | rc = iucv_register(&smsg_handler, 1); |
| 159 | if (rc) { | 159 | if (rc) |
| 160 | printk(KERN_ERR "SMSGIUCV: failed to register to iucv"); | ||
| 161 | rc = -EIO; /* better errno ? */ | ||
| 162 | goto out_driver; | 160 | goto out_driver; |
| 163 | } | ||
| 164 | smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL); | 161 | smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL); |
| 165 | if (!smsg_path) { | 162 | if (!smsg_path) { |
| 166 | rc = -ENOMEM; | 163 | rc = -ENOMEM; |
| @@ -168,11 +165,8 @@ static int __init smsg_init(void) | |||
| 168 | } | 165 | } |
| 169 | rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", | 166 | rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", |
| 170 | NULL, NULL, NULL); | 167 | NULL, NULL, NULL); |
| 171 | if (rc) { | 168 | if (rc) |
| 172 | printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG"); | ||
| 173 | rc = -EIO; /* better errno ? */ | ||
| 174 | goto out_free; | 169 | goto out_free; |
| 175 | } | ||
| 176 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); | 170 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); |
| 177 | return 0; | 171 | return 0; |
| 178 | 172 | ||
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index 5bfbe7659830..834e9ee7e934 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c | |||
| @@ -2,10 +2,10 @@ | |||
| 2 | * drivers/s390/s390mach.c | 2 | * drivers/s390/s390mach.c |
| 3 | * S/390 machine check handler | 3 | * S/390 machine check handler |
| 4 | * | 4 | * |
| 5 | * S390 version | 5 | * Copyright IBM Corp. 2000,2008 |
| 6 | * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
| 7 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | 6 | * Author(s): Ingo Adlung (adlung@de.ibm.com) |
| 8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
| 8 | * Cornelia Huck <cornelia.huck@de.ibm.com> | ||
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
| @@ -18,10 +18,6 @@ | |||
| 18 | #include <asm/etr.h> | 18 | #include <asm/etr.h> |
| 19 | #include <asm/lowcore.h> | 19 | #include <asm/lowcore.h> |
| 20 | #include <asm/cio.h> | 20 | #include <asm/cio.h> |
| 21 | #include "cio/cio.h" | ||
| 22 | #include "cio/chsc.h" | ||
| 23 | #include "cio/css.h" | ||
| 24 | #include "cio/chp.h" | ||
| 25 | #include "s390mach.h" | 21 | #include "s390mach.h" |
| 26 | 22 | ||
| 27 | static struct semaphore m_sem; | 23 | static struct semaphore m_sem; |
| @@ -36,13 +32,40 @@ s390_handle_damage(char *msg) | |||
| 36 | for(;;); | 32 | for(;;); |
| 37 | } | 33 | } |
| 38 | 34 | ||
| 35 | static crw_handler_t crw_handlers[NR_RSCS]; | ||
| 36 | |||
| 37 | /** | ||
| 38 | * s390_register_crw_handler() - register a channel report word handler | ||
| 39 | * @rsc: reporting source code to handle | ||
| 40 | * @handler: handler to be registered | ||
| 41 | * | ||
| 42 | * Returns %0 on success and a negative error value otherwise. | ||
| 43 | */ | ||
| 44 | int s390_register_crw_handler(int rsc, crw_handler_t handler) | ||
| 45 | { | ||
| 46 | if ((rsc < 0) || (rsc >= NR_RSCS)) | ||
| 47 | return -EINVAL; | ||
| 48 | if (!cmpxchg(&crw_handlers[rsc], NULL, handler)) | ||
| 49 | return 0; | ||
| 50 | return -EBUSY; | ||
| 51 | } | ||
| 52 | |||
| 53 | /** | ||
| 54 | * s390_unregister_crw_handler() - unregister a channel report word handler | ||
| 55 | * @rsc: reporting source code to handle | ||
| 56 | */ | ||
| 57 | void s390_unregister_crw_handler(int rsc) | ||
| 58 | { | ||
| 59 | if ((rsc < 0) || (rsc >= NR_RSCS)) | ||
| 60 | return; | ||
| 61 | xchg(&crw_handlers[rsc], NULL); | ||
| 62 | synchronize_sched(); | ||
| 63 | } | ||
| 64 | |||
| 39 | /* | 65 | /* |
| 40 | * Retrieve CRWs and call function to handle event. | 66 | * Retrieve CRWs and call function to handle event. |
| 41 | * | ||
| 42 | * Note : we currently process CRWs for io and chsc subchannels only | ||
| 43 | */ | 67 | */ |
| 44 | static int | 68 | static int s390_collect_crw_info(void *param) |
| 45 | s390_collect_crw_info(void *param) | ||
| 46 | { | 69 | { |
| 47 | struct crw crw[2]; | 70 | struct crw crw[2]; |
| 48 | int ccode; | 71 | int ccode; |
| @@ -84,57 +107,24 @@ repeat: | |||
| 84 | crw[chain].rsid); | 107 | crw[chain].rsid); |
| 85 | /* Check for overflows. */ | 108 | /* Check for overflows. */ |
| 86 | if (crw[chain].oflw) { | 109 | if (crw[chain].oflw) { |
| 110 | int i; | ||
| 111 | |||
| 87 | pr_debug("%s: crw overflow detected!\n", __func__); | 112 | pr_debug("%s: crw overflow detected!\n", __func__); |
| 88 | css_schedule_eval_all(); | 113 | for (i = 0; i < NR_RSCS; i++) { |
| 114 | if (crw_handlers[i]) | ||
| 115 | crw_handlers[i](NULL, NULL, 1); | ||
| 116 | } | ||
| 89 | chain = 0; | 117 | chain = 0; |
| 90 | continue; | 118 | continue; |
| 91 | } | 119 | } |
| 92 | switch (crw[chain].rsc) { | 120 | if (crw[0].chn && !chain) { |
| 93 | case CRW_RSC_SCH: | 121 | chain++; |
| 94 | if (crw[0].chn && !chain) | 122 | continue; |
| 95 | break; | ||
| 96 | pr_debug("source is subchannel %04X\n", crw[0].rsid); | ||
| 97 | css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0); | ||
| 98 | break; | ||
| 99 | case CRW_RSC_MONITOR: | ||
| 100 | pr_debug("source is monitoring facility\n"); | ||
| 101 | break; | ||
| 102 | case CRW_RSC_CPATH: | ||
| 103 | pr_debug("source is channel path %02X\n", crw[0].rsid); | ||
| 104 | /* | ||
| 105 | * Check for solicited machine checks. These are | ||
| 106 | * created by reset channel path and need not be | ||
| 107 | * reported to the common I/O layer. | ||
| 108 | */ | ||
| 109 | if (crw[chain].slct) { | ||
| 110 | pr_debug("solicited machine check for " | ||
| 111 | "channel path %02X\n", crw[0].rsid); | ||
| 112 | break; | ||
| 113 | } | ||
| 114 | switch (crw[0].erc) { | ||
| 115 | case CRW_ERC_IPARM: /* Path has come. */ | ||
| 116 | chp_process_crw(crw[0].rsid, 1); | ||
| 117 | break; | ||
| 118 | case CRW_ERC_PERRI: /* Path has gone. */ | ||
| 119 | case CRW_ERC_PERRN: | ||
| 120 | chp_process_crw(crw[0].rsid, 0); | ||
| 121 | break; | ||
| 122 | default: | ||
| 123 | pr_debug("Don't know how to handle erc=%x\n", | ||
| 124 | crw[0].erc); | ||
| 125 | } | ||
| 126 | break; | ||
| 127 | case CRW_RSC_CONFIG: | ||
| 128 | pr_debug("source is configuration-alert facility\n"); | ||
| 129 | break; | ||
| 130 | case CRW_RSC_CSS: | ||
| 131 | pr_debug("source is channel subsystem\n"); | ||
| 132 | chsc_process_crw(); | ||
| 133 | break; | ||
| 134 | default: | ||
| 135 | pr_debug("unknown source\n"); | ||
| 136 | break; | ||
| 137 | } | 123 | } |
| 124 | if (crw_handlers[crw[chain].rsc]) | ||
| 125 | crw_handlers[crw[chain].rsc](&crw[0], | ||
| 126 | chain ? &crw[1] : NULL, | ||
| 127 | 0); | ||
| 138 | /* chain is always 0 or 1 here. */ | 128 | /* chain is always 0 or 1 here. */ |
| 139 | chain = crw[chain].chn ? chain + 1 : 0; | 129 | chain = crw[chain].chn ? chain + 1 : 0; |
| 140 | } | 130 | } |
| @@ -468,6 +458,10 @@ s390_do_machine_check(struct pt_regs *regs) | |||
| 468 | etr_sync_check(); | 458 | etr_sync_check(); |
| 469 | if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) | 459 | if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) |
| 470 | etr_switch_to_local(); | 460 | etr_switch_to_local(); |
| 461 | if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC)) | ||
| 462 | stp_sync_check(); | ||
| 463 | if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND)) | ||
| 464 | stp_island_check(); | ||
| 471 | } | 465 | } |
| 472 | 466 | ||
| 473 | if (mci->se) | 467 | if (mci->se) |
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h index ca681f9b67fc..d39f8b697d27 100644 --- a/drivers/s390/s390mach.h +++ b/drivers/s390/s390mach.h | |||
| @@ -72,6 +72,13 @@ struct crw { | |||
| 72 | __u32 rsid : 16; /* reporting-source ID */ | 72 | __u32 rsid : 16; /* reporting-source ID */ |
| 73 | } __attribute__ ((packed)); | 73 | } __attribute__ ((packed)); |
| 74 | 74 | ||
| 75 | typedef void (*crw_handler_t)(struct crw *, struct crw *, int); | ||
| 76 | |||
| 77 | extern int s390_register_crw_handler(int rsc, crw_handler_t handler); | ||
| 78 | extern void s390_unregister_crw_handler(int rsc); | ||
| 79 | |||
| 80 | #define NR_RSCS 16 | ||
| 81 | |||
| 75 | #define CRW_RSC_MONITOR 0x2 /* monitoring facility */ | 82 | #define CRW_RSC_MONITOR 0x2 /* monitoring facility */ |
| 76 | #define CRW_RSC_SCH 0x3 /* subchannel */ | 83 | #define CRW_RSC_SCH 0x3 /* subchannel */ |
| 77 | #define CRW_RSC_CPATH 0x4 /* channel path */ | 84 | #define CRW_RSC_CPATH 0x4 /* channel path */ |
| @@ -105,6 +112,9 @@ static inline int stcrw(struct crw *pcrw ) | |||
| 105 | #define ED_ETR_SYNC 12 /* External damage ETR sync check */ | 112 | #define ED_ETR_SYNC 12 /* External damage ETR sync check */ |
| 106 | #define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ | 113 | #define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ |
| 107 | 114 | ||
| 115 | #define ED_STP_SYNC 7 /* External damage STP sync check */ | ||
| 116 | #define ED_STP_ISLAND 6 /* External damage STP island check */ | ||
| 117 | |||
| 108 | struct pt_regs; | 118 | struct pt_regs; |
| 109 | 119 | ||
| 110 | void s390_handle_mcck(void); | 120 | void s390_handle_mcck(void); |
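Editor's note: the two hunks above replace the hard-coded CRW dispatch in s390_collect_crw_info() with a per-RSC handler table. Below is a minimal sketch of how a subsystem might plug into that table; the handler name and the pr_debug output are illustrative additions, and the sketch assumes it lives next to drivers/s390/s390mach.h, while the registration calls, the crw_handler_t signature, the overflow convention (both CRW pointers NULL, overflow=1) and the CRW_RSC_SCH constant are taken from this patch.

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include "s390mach.h"

    /* crw0 is always valid except on overflow; crw1 is non-NULL only for
     * chained CRWs. */
    static void example_sch_crw_handler(struct crw *crw0, struct crw *crw1,
    					int overflow)
    {
    	if (overflow)
    		return;
    	pr_debug("CRW: rsc %x, rsid %04X, chained %d\n",
    		 crw0->rsc, crw0->rsid, crw1 != NULL);
    }

    static int __init example_init(void)
    {
    	/* Returns -EBUSY if another handler already owns this rsc. */
    	return s390_register_crw_handler(CRW_RSC_SCH, example_sch_crw_handler);
    }

    static void __exit example_exit(void)
    {
    	s390_unregister_crw_handler(CRW_RSC_SCH);
    }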
diff --git a/include/asm-s390/Kbuild b/include/asm-s390/Kbuild index 13c9805349f1..09f312501eb5 100644 --- a/include/asm-s390/Kbuild +++ b/include/asm-s390/Kbuild | |||
| @@ -8,6 +8,9 @@ header-y += ucontext.h | |||
| 8 | header-y += vtoc.h | 8 | header-y += vtoc.h |
| 9 | header-y += zcrypt.h | 9 | header-y += zcrypt.h |
| 10 | header-y += kvm.h | 10 | header-y += kvm.h |
| 11 | header-y += schid.h | ||
| 12 | header-y += chsc.h | ||
| 11 | 13 | ||
| 12 | unifdef-y += cmb.h | 14 | unifdef-y += cmb.h |
| 13 | unifdef-y += debug.h | 15 | unifdef-y += debug.h |
| 16 | unifdef-y += chpid.h | ||
diff --git a/include/asm-s390/airq.h b/include/asm-s390/airq.h index 41d028cb52a4..1ac80d6b0588 100644 --- a/include/asm-s390/airq.h +++ b/include/asm-s390/airq.h | |||
| @@ -13,7 +13,7 @@ | |||
| 13 | 13 | ||
| 14 | typedef void (*adapter_int_handler_t)(void *, void *); | 14 | typedef void (*adapter_int_handler_t)(void *, void *); |
| 15 | 15 | ||
| 16 | void *s390_register_adapter_interrupt(adapter_int_handler_t, void *); | 16 | void *s390_register_adapter_interrupt(adapter_int_handler_t, void *, u8); |
| 17 | void s390_unregister_adapter_interrupt(void *); | 17 | void s390_unregister_adapter_interrupt(void *, u8); |
| 18 | 18 | ||
| 19 | #endif /* _ASM_S390_AIRQ_H */ | 19 | #endif /* _ASM_S390_AIRQ_H */ |
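Editor's note: the adapter-interrupt API above now takes an interruption subclass (ISC) on both registration and deregistration. A hedged sketch of a caller follows; the handler name, the ISC value 3 and the parameter names are illustrative, and error checking of the returned pointer is omitted because its failure convention is not visible in this hunk.

    #include <asm/airq.h>

    static void *example_airq_token;

    static void example_adapter_handler(void *indicator, void *drv_data)
    {
    	/* runs for adapter interrupts on the ISC given at registration */
    }

    static void example_register(void)
    {
    	/* keep the returned token: it must be passed back on deregistration */
    	example_airq_token = s390_register_adapter_interrupt(
    				example_adapter_handler, NULL, 3 /* ISC */);
    }

    static void example_unregister(void)
    {
    	s390_unregister_adapter_interrupt(example_airq_token, 3 /* ISC */);
    }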
diff --git a/include/asm-s390/ccwdev.h b/include/asm-s390/ccwdev.h index 066aa70518ce..ba007d8df941 100644 --- a/include/asm-s390/ccwdev.h +++ b/include/asm-s390/ccwdev.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | 12 | ||
| 13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
| 14 | #include <linux/mod_devicetable.h> | 14 | #include <linux/mod_devicetable.h> |
| 15 | #include <asm/fcx.h> | ||
| 15 | 16 | ||
| 16 | /* structs from asm/cio.h */ | 17 | /* structs from asm/cio.h */ |
| 17 | struct irb; | 18 | struct irb; |
| @@ -157,6 +158,17 @@ extern int ccw_device_start_timeout_key(struct ccw_device *, struct ccw1 *, | |||
| 157 | extern int ccw_device_resume(struct ccw_device *); | 158 | extern int ccw_device_resume(struct ccw_device *); |
| 158 | extern int ccw_device_halt(struct ccw_device *, unsigned long); | 159 | extern int ccw_device_halt(struct ccw_device *, unsigned long); |
| 159 | extern int ccw_device_clear(struct ccw_device *, unsigned long); | 160 | extern int ccw_device_clear(struct ccw_device *, unsigned long); |
| 161 | int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, | ||
| 162 | unsigned long intparm, u8 lpm, u8 key); | ||
| 163 | int ccw_device_tm_start_key(struct ccw_device *, struct tcw *, | ||
| 164 | unsigned long, u8, u8); | ||
| 165 | int ccw_device_tm_start_timeout_key(struct ccw_device *, struct tcw *, | ||
| 166 | unsigned long, u8, u8, int); | ||
| 167 | int ccw_device_tm_start(struct ccw_device *, struct tcw *, | ||
| 168 | unsigned long, u8); | ||
| 169 | int ccw_device_tm_start_timeout(struct ccw_device *, struct tcw *, | ||
| 170 | unsigned long, u8, int); | ||
| 171 | int ccw_device_tm_intrg(struct ccw_device *cdev); | ||
| 160 | 172 | ||
| 161 | extern int ccw_device_set_online(struct ccw_device *cdev); | 173 | extern int ccw_device_set_online(struct ccw_device *cdev); |
| 162 | extern int ccw_device_set_offline(struct ccw_device *cdev); | 174 | extern int ccw_device_set_offline(struct ccw_device *cdev); |
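Editor's note: the new ccw_device_tm_* entry points above start transport-mode (FCX) I/O described by a TCW instead of a classic channel program. The sketch below shows only the call itself and assumes a TCCB and TSB prepared elsewhere; fields such as tccbl that depend on the TCCB layout are left to the caller. The function name, intparm 0x1234 and lpm 0xff are illustrative; struct tcw and TCW_FORMAT_DEFAULT come from the new asm/fcx.h later in this patch.

    #include <asm/ccwdev.h>
    #include <asm/fcx.h>

    static int example_tm_read(struct ccw_device *cdev, struct tcw *tcw,
    			   struct tccb *tccb, struct tsb *tsb,
    			   void *buf, u32 len)
    {
    	tcw->format = TCW_FORMAT_DEFAULT;
    	tcw->r = 1;				/* read operation */
    	tcw->input = (u64)(unsigned long) buf;
    	tcw->input_count = len;
    	tcw->tccb = (u64)(unsigned long) tccb;	/* prepared elsewhere */
    	tcw->tsb = (u64)(unsigned long) tsb;
    	/* intparm and lpm are arbitrary example values */
    	return ccw_device_tm_start(cdev, tcw, 0x1234, 0xff);
    }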
diff --git a/include/asm-s390/chpid.h b/include/asm-s390/chpid.h index b203336fd892..606844d0a5c3 100644 --- a/include/asm-s390/chpid.h +++ b/include/asm-s390/chpid.h | |||
| @@ -10,7 +10,6 @@ | |||
| 10 | 10 | ||
| 11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
| 12 | #include <asm/types.h> | 12 | #include <asm/types.h> |
| 13 | #include <asm/cio.h> | ||
| 14 | 13 | ||
| 15 | #define __MAX_CHPID 255 | 14 | #define __MAX_CHPID 255 |
| 16 | 15 | ||
| @@ -41,6 +40,9 @@ static inline void chp_id_next(struct chp_id *chpid) | |||
| 41 | } | 40 | } |
| 42 | } | 41 | } |
| 43 | 42 | ||
| 43 | #ifdef __KERNEL__ | ||
| 44 | #include <asm/cio.h> | ||
| 45 | |||
| 44 | static inline int chp_id_is_valid(struct chp_id *chpid) | 46 | static inline int chp_id_is_valid(struct chp_id *chpid) |
| 45 | { | 47 | { |
| 46 | return (chpid->cssid <= __MAX_CSSID); | 48 | return (chpid->cssid <= __MAX_CSSID); |
| @@ -49,5 +51,6 @@ static inline int chp_id_is_valid(struct chp_id *chpid) | |||
| 49 | 51 | ||
| 50 | #define chp_id_for_each(c) \ | 52 | #define chp_id_for_each(c) \ |
| 51 | for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c)) | 53 | for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c)) |
| 54 | #endif /* __KERNEL */ | ||
| 52 | 55 | ||
| 53 | #endif /* _ASM_S390_CHPID_H */ | 56 | #endif /* _ASM_S390_CHPID_H */ |
diff --git a/include/asm-s390/chsc.h b/include/asm-s390/chsc.h new file mode 100644 index 000000000000..d38d0cf62d4b --- /dev/null +++ b/include/asm-s390/chsc.h | |||
| @@ -0,0 +1,127 @@ | |||
| 1 | /* | ||
| 2 | * ioctl interface for /dev/chsc | ||
| 3 | * | ||
| 4 | * Copyright 2008 IBM Corp. | ||
| 5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> | ||
| 6 | */ | ||
| 7 | |||
| 8 | #ifndef _ASM_CHSC_H | ||
| 9 | #define _ASM_CHSC_H | ||
| 10 | |||
| 11 | #include <asm/chpid.h> | ||
| 12 | #include <asm/schid.h> | ||
| 13 | |||
| 14 | struct chsc_async_header { | ||
| 15 | __u16 length; | ||
| 16 | __u16 code; | ||
| 17 | __u32 cmd_dependend; | ||
| 18 | __u32 key : 4; | ||
| 19 | __u32 : 28; | ||
| 20 | struct subchannel_id sid; | ||
| 21 | } __attribute__ ((packed)); | ||
| 22 | |||
| 23 | struct chsc_async_area { | ||
| 24 | struct chsc_async_header header; | ||
| 25 | __u8 data[PAGE_SIZE - 16 /* size of chsc_async_header */]; | ||
| 26 | } __attribute__ ((packed)); | ||
| 27 | |||
| 28 | |||
| 29 | struct chsc_response_struct { | ||
| 30 | __u16 length; | ||
| 31 | __u16 code; | ||
| 32 | __u32 parms; | ||
| 33 | __u8 data[PAGE_SIZE - 8]; | ||
| 34 | } __attribute__ ((packed)); | ||
| 35 | |||
| 36 | struct chsc_chp_cd { | ||
| 37 | struct chp_id chpid; | ||
| 38 | int m; | ||
| 39 | int fmt; | ||
| 40 | struct chsc_response_struct cpcb; | ||
| 41 | }; | ||
| 42 | |||
| 43 | struct chsc_cu_cd { | ||
| 44 | __u16 cun; | ||
| 45 | __u8 cssid; | ||
| 46 | int m; | ||
| 47 | int fmt; | ||
| 48 | struct chsc_response_struct cucb; | ||
| 49 | }; | ||
| 50 | |||
| 51 | struct chsc_sch_cud { | ||
| 52 | struct subchannel_id schid; | ||
| 53 | int fmt; | ||
| 54 | struct chsc_response_struct scub; | ||
| 55 | }; | ||
| 56 | |||
| 57 | struct conf_id { | ||
| 58 | int m; | ||
| 59 | __u8 cssid; | ||
| 60 | __u8 ssid; | ||
| 61 | }; | ||
| 62 | |||
| 63 | struct chsc_conf_info { | ||
| 64 | struct conf_id id; | ||
| 65 | int fmt; | ||
| 66 | struct chsc_response_struct scid; | ||
| 67 | }; | ||
| 68 | |||
| 69 | struct ccl_parm_chpid { | ||
| 70 | int m; | ||
| 71 | struct chp_id chp; | ||
| 72 | }; | ||
| 73 | |||
| 74 | struct ccl_parm_cssids { | ||
| 75 | __u8 f_cssid; | ||
| 76 | __u8 l_cssid; | ||
| 77 | }; | ||
| 78 | |||
| 79 | struct chsc_comp_list { | ||
| 80 | struct { | ||
| 81 | enum { | ||
| 82 | CCL_CU_ON_CHP = 1, | ||
| 83 | CCL_CHP_TYPE_CAP = 2, | ||
| 84 | CCL_CSS_IMG = 4, | ||
| 85 | CCL_CSS_IMG_CONF_CHAR = 5, | ||
| 86 | CCL_IOP_CHP = 6, | ||
| 87 | } ctype; | ||
| 88 | int fmt; | ||
| 89 | struct ccl_parm_chpid chpid; | ||
| 90 | struct ccl_parm_cssids cssids; | ||
| 91 | } req; | ||
| 92 | struct chsc_response_struct sccl; | ||
| 93 | }; | ||
| 94 | |||
| 95 | struct chsc_dcal { | ||
| 96 | struct { | ||
| 97 | enum { | ||
| 98 | DCAL_CSS_IID_PN = 4, | ||
| 99 | } atype; | ||
| 100 | __u32 list_parm[2]; | ||
| 101 | int fmt; | ||
| 102 | } req; | ||
| 103 | struct chsc_response_struct sdcal; | ||
| 104 | }; | ||
| 105 | |||
| 106 | struct chsc_cpd_info { | ||
| 107 | struct chp_id chpid; | ||
| 108 | int m; | ||
| 109 | int fmt; | ||
| 110 | int rfmt; | ||
| 111 | int c; | ||
| 112 | struct chsc_response_struct chpdb; | ||
| 113 | }; | ||
| 114 | |||
| 115 | #define CHSC_IOCTL_MAGIC 'c' | ||
| 116 | |||
| 117 | #define CHSC_START _IOWR(CHSC_IOCTL_MAGIC, 0x81, struct chsc_async_area) | ||
| 118 | #define CHSC_INFO_CHANNEL_PATH _IOWR(CHSC_IOCTL_MAGIC, 0x82, \ | ||
| 119 | struct chsc_chp_cd) | ||
| 120 | #define CHSC_INFO_CU _IOWR(CHSC_IOCTL_MAGIC, 0x83, struct chsc_cu_cd) | ||
| 121 | #define CHSC_INFO_SCH_CU _IOWR(CHSC_IOCTL_MAGIC, 0x84, struct chsc_sch_cud) | ||
| 122 | #define CHSC_INFO_CI _IOWR(CHSC_IOCTL_MAGIC, 0x85, struct chsc_conf_info) | ||
| 123 | #define CHSC_INFO_CCL _IOWR(CHSC_IOCTL_MAGIC, 0x86, struct chsc_comp_list) | ||
| 124 | #define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info) | ||
| 125 | #define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal) | ||
| 126 | |||
| 127 | #endif | ||
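Editor's note: the header above defines the ioctl interface for the new /dev/chsc device (per its own header comment). A hedged userspace sketch follows; the channel-path id 0x40 is a made-up example, the chpid.id/cssid members are assumed from asm/chpid.h, and the PAGE_SIZE fallback is only there because the exported structures size their data areas with PAGE_SIZE.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    #ifndef PAGE_SIZE
    #define PAGE_SIZE 4096	/* the exported structs are sized with it */
    #endif
    #include <asm/chsc.h>

    int main(void)
    {
    	struct chsc_chp_cd cd;
    	int fd = open("/dev/chsc", O_RDWR);

    	if (fd < 0) {
    		perror("open /dev/chsc");
    		return 1;
    	}
    	memset(&cd, 0, sizeof(cd));
    	cd.chpid.id = 0x40;	/* hypothetical channel-path id */
    	if (ioctl(fd, CHSC_INFO_CHANNEL_PATH, &cd) < 0)
    		perror("CHSC_INFO_CHANNEL_PATH");
    	else
    		printf("response code 0x%04x, length %u\n",
    		       cd.cpcb.code, cd.cpcb.length);
    	close(fd);
    	return 0;
    }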
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h index 0818ecd30ca6..6dccb071aec3 100644 --- a/include/asm-s390/cio.h +++ b/include/asm-s390/cio.h | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | #define __MAX_CSSID 0 | 16 | #define __MAX_CSSID 0 |
| 17 | 17 | ||
| 18 | /** | 18 | /** |
| 19 | * struct scsw - subchannel status word | 19 | * struct cmd_scsw - command-mode subchannel status word |
| 20 | * @key: subchannel key | 20 | * @key: subchannel key |
| 21 | * @sctl: suspend control | 21 | * @sctl: suspend control |
| 22 | * @eswf: esw format | 22 | * @eswf: esw format |
| @@ -38,7 +38,7 @@ | |||
| 38 | * @cstat: subchannel status | 38 | * @cstat: subchannel status |
| 39 | * @count: residual count | 39 | * @count: residual count |
| 40 | */ | 40 | */ |
| 41 | struct scsw { | 41 | struct cmd_scsw { |
| 42 | __u32 key : 4; | 42 | __u32 key : 4; |
| 43 | __u32 sctl : 1; | 43 | __u32 sctl : 1; |
| 44 | __u32 eswf : 1; | 44 | __u32 eswf : 1; |
| @@ -61,6 +61,114 @@ struct scsw { | |||
| 61 | __u32 count : 16; | 61 | __u32 count : 16; |
| 62 | } __attribute__ ((packed)); | 62 | } __attribute__ ((packed)); |
| 63 | 63 | ||
| 64 | /** | ||
| 65 | * struct tm_scsw - transport-mode subchannel status word | ||
| 66 | * @key: subchannel key | ||
| 67 | * @eswf: esw format | ||
| 68 | * @cc: deferred condition code | ||
| 69 | * @fmt: format | ||
| 70 | * @x: IRB-format control | ||
| 71 | * @q: interrogate-complete | ||
| 72 | * @ectl: extended control | ||
| 73 | * @pno: path not operational | ||
| 74 | * @fctl: function control | ||
| 75 | * @actl: activity control | ||
| 76 | * @stctl: status control | ||
| 77 | * @tcw: TCW address | ||
| 78 | * @dstat: device status | ||
| 79 | * @cstat: subchannel status | ||
| 80 | * @fcxs: FCX status | ||
| 81 | * @schxs: subchannel-extended status | ||
| 82 | */ | ||
| 83 | struct tm_scsw { | ||
| 84 | u32 key:4; | ||
| 85 | u32 :1; | ||
| 86 | u32 eswf:1; | ||
| 87 | u32 cc:2; | ||
| 88 | u32 fmt:3; | ||
| 89 | u32 x:1; | ||
| 90 | u32 q:1; | ||
| 91 | u32 :1; | ||
| 92 | u32 ectl:1; | ||
| 93 | u32 pno:1; | ||
| 94 | u32 :1; | ||
| 95 | u32 fctl:3; | ||
| 96 | u32 actl:7; | ||
| 97 | u32 stctl:5; | ||
| 98 | u32 tcw; | ||
| 99 | u32 dstat:8; | ||
| 100 | u32 cstat:8; | ||
| 101 | u32 fcxs:8; | ||
| 102 | u32 schxs:8; | ||
| 103 | } __attribute__ ((packed)); | ||
| 104 | |||
| 105 | /** | ||
| 106 | * union scsw - subchannel status word | ||
| 107 | * @cmd: command-mode SCSW | ||
| 108 | * @tm: transport-mode SCSW | ||
| 109 | */ | ||
| 110 | union scsw { | ||
| 111 | struct cmd_scsw cmd; | ||
| 112 | struct tm_scsw tm; | ||
| 113 | } __attribute__ ((packed)); | ||
| 114 | |||
| 115 | int scsw_is_tm(union scsw *scsw); | ||
| 116 | u32 scsw_key(union scsw *scsw); | ||
| 117 | u32 scsw_eswf(union scsw *scsw); | ||
| 118 | u32 scsw_cc(union scsw *scsw); | ||
| 119 | u32 scsw_ectl(union scsw *scsw); | ||
| 120 | u32 scsw_pno(union scsw *scsw); | ||
| 121 | u32 scsw_fctl(union scsw *scsw); | ||
| 122 | u32 scsw_actl(union scsw *scsw); | ||
| 123 | u32 scsw_stctl(union scsw *scsw); | ||
| 124 | u32 scsw_dstat(union scsw *scsw); | ||
| 125 | u32 scsw_cstat(union scsw *scsw); | ||
| 126 | int scsw_is_solicited(union scsw *scsw); | ||
| 127 | int scsw_is_valid_key(union scsw *scsw); | ||
| 128 | int scsw_is_valid_eswf(union scsw *scsw); | ||
| 129 | int scsw_is_valid_cc(union scsw *scsw); | ||
| 130 | int scsw_is_valid_ectl(union scsw *scsw); | ||
| 131 | int scsw_is_valid_pno(union scsw *scsw); | ||
| 132 | int scsw_is_valid_fctl(union scsw *scsw); | ||
| 133 | int scsw_is_valid_actl(union scsw *scsw); | ||
| 134 | int scsw_is_valid_stctl(union scsw *scsw); | ||
| 135 | int scsw_is_valid_dstat(union scsw *scsw); | ||
| 136 | int scsw_is_valid_cstat(union scsw *scsw); | ||
| 137 | int scsw_cmd_is_valid_key(union scsw *scsw); | ||
| 138 | int scsw_cmd_is_valid_sctl(union scsw *scsw); | ||
| 139 | int scsw_cmd_is_valid_eswf(union scsw *scsw); | ||
| 140 | int scsw_cmd_is_valid_cc(union scsw *scsw); | ||
| 141 | int scsw_cmd_is_valid_fmt(union scsw *scsw); | ||
| 142 | int scsw_cmd_is_valid_pfch(union scsw *scsw); | ||
| 143 | int scsw_cmd_is_valid_isic(union scsw *scsw); | ||
| 144 | int scsw_cmd_is_valid_alcc(union scsw *scsw); | ||
| 145 | int scsw_cmd_is_valid_ssi(union scsw *scsw); | ||
| 146 | int scsw_cmd_is_valid_zcc(union scsw *scsw); | ||
| 147 | int scsw_cmd_is_valid_ectl(union scsw *scsw); | ||
| 148 | int scsw_cmd_is_valid_pno(union scsw *scsw); | ||
| 149 | int scsw_cmd_is_valid_fctl(union scsw *scsw); | ||
| 150 | int scsw_cmd_is_valid_actl(union scsw *scsw); | ||
| 151 | int scsw_cmd_is_valid_stctl(union scsw *scsw); | ||
| 152 | int scsw_cmd_is_valid_dstat(union scsw *scsw); | ||
| 153 | int scsw_cmd_is_valid_cstat(union scsw *scsw); | ||
| 154 | int scsw_cmd_is_solicited(union scsw *scsw); | ||
| 155 | int scsw_tm_is_valid_key(union scsw *scsw); | ||
| 156 | int scsw_tm_is_valid_eswf(union scsw *scsw); | ||
| 157 | int scsw_tm_is_valid_cc(union scsw *scsw); | ||
| 158 | int scsw_tm_is_valid_fmt(union scsw *scsw); | ||
| 159 | int scsw_tm_is_valid_x(union scsw *scsw); | ||
| 160 | int scsw_tm_is_valid_q(union scsw *scsw); | ||
| 161 | int scsw_tm_is_valid_ectl(union scsw *scsw); | ||
| 162 | int scsw_tm_is_valid_pno(union scsw *scsw); | ||
| 163 | int scsw_tm_is_valid_fctl(union scsw *scsw); | ||
| 164 | int scsw_tm_is_valid_actl(union scsw *scsw); | ||
| 165 | int scsw_tm_is_valid_stctl(union scsw *scsw); | ||
| 166 | int scsw_tm_is_valid_dstat(union scsw *scsw); | ||
| 167 | int scsw_tm_is_valid_cstat(union scsw *scsw); | ||
| 168 | int scsw_tm_is_valid_fcxs(union scsw *scsw); | ||
| 169 | int scsw_tm_is_valid_schxs(union scsw *scsw); | ||
| 170 | int scsw_tm_is_solicited(union scsw *scsw); | ||
| 171 | |||
| 64 | #define SCSW_FCTL_CLEAR_FUNC 0x1 | 172 | #define SCSW_FCTL_CLEAR_FUNC 0x1 |
| 65 | #define SCSW_FCTL_HALT_FUNC 0x2 | 173 | #define SCSW_FCTL_HALT_FUNC 0x2 |
| 66 | #define SCSW_FCTL_START_FUNC 0x4 | 174 | #define SCSW_FCTL_START_FUNC 0x4 |
| @@ -303,7 +411,7 @@ struct esw3 { | |||
| 303 | * if applicable). | 411 | * if applicable). |
| 304 | */ | 412 | */ |
| 305 | struct irb { | 413 | struct irb { |
| 306 | struct scsw scsw; | 414 | union scsw scsw; |
| 307 | union { | 415 | union { |
| 308 | struct esw0 esw0; | 416 | struct esw0 esw0; |
| 309 | struct esw1 esw1; | 417 | struct esw1 esw1; |
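Editor's note: with struct irb now embedding a union scsw, drivers either address the cmd/tm views explicitly (as in the qeth hunks above) or go through the new scsw_* accessors. A minimal sketch of an interrupt handler doing the latter; the handler name and the pr_debug messages are illustrative, while the accessors, fields and SCSW_FCTL_CLEAR_FUNC are declared in this hunk.

    #include <linux/kernel.h>
    #include <asm/cio.h>
    #include <asm/ccwdev.h>

    static void example_irq(struct ccw_device *cdev, unsigned long intparm,
    			struct irb *irb)
    {
    	u32 cstat = scsw_cstat(&irb->scsw);
    	u32 dstat = scsw_dstat(&irb->scsw);

    	if (scsw_is_tm(&irb->scsw)) {
    		/* transport mode: FCX status lives in the tm view */
    		pr_debug("fcxs %02x schxs %02x\n",
    			 irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
    		return;
    	}
    	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
    		pr_debug("clear function pending\n");
    	pr_debug("dstat %02x cstat %02x\n", dstat, cstat);
    }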
diff --git a/include/asm-s390/elf.h b/include/asm-s390/elf.h index b3ac262c4582..3cad56923815 100644 --- a/include/asm-s390/elf.h +++ b/include/asm-s390/elf.h | |||
| @@ -113,6 +113,9 @@ | |||
| 113 | typedef s390_fp_regs elf_fpregset_t; | 113 | typedef s390_fp_regs elf_fpregset_t; |
| 114 | typedef s390_regs elf_gregset_t; | 114 | typedef s390_regs elf_gregset_t; |
| 115 | 115 | ||
| 116 | typedef s390_fp_regs compat_elf_fpregset_t; | ||
| 117 | typedef s390_compat_regs compat_elf_gregset_t; | ||
| 118 | |||
| 116 | #include <linux/sched.h> /* for task_struct */ | 119 | #include <linux/sched.h> /* for task_struct */ |
| 117 | #include <asm/system.h> /* for save_access_regs */ | 120 | #include <asm/system.h> /* for save_access_regs */ |
| 118 | #include <asm/mmu_context.h> | 121 | #include <asm/mmu_context.h> |
| @@ -123,6 +126,10 @@ typedef s390_regs elf_gregset_t; | |||
| 123 | #define elf_check_arch(x) \ | 126 | #define elf_check_arch(x) \ |
| 124 | (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \ | 127 | (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \ |
| 125 | && (x)->e_ident[EI_CLASS] == ELF_CLASS) | 128 | && (x)->e_ident[EI_CLASS] == ELF_CLASS) |
| 129 | #define compat_elf_check_arch(x) \ | ||
| 130 | (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \ | ||
| 131 | && (x)->e_ident[EI_CLASS] == ELF_CLASS) | ||
| 132 | #define compat_start_thread start_thread31 | ||
| 126 | 133 | ||
| 127 | /* For SVR4/S390 the function pointer to be registered with `atexit` is | 134 | /* For SVR4/S390 the function pointer to be registered with `atexit` is |
| 128 | passed in R14. */ | 135 | passed in R14. */ |
| @@ -131,6 +138,7 @@ typedef s390_regs elf_gregset_t; | |||
| 131 | _r->gprs[14] = 0; \ | 138 | _r->gprs[14] = 0; \ |
| 132 | } while (0) | 139 | } while (0) |
| 133 | 140 | ||
| 141 | #define CORE_DUMP_USE_REGSET | ||
| 134 | #define USE_ELF_CORE_DUMP | 142 | #define USE_ELF_CORE_DUMP |
| 135 | #define ELF_EXEC_PAGESIZE 4096 | 143 | #define ELF_EXEC_PAGESIZE 4096 |
| 136 | 144 | ||
| @@ -140,44 +148,6 @@ typedef s390_regs elf_gregset_t; | |||
| 140 | that it will "exec", and that there is sufficient room for the brk. */ | 148 | that it will "exec", and that there is sufficient room for the brk. */ |
| 141 | #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2) | 149 | #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2) |
| 142 | 150 | ||
| 143 | /* Wow, the "main" arch needs arch dependent functions too.. :) */ | ||
| 144 | |||
| 145 | /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is | ||
| 146 | now struct_user_regs, they are different) */ | ||
| 147 | |||
| 148 | static inline int dump_regs(struct pt_regs *ptregs, elf_gregset_t *regs) | ||
| 149 | { | ||
| 150 | memcpy(®s->psw, &ptregs->psw, sizeof(regs->psw)+sizeof(regs->gprs)); | ||
| 151 | save_access_regs(regs->acrs); | ||
| 152 | regs->orig_gpr2 = ptregs->orig_gpr2; | ||
| 153 | return 1; | ||
| 154 | } | ||
| 155 | |||
| 156 | #define ELF_CORE_COPY_REGS(pr_reg, regs) dump_regs(regs, &pr_reg); | ||
| 157 | |||
| 158 | static inline int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) | ||
| 159 | { | ||
| 160 | struct pt_regs *ptregs = task_pt_regs(tsk); | ||
| 161 | memcpy(®s->psw, &ptregs->psw, sizeof(regs->psw)+sizeof(regs->gprs)); | ||
| 162 | memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs)); | ||
| 163 | regs->orig_gpr2 = ptregs->orig_gpr2; | ||
| 164 | return 1; | ||
| 165 | } | ||
| 166 | |||
| 167 | #define ELF_CORE_COPY_TASK_REGS(tsk, regs) dump_task_regs(tsk, regs) | ||
| 168 | |||
| 169 | static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs) | ||
| 170 | { | ||
| 171 | if (tsk == current) | ||
| 172 | save_fp_regs(fpregs); | ||
| 173 | else | ||
| 174 | memcpy(fpregs, &tsk->thread.fp_regs, sizeof(elf_fpregset_t)); | ||
| 175 | return 1; | ||
| 176 | } | ||
| 177 | |||
| 178 | #define ELF_CORE_COPY_FPREGS(tsk, fpregs) dump_task_fpu(tsk, fpregs) | ||
| 179 | |||
| 180 | |||
| 181 | /* This yields a mask that user programs can use to figure out what | 151 | /* This yields a mask that user programs can use to figure out what |
| 182 | instruction set this CPU supports. */ | 152 | instruction set this CPU supports. */ |
| 183 | 153 | ||
| @@ -204,7 +174,10 @@ do { \ | |||
| 204 | set_personality(PER_SVR4); \ | 174 | set_personality(PER_SVR4); \ |
| 205 | else if (current->personality != PER_LINUX32) \ | 175 | else if (current->personality != PER_LINUX32) \ |
| 206 | set_personality(PER_LINUX); \ | 176 | set_personality(PER_LINUX); \ |
| 207 | clear_thread_flag(TIF_31BIT); \ | 177 | if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ |
| 178 | set_thread_flag(TIF_31BIT); \ | ||
| 179 | else \ | ||
| 180 | clear_thread_flag(TIF_31BIT); \ | ||
| 208 | } while (0) | 181 | } while (0) |
| 209 | #endif /* __s390x__ */ | 182 | #endif /* __s390x__ */ |
| 210 | 183 | ||
diff --git a/include/asm-s390/etr.h b/include/asm-s390/etr.h index b498f19bb9a7..80ef58c61970 100644 --- a/include/asm-s390/etr.h +++ b/include/asm-s390/etr.h | |||
| @@ -122,7 +122,7 @@ struct etr_aib { | |||
| 122 | } __attribute__ ((packed,aligned(8))); | 122 | } __attribute__ ((packed,aligned(8))); |
| 123 | 123 | ||
| 124 | /* ETR interruption parameter */ | 124 | /* ETR interruption parameter */ |
| 125 | struct etr_interruption_parameter { | 125 | struct etr_irq_parm { |
| 126 | unsigned int _pad0 : 8; | 126 | unsigned int _pad0 : 8; |
| 127 | unsigned int pc0 : 1; /* port 0 state change */ | 127 | unsigned int pc0 : 1; /* port 0 state change */ |
| 128 | unsigned int pc1 : 1; /* port 1 state change */ | 128 | unsigned int pc1 : 1; /* port 1 state change */ |
| @@ -213,7 +213,46 @@ static inline int etr_ptff(void *ptff_block, unsigned int func) | |||
| 213 | #define ETR_PTFF_SGS 0x43 /* set gross steering rate */ | 213 | #define ETR_PTFF_SGS 0x43 /* set gross steering rate */ |
| 214 | 214 | ||
| 215 | /* Functions needed by the machine check handler */ | 215 | /* Functions needed by the machine check handler */ |
| 216 | extern void etr_switch_to_local(void); | 216 | void etr_switch_to_local(void); |
| 217 | extern void etr_sync_check(void); | 217 | void etr_sync_check(void); |
| 218 | |||
| 219 | /* STP interruption parameter */ | ||
| 220 | struct stp_irq_parm { | ||
| 221 | unsigned int _pad0 : 14; | ||
| 222 | unsigned int tsc : 1; /* Timing status change */ | ||
| 223 | unsigned int lac : 1; /* Link availability change */ | ||
| 224 | unsigned int tcpc : 1; /* Time control parameter change */ | ||
| 225 | unsigned int _pad2 : 15; | ||
| 226 | } __attribute__ ((packed)); | ||
| 227 | |||
| 228 | #define STP_OP_SYNC 1 | ||
| 229 | #define STP_OP_CTRL 3 | ||
| 230 | |||
| 231 | struct stp_sstpi { | ||
| 232 | unsigned int rsvd0; | ||
| 233 | unsigned int rsvd1 : 8; | ||
| 234 | unsigned int stratum : 8; | ||
| 235 | unsigned int vbits : 16; | ||
| 236 | unsigned int leaps : 16; | ||
| 237 | unsigned int tmd : 4; | ||
| 238 | unsigned int ctn : 4; | ||
| 239 | unsigned int rsvd2 : 3; | ||
| 240 | unsigned int c : 1; | ||
| 241 | unsigned int tst : 4; | ||
| 242 | unsigned int tzo : 16; | ||
| 243 | unsigned int dsto : 16; | ||
| 244 | unsigned int ctrl : 16; | ||
| 245 | unsigned int rsvd3 : 16; | ||
| 246 | unsigned int tto; | ||
| 247 | unsigned int rsvd4; | ||
| 248 | unsigned int ctnid[3]; | ||
| 249 | unsigned int rsvd5; | ||
| 250 | unsigned int todoff[4]; | ||
| 251 | unsigned int rsvd6[48]; | ||
| 252 | } __attribute__ ((packed)); | ||
| 253 | |||
| 254 | /* Functions needed by the machine check handler */ | ||
| 255 | void stp_sync_check(void); | ||
| 256 | void stp_island_check(void); | ||
| 218 | 257 | ||
| 219 | #endif /* __S390_ETR_H */ | 258 | #endif /* __S390_ETR_H */ |
diff --git a/include/asm-s390/fcx.h b/include/asm-s390/fcx.h new file mode 100644 index 000000000000..8be1f3a58042 --- /dev/null +++ b/include/asm-s390/fcx.h | |||
| @@ -0,0 +1,311 @@ | |||
| 1 | /* | ||
| 2 | * Functions for assembling fcx enabled I/O control blocks. | ||
| 3 | * | ||
| 4 | * Copyright IBM Corp. 2008 | ||
| 5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
| 6 | */ | ||
| 7 | |||
| 8 | #ifndef _ASM_S390_FCX_H | ||
| 9 | #define _ASM_S390_FCX_H _ASM_S390_FCX_H | ||
| 10 | |||
| 11 | #include <linux/types.h> | ||
| 12 | |||
| 13 | #define TCW_FORMAT_DEFAULT 0 | ||
| 14 | #define TCW_TIDAW_FORMAT_DEFAULT 0 | ||
| 15 | #define TCW_FLAGS_INPUT_TIDA 1 << (23 - 5) | ||
| 16 | #define TCW_FLAGS_TCCB_TIDA 1 << (23 - 6) | ||
| 17 | #define TCW_FLAGS_OUTPUT_TIDA 1 << (23 - 7) | ||
| 18 | #define TCW_FLAGS_TIDAW_FORMAT(x) ((x) & 3) << (23 - 9) | ||
| 19 | #define TCW_FLAGS_GET_TIDAW_FORMAT(x) (((x) >> (23 - 9)) & 3) | ||
| 20 | |||
| 21 | /** | ||
| 22 | * struct tcw - Transport Control Word (TCW) | ||
| 23 | * @format: TCW format | ||
| 24 | * @flags: TCW flags | ||
| 25 | * @tccbl: Transport-Command-Control-Block Length | ||
| 26 | * @r: Read Operations | ||
| 27 | * @w: Write Operations | ||
| 28 | * @output: Output-Data Address | ||
| 29 | * @input: Input-Data Address | ||
| 30 | * @tsb: Transport-Status-Block Address | ||
| 31 | * @tccb: Transport-Command-Control-Block Address | ||
| 32 | * @output_count: Output Count | ||
| 33 | * @input_count: Input Count | ||
| 34 | * @intrg: Interrogate TCW Address | ||
| 35 | */ | ||
| 36 | struct tcw { | ||
| 37 | u32 format:2; | ||
| 38 | u32 :6; | ||
| 39 | u32 flags:24; | ||
| 40 | u32 :8; | ||
| 41 | u32 tccbl:6; | ||
| 42 | u32 r:1; | ||
| 43 | u32 w:1; | ||
| 44 | u32 :16; | ||
| 45 | u64 output; | ||
| 46 | u64 input; | ||
| 47 | u64 tsb; | ||
| 48 | u64 tccb; | ||
| 49 | u32 output_count; | ||
| 50 | u32 input_count; | ||
| 51 | u32 :32; | ||
| 52 | u32 :32; | ||
| 53 | u32 :32; | ||
| 54 | u32 intrg; | ||
| 55 | } __attribute__ ((packed, aligned(64))); | ||
| 56 | |||
| 57 | #define TIDAW_FLAGS_LAST 1 << (7 - 0) | ||
| 58 | #define TIDAW_FLAGS_SKIP 1 << (7 - 1) | ||
| 59 | #define TIDAW_FLAGS_DATA_INT 1 << (7 - 2) | ||
| 60 | #define TIDAW_FLAGS_TTIC 1 << (7 - 3) | ||
| 61 | #define TIDAW_FLAGS_INSERT_CBC 1 << (7 - 4) | ||
| 62 | |||
| 63 | /** | ||
| 64 | * struct tidaw - Transport-Indirect-Addressing Word (TIDAW) | ||
| 65 | * @flags: TIDAW flags. Can be an arithmetic OR of the following constants: | ||
| 66 | * %TIDAW_FLAGS_LAST, %TIDAW_FLAGS_SKIP, %TIDAW_FLAGS_DATA_INT, | ||
| 67 | * %TIDAW_FLAGS_TTIC, %TIDAW_FLAGS_INSERT_CBC | ||
| 68 | * @count: Count | ||
| 69 | * @addr: Address | ||
| 70 | */ | ||
| 71 | struct tidaw { | ||
| 72 | u32 flags:8; | ||
| 73 | u32 :24; | ||
| 74 | u32 count; | ||
| 75 | u64 addr; | ||
| 76 | } __attribute__ ((packed, aligned(16))); | ||
| 77 | |||
| 78 | /** | ||
| 79 | * struct tsa_iostat - I/O-Status Transport-Status Area (IO-Stat TSA) | ||
| 80 | * @dev_time: Device Time | ||
| 81 | * @def_time: Defer Time | ||
| 82 | * @queue_time: Queue Time | ||
| 83 | * @dev_busy_time: Device-Busy Time | ||
| 84 | * @dev_act_time: Device-Active-Only Time | ||
| 85 | * @sense: Sense Data (if present) | ||
| 86 | */ | ||
| 87 | struct tsa_iostat { | ||
| 88 | u32 dev_time; | ||
| 89 | u32 def_time; | ||
| 90 | u32 queue_time; | ||
| 91 | u32 dev_busy_time; | ||
| 92 | u32 dev_act_time; | ||
| 93 | u8 sense[32]; | ||
| 94 | } __attribute__ ((packed)); | ||
| 95 | |||
| 96 | /** | ||
| 97 | * struct tsa_ddpc - Device-Detected-Program-Check Transport-Status Area (DDPC TSA) | ||
| 98 | * @rc: Reason Code | ||
| 99 | * @rcq: Reason Code Qualifier | ||
| 100 | * @sense: Sense Data (if present) | ||
| 101 | */ | ||
| 102 | struct tsa_ddpc { | ||
| 103 | u32 :24; | ||
| 104 | u32 rc:8; | ||
| 105 | u8 rcq[16]; | ||
| 106 | u8 sense[32]; | ||
| 107 | } __attribute__ ((packed)); | ||
| 108 | |||
| 109 | #define TSA_INTRG_FLAGS_CU_STATE_VALID 1 << (7 - 0) | ||
| 110 | #define TSA_INTRG_FLAGS_DEV_STATE_VALID 1 << (7 - 1) | ||
| 111 | #define TSA_INTRG_FLAGS_OP_STATE_VALID 1 << (7 - 2) | ||
| 112 | |||
| 113 | /** | ||
| 114 | * struct tsa_intrg - Interrogate Transport-Status Area (Intrg. TSA) | ||
| 115 | * @format: Format | ||
| 116 | * @flags: Flags. Can be an arithmetic OR of the following constants: | ||
| 117 | * %TSA_INTRG_FLAGS_CU_STATE_VALID, %TSA_INTRG_FLAGS_DEV_STATE_VALID, | ||
| 118 | * %TSA_INTRG_FLAGS_OP_STATE_VALID | ||
| 119 | * @cu_state: Control-Unit State | ||
| 120 | * @dev_state: Device State | ||
| 121 | * @op_state: Operation State | ||
| 122 | * @sd_info: State-Dependent Information | ||
| 123 | * @dl_id: Device-Level Identifier | ||
| 124 | * @dd_data: Device-Dependent Data | ||
| 125 | */ | ||
| 126 | struct tsa_intrg { | ||
| 127 | u32 format:8; | ||
| 128 | u32 flags:8; | ||
| 129 | u32 cu_state:8; | ||
| 130 | u32 dev_state:8; | ||
| 131 | u32 op_state:8; | ||
| 132 | u32 :24; | ||
| 133 | u8 sd_info[12]; | ||
| 134 | u32 dl_id; | ||
| 135 | u8 dd_data[28]; | ||
| 136 | } __attribute__ ((packed)); | ||
| 137 | |||
| 138 | #define TSB_FORMAT_NONE 0 | ||
| 139 | #define TSB_FORMAT_IOSTAT 1 | ||
| 140 | #define TSB_FORMAT_DDPC 2 | ||
| 141 | #define TSB_FORMAT_INTRG 3 | ||
| 142 | |||
| 143 | #define TSB_FLAGS_DCW_OFFSET_VALID 1 << (7 - 0) | ||
| 144 | #define TSB_FLAGS_COUNT_VALID 1 << (7 - 1) | ||
| 145 | #define TSB_FLAGS_CACHE_MISS 1 << (7 - 2) | ||
| 146 | #define TSB_FLAGS_TIME_VALID 1 << (7 - 3) | ||
| 147 | #define TSB_FLAGS_FORMAT(x) ((x) & 7) | ||
| 148 | #define TSB_FORMAT(t) ((t)->flags & 7) | ||
| 149 | |||
| 150 | /** | ||
| 151 | * struct tsb - Transport-Status Block (TSB) | ||
| 152 | * @length: Length | ||
| 153 | * @flags: Flags. Can be an arithmetic OR of the following constants: | ||
| 154 | * %TSB_FLAGS_DCW_OFFSET_VALID, %TSB_FLAGS_COUNT_VALID, %TSB_FLAGS_CACHE_MISS, | ||
| 155 | * %TSB_FLAGS_TIME_VALID | ||
| 156 | * @dcw_offset: DCW Offset | ||
| 157 | * @count: Count | ||
| 158 | * @tsa: Transport-Status-Area | ||
| 159 | */ | ||
| 160 | struct tsb { | ||
| 161 | u32 length:8; | ||
| 162 | u32 flags:8; | ||
| 163 | u32 dcw_offset:16; | ||
| 164 | u32 count; | ||
| 165 | u32 :32; | ||
| 166 | union { | ||
| 167 | struct tsa_iostat iostat; | ||
| 168 | struct tsa_ddpc ddpc; | ||
| 169 | struct tsa_intrg intrg; | ||
| 170 | } __attribute__ ((packed)) tsa; | ||
| 171 | } __attribute__ ((packed, aligned(8))); | ||
| 172 | |||
| 173 | #define DCW_INTRG_FORMAT_DEFAULT 0 | ||
| 174 | |||
| 175 | #define DCW_INTRG_RC_UNSPECIFIED 0 | ||
| 176 | #define DCW_INTRG_RC_TIMEOUT 1 | ||
| 177 | |||
| 178 | #define DCW_INTRG_RCQ_UNSPECIFIED 0 | ||
| 179 | #define DCW_INTRG_RCQ_PRIMARY 1 | ||
| 180 | #define DCW_INTRG_RCQ_SECONDARY 2 | ||
| 181 | |||
| 182 | #define DCW_INTRG_FLAGS_MPM 1 << (7 - 0) | ||
| 183 | #define DCW_INTRG_FLAGS_PPR 1 << (7 - 1) | ||
| 184 | #define DCW_INTRG_FLAGS_CRIT 1 << (7 - 2) | ||
| 185 | |||
| 186 | /** | ||
| 187 | * struct dcw_intrg_data - Interrogate DCW data | ||
| 188 | * @format: Format. Should be %DCW_INTRG_FORMAT_DEFAULT | ||
| 189 | * @rc: Reason Code. Can be one of %DCW_INTRG_RC_UNSPECIFIED, | ||
| 190 | * %DCW_INTRG_RC_TIMEOUT | ||
| 191 | * @rcq: Reason Code Qualifier: Can be one of %DCW_INTRG_RCQ_UNSPECIFIED, | ||
| 192 | * %DCW_INTRG_RCQ_PRIMARY, %DCW_INTRG_RCQ_SECONDARY | ||
| 193 | * @lpm: Logical-Path Mask | ||
| 194 | * @pam: Path-Available Mask | ||
| 195 | * @pim: Path-Installed Mask | ||
| 196 | * @timeout: Timeout | ||
| 197 | * @flags: Flags. Can be a bitwise OR of %DCW_INTRG_FLAGS_MPM, | ||
| 198 | * %DCW_INTRG_FLAGS_PPR, %DCW_INTRG_FLAGS_CRIT | ||
| 199 | * @time: Time | ||
| 200 | * @prog_id: Program Identifier | ||
| 201 | * @prog_data: Program-Dependent Data | ||
| 202 | */ | ||
| 203 | struct dcw_intrg_data { | ||
| 204 | u32 format:8; | ||
| 205 | u32 rc:8; | ||
| 206 | u32 rcq:8; | ||
| 207 | u32 lpm:8; | ||
| 208 | u32 pam:8; | ||
| 209 | u32 pim:8; | ||
| 210 | u32 timeout:16; | ||
| 211 | u32 flags:8; | ||
| 212 | u32 :24; | ||
| 213 | u32 :32; | ||
| 214 | u64 time; | ||
| 215 | u64 prog_id; | ||
| 216 | u8 prog_data[0]; | ||
| 217 | } __attribute__ ((packed)); | ||
| 218 | |||
| 219 | #define DCW_FLAGS_CC 1 << (7 - 1) | ||
| 220 | |||
| 221 | #define DCW_CMD_WRITE 0x01 | ||
| 222 | #define DCW_CMD_READ 0x02 | ||
| 223 | #define DCW_CMD_CONTROL 0x03 | ||
| 224 | #define DCW_CMD_SENSE 0x04 | ||
| 225 | #define DCW_CMD_SENSE_ID 0xe4 | ||
| 226 | #define DCW_CMD_INTRG 0x40 | ||
| 227 | |||
| 228 | /** | ||
| 229 | * struct dcw - Device-Command Word (DCW) | ||
| 230 | * @cmd: Command Code. Can be one of %DCW_CMD_WRITE, %DCW_CMD_READ, | ||
| 231 | * %DCW_CMD_CONTROL, %DCW_CMD_SENSE, %DCW_CMD_SENSE_ID, %DCW_CMD_INTRG | ||
| 232 | * @flags: Flags. Can be a bitwise OR of %DCW_FLAGS_CC | ||
| 233 | * @cd_count: Control-Data Count | ||
| 234 | * @count: Count | ||
| 235 | * @cd: Control Data | ||
| 236 | */ | ||
| 237 | struct dcw { | ||
| 238 | u32 cmd:8; | ||
| 239 | u32 flags:8; | ||
| 240 | u32 :8; | ||
| 241 | u32 cd_count:8; | ||
| 242 | u32 count; | ||
| 243 | u8 cd[0]; | ||
| 244 | } __attribute__ ((packed)); | ||
| 245 | |||
| 246 | #define TCCB_FORMAT_DEFAULT 0x7f | ||
| 247 | #define TCCB_MAX_DCW 30 | ||
| 248 | #define TCCB_MAX_SIZE (sizeof(struct tccb_tcah) + \ | ||
| 249 | TCCB_MAX_DCW * sizeof(struct dcw) + \ | ||
| 250 | sizeof(struct tccb_tcat)) | ||
| 251 | #define TCCB_SAC_DEFAULT 0xf901 | ||
| 252 | #define TCCB_SAC_INTRG 0xf902 | ||
| 253 | |||
| 254 | /** | ||
| 255 | * struct tccb_tcah - Transport-Command-Area Header (TCAH) | ||
| 256 | * @format: Format. Should be %TCCB_FORMAT_DEFAULT | ||
| 257 | * @tcal: Transport-Command-Area Length | ||
| 258 | * @sac: Service-Action Code. Can be one of %TCCB_SAC_DEFAULT, %TCCB_SAC_INTRG | ||
| 259 | * @prio: Priority | ||
| 260 | */ | ||
| 261 | struct tccb_tcah { | ||
| 262 | u32 format:8; | ||
| 263 | u32 :24; | ||
| 264 | u32 :24; | ||
| 265 | u32 tcal:8; | ||
| 266 | u32 sac:16; | ||
| 267 | u32 :8; | ||
| 268 | u32 prio:8; | ||
| 269 | u32 :32; | ||
| 270 | } __attribute__ ((packed)); | ||
| 271 | |||
| 272 | /** | ||
| 273 | * struct tccb_tcat - Transport-Command-Area Trailer (TCAT) | ||
| 274 | * @count: Transport Count | ||
| 275 | */ | ||
| 276 | struct tccb_tcat { | ||
| 277 | u32 :32; | ||
| 278 | u32 count; | ||
| 279 | } __attribute__ ((packed)); | ||
| 280 | |||
| 281 | /** | ||
| 282 | * struct tccb - (partial) Transport-Command-Control Block (TCCB) | ||
| 283 | * @tcah: TCAH | ||
| 284 | * @tca: Transport-Command Area | ||
| 285 | */ | ||
| 286 | struct tccb { | ||
| 287 | struct tccb_tcah tcah; | ||
| 288 | u8 tca[0]; | ||
| 289 | } __attribute__ ((packed, aligned(8))); | ||
| 290 | |||
| 291 | struct tcw *tcw_get_intrg(struct tcw *tcw); | ||
| 292 | void *tcw_get_data(struct tcw *tcw); | ||
| 293 | struct tccb *tcw_get_tccb(struct tcw *tcw); | ||
| 294 | struct tsb *tcw_get_tsb(struct tcw *tcw); | ||
| 295 | |||
| 296 | void tcw_init(struct tcw *tcw, int r, int w); | ||
| 297 | void tcw_finalize(struct tcw *tcw, int num_tidaws); | ||
| 298 | |||
| 299 | void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw); | ||
| 300 | void tcw_set_data(struct tcw *tcw, void *data, int use_tidal); | ||
| 301 | void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb); | ||
| 302 | void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb); | ||
| 303 | |||
| 304 | void tccb_init(struct tccb *tccb, size_t tccb_size, u32 sac); | ||
| 305 | void tsb_init(struct tsb *tsb); | ||
| 306 | struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags, | ||
| 307 | void *cd, u8 cd_count, u32 count); | ||
| 308 | struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags, | ||
| 309 | void *addr, u32 count); | ||
| 310 | |||
| 311 | #endif /* _ASM_S390_FCX_H */ | ||
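The tcw_*/tccb_* helpers declared above are the low-level building blocks of a transport-mode request (the itcw layer introduced below wraps them). A hedged sketch of how they fit together; the separate kzalloc allocations, the GFP_DMA flag, and the meaning of tcw_init()'s r/w arguments are assumptions made for illustration, not something this header guarantees:

#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/fcx.h>

/* Build a single-DCW sense request with a directly addressed buffer. */
static int build_sense_request(struct tcw *tcw, void *buf, u32 count)
{
	struct tccb *tccb;
	struct tsb *tsb;

	tccb = kzalloc(TCCB_MAX_SIZE, GFP_KERNEL | GFP_DMA);
	tsb = kzalloc(sizeof(*tsb), GFP_KERNEL | GFP_DMA);
	if (!tccb || !tsb) {
		kfree(tccb);
		kfree(tsb);
		return -ENOMEM;
	}

	tcw_init(tcw, 1, 0);		/* assumed: read direction, no write */
	tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
	tsb_init(tsb);
	tccb_add_dcw(tccb, TCCB_MAX_SIZE, DCW_CMD_SENSE, 0, NULL, 0, count);
	tcw_set_tccb(tcw, tccb);
	tcw_set_tsb(tcw, tsb);
	tcw_set_data(tcw, buf, 0);	/* 0: direct data address, no TIDAL */
	tcw_finalize(tcw, 0);		/* no tidaws were added */
	return 0;
}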
diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h index c1b2e50392bb..eaca6dff5405 100644 --- a/include/asm-s390/ipl.h +++ b/include/asm-s390/ipl.h | |||
| @@ -56,15 +56,19 @@ struct ipl_block_fcp { | |||
| 56 | u8 scp_data[]; | 56 | u8 scp_data[]; |
| 57 | } __attribute__((packed)); | 57 | } __attribute__((packed)); |
| 58 | 58 | ||
| 59 | #define DIAG308_VMPARM_SIZE 64 | ||
| 60 | |||
| 59 | struct ipl_block_ccw { | 61 | struct ipl_block_ccw { |
| 60 | u8 load_param[8]; | 62 | u8 load_parm[8]; |
| 61 | u8 reserved1[84]; | 63 | u8 reserved1[84]; |
| 62 | u8 reserved2[2]; | 64 | u8 reserved2[2]; |
| 63 | u16 devno; | 65 | u16 devno; |
| 64 | u8 vm_flags; | 66 | u8 vm_flags; |
| 65 | u8 reserved3[3]; | 67 | u8 reserved3[3]; |
| 66 | u32 vm_parm_len; | 68 | u32 vm_parm_len; |
| 67 | u8 reserved4[80]; | 69 | u8 nss_name[8]; |
| 70 | u8 vm_parm[DIAG308_VMPARM_SIZE]; | ||
| 71 | u8 reserved4[8]; | ||
| 68 | } __attribute__((packed)); | 72 | } __attribute__((packed)); |
| 69 | 73 | ||
| 70 | struct ipl_parameter_block { | 74 | struct ipl_parameter_block { |
| @@ -73,7 +77,7 @@ struct ipl_parameter_block { | |||
| 73 | struct ipl_block_fcp fcp; | 77 | struct ipl_block_fcp fcp; |
| 74 | struct ipl_block_ccw ccw; | 78 | struct ipl_block_ccw ccw; |
| 75 | } ipl_info; | 79 | } ipl_info; |
| 76 | } __attribute__((packed)); | 80 | } __attribute__((packed,aligned(4096))); |
| 77 | 81 | ||
| 78 | /* | 82 | /* |
| 79 | * IPL validity flags | 83 | * IPL validity flags |
| @@ -86,6 +90,8 @@ extern void do_reipl(void); | |||
| 86 | extern void do_halt(void); | 90 | extern void do_halt(void); |
| 87 | extern void do_poff(void); | 91 | extern void do_poff(void); |
| 88 | extern void ipl_save_parameters(void); | 92 | extern void ipl_save_parameters(void); |
| 93 | extern void ipl_update_parameters(void); | ||
| 94 | extern void get_ipl_vmparm(char *); | ||
| 89 | 95 | ||
| 90 | enum { | 96 | enum { |
| 91 | IPL_DEVNO_VALID = 1, | 97 | IPL_DEVNO_VALID = 1, |
| @@ -147,6 +153,11 @@ enum diag308_flags { | |||
| 147 | DIAG308_FLAGS_LP_VALID = 0x80, | 153 | DIAG308_FLAGS_LP_VALID = 0x80, |
| 148 | }; | 154 | }; |
| 149 | 155 | ||
| 156 | enum diag308_vm_flags { | ||
| 157 | DIAG308_VM_FLAGS_NSS_VALID = 0x80, | ||
| 158 | DIAG308_VM_FLAGS_VP_VALID = 0x40, | ||
| 159 | }; | ||
| 160 | |||
| 150 | enum diag308_rc { | 161 | enum diag308_rc { |
| 151 | DIAG308_RC_OK = 1, | 162 | DIAG308_RC_OK = 1, |
| 152 | }; | 163 | }; |
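The reworked ipl_block_ccw exposes the z/VM NSS name and parameter string directly. A sketch of how a consumer might pull the parameter string out of a CCW IPL block; the EBCDIC-to-ASCII conversion that the kernel presumably performs (compare get_ipl_vmparm() above) is omitted, and the helper name is illustrative:

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/ipl.h>

/* Copy the raw VM parameter string, if present, into a NUL-terminated buffer
 * of at least DIAG308_VMPARM_SIZE + 1 bytes. */
static void copy_ccw_vmparm(const struct ipl_parameter_block *ipb, char *dst)
{
	unsigned int len = 0;

	if (ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID)
		len = min_t(u32, ipb->ipl_info.ccw.vm_parm_len,
			    DIAG308_VMPARM_SIZE);
	memcpy(dst, ipb->ipl_info.ccw.vm_parm, len);
	dst[len] = 0;
}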
diff --git a/include/asm-s390/isc.h b/include/asm-s390/isc.h new file mode 100644 index 000000000000..34bb8916db4f --- /dev/null +++ b/include/asm-s390/isc.h | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | #ifndef _ASM_S390_ISC_H | ||
| 2 | #define _ASM_S390_ISC_H | ||
| 3 | |||
| 4 | #include <linux/types.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | * I/O interruption subclasses used by drivers. | ||
| 8 | * Please add all used iscs here so that it is possible to distribute | ||
| 9 | * isc usage between drivers. | ||
| 10 | * Reminder: 0 is highest priority, 7 lowest. | ||
| 11 | */ | ||
| 12 | #define MAX_ISC 7 | ||
| 13 | |||
| 14 | /* Regular I/O interrupts. */ | ||
| 15 | #define IO_SCH_ISC 3 /* regular I/O subchannels */ | ||
| 16 | #define CONSOLE_ISC 1 /* console I/O subchannel */ | ||
| 17 | #define CHSC_SCH_ISC 7 /* CHSC subchannels */ | ||
| 18 | /* Adapter interrupts. */ | ||
| 19 | #define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */ | ||
| 20 | |||
| 21 | /* Functions for registration of I/O interruption subclasses */ | ||
| 22 | void isc_register(unsigned int isc); | ||
| 23 | void isc_unregister(unsigned int isc); | ||
| 24 | |||
| 25 | #endif /* _ASM_S390_ISC_H */ | ||
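A short sketch of the registration pattern the header comment asks for: a driver brackets its lifetime with isc_register()/isc_unregister() for the ISC it uses. The surrounding driver skeleton is illustrative only:

#include <linux/init.h>
#include <linux/module.h>
#include <asm/isc.h>

static int __init example_chsc_driver_init(void)
{
	/* Tell the common I/O layer that this driver takes interrupts on
	 * the CHSC subchannel ISC. */
	isc_register(CHSC_SCH_ISC);
	/* ... enable subchannels for CHSC_SCH_ISC ... */
	return 0;
}

static void __exit example_chsc_driver_exit(void)
{
	/* ... disable subchannels ... */
	isc_unregister(CHSC_SCH_ISC);
}

module_init(example_chsc_driver_init);
module_exit(example_chsc_driver_exit);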
diff --git a/include/asm-s390/itcw.h b/include/asm-s390/itcw.h new file mode 100644 index 000000000000..a9bc5c36b32a --- /dev/null +++ b/include/asm-s390/itcw.h | |||
| @@ -0,0 +1,30 @@ | |||
| 1 | /* | ||
| 2 | * Functions for incremental construction of fcx enabled I/O control blocks. | ||
| 3 | * | ||
| 4 | * Copyright IBM Corp. 2008 | ||
| 5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
| 6 | */ | ||
| 7 | |||
| 8 | #ifndef _ASM_S390_ITCW_H | ||
| 9 | #define _ASM_S390_ITCW_H _ASM_S390_ITCW_H | ||
| 10 | |||
| 11 | #include <linux/types.h> | ||
| 12 | #include <asm/fcx.h> | ||
| 13 | |||
| 14 | #define ITCW_OP_READ 0 | ||
| 15 | #define ITCW_OP_WRITE 1 | ||
| 16 | |||
| 17 | struct itcw; | ||
| 18 | |||
| 19 | struct tcw *itcw_get_tcw(struct itcw *itcw); | ||
| 20 | size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws); | ||
| 21 | struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg, | ||
| 22 | int max_tidaws, int intrg_max_tidaws); | ||
| 23 | struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd, | ||
| 24 | u8 cd_count, u32 count); | ||
| 25 | struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, | ||
| 26 | u32 count); | ||
| 27 | void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal); | ||
| 28 | void itcw_finalize(struct itcw *itcw); | ||
| 29 | |||
| 30 | #endif /* _ASM_S390_ITCW_H */ | ||
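A hedged sketch of the incremental-construction flow this API suggests: size the buffer, initialize, add DCWs and TIDAWs, then finalize. The GFP_DMA allocation and the omitted error handling (itcw_init()'s error-return convention is not visible in this header) are assumptions made for illustration:

#include <linux/slab.h>
#include <asm/itcw.h>

/* Build a one-DCW read request whose data is described by a single TIDAW. */
static struct itcw *build_read_itcw(void *buf, u32 count)
{
	struct itcw *itcw;
	size_t size;
	void *mem;

	size = itcw_calc_size(0 /* no interrogate TCW */, 1 /* one tidaw */, 0);
	mem = kzalloc(size, GFP_KERNEL | GFP_DMA);
	if (!mem)
		return NULL;
	itcw = itcw_init(mem, size, ITCW_OP_READ, 0, 1, 0);
	/* error handling omitted: how itcw_init() reports failure is an
	 * assumption left out of this sketch */
	itcw_add_dcw(itcw, DCW_CMD_READ, 0, NULL, 0, count);
	itcw_add_tidaw(itcw, 0, buf, count);
	itcw_finalize(itcw);
	return itcw;
}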
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h index bd0ea191dfa9..0bdb704ae051 100644 --- a/include/asm-s390/pgtable.h +++ b/include/asm-s390/pgtable.h | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | * the S390 page table tree. | 29 | * the S390 page table tree. |
| 30 | */ | 30 | */ |
| 31 | #ifndef __ASSEMBLY__ | 31 | #ifndef __ASSEMBLY__ |
| 32 | #include <linux/sched.h> | ||
| 32 | #include <linux/mm_types.h> | 33 | #include <linux/mm_types.h> |
| 33 | #include <asm/bitops.h> | 34 | #include <asm/bitops.h> |
| 34 | #include <asm/bug.h> | 35 | #include <asm/bug.h> |
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index a00f79dd323b..4af80af2a88f 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h | |||
| @@ -143,11 +143,19 @@ struct stack_frame { | |||
| 143 | /* | 143 | /* |
| 144 | * Do necessary setup to start up a new thread. | 144 | * Do necessary setup to start up a new thread. |
| 145 | */ | 145 | */ |
| 146 | #define start_thread(regs, new_psw, new_stackp) do { \ | 146 | #define start_thread(regs, new_psw, new_stackp) do { \ |
| 147 | set_fs(USER_DS); \ | 147 | set_fs(USER_DS); \ |
| 148 | regs->psw.mask = psw_user_bits; \ | 148 | regs->psw.mask = psw_user_bits; \ |
| 149 | regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ | 149 | regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ |
| 150 | regs->gprs[15] = new_stackp ; \ | 150 | regs->gprs[15] = new_stackp; \ |
| 151 | } while (0) | ||
| 152 | |||
| 153 | #define start_thread31(regs, new_psw, new_stackp) do { \ | ||
| 154 | set_fs(USER_DS); \ | ||
| 155 | regs->psw.mask = psw_user32_bits; \ | ||
| 156 | regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ | ||
| 157 | regs->gprs[15] = new_stackp; \ | ||
| 158 | crst_table_downgrade(current->mm, 1UL << 31); \ | ||
| 151 | } while (0) | 159 | } while (0) |
| 152 | 160 | ||
| 153 | /* Forward declaration, a strange C thing */ | 161 | /* Forward declaration, a strange C thing */ |
| @@ -328,16 +336,6 @@ extern void (*s390_base_mcck_handler_fn)(void); | |||
| 328 | extern void (*s390_base_pgm_handler_fn)(void); | 336 | extern void (*s390_base_pgm_handler_fn)(void); |
| 329 | extern void (*s390_base_ext_handler_fn)(void); | 337 | extern void (*s390_base_ext_handler_fn)(void); |
| 330 | 338 | ||
| 331 | /* | ||
| 332 | * CPU idle notifier chain. | ||
| 333 | */ | ||
| 334 | #define S390_CPU_IDLE 0 | ||
| 335 | #define S390_CPU_NOT_IDLE 1 | ||
| 336 | |||
| 337 | struct notifier_block; | ||
| 338 | int register_idle_notifier(struct notifier_block *nb); | ||
| 339 | int unregister_idle_notifier(struct notifier_block *nb); | ||
| 340 | |||
| 341 | #define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL | 339 | #define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL |
| 342 | 340 | ||
| 343 | #endif | 341 | #endif |
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h index d7d4e2eb3e6f..af2c9ac28a07 100644 --- a/include/asm-s390/ptrace.h +++ b/include/asm-s390/ptrace.h | |||
| @@ -215,6 +215,12 @@ typedef struct | |||
| 215 | unsigned long addr; | 215 | unsigned long addr; |
| 216 | } __attribute__ ((aligned(8))) psw_t; | 216 | } __attribute__ ((aligned(8))) psw_t; |
| 217 | 217 | ||
| 218 | typedef struct | ||
| 219 | { | ||
| 220 | __u32 mask; | ||
| 221 | __u32 addr; | ||
| 222 | } __attribute__ ((aligned(8))) psw_compat_t; | ||
| 223 | |||
| 218 | #ifndef __s390x__ | 224 | #ifndef __s390x__ |
| 219 | 225 | ||
| 220 | #define PSW_MASK_PER 0x40000000UL | 226 | #define PSW_MASK_PER 0x40000000UL |
| @@ -292,6 +298,15 @@ typedef struct | |||
| 292 | unsigned long orig_gpr2; | 298 | unsigned long orig_gpr2; |
| 293 | } s390_regs; | 299 | } s390_regs; |
| 294 | 300 | ||
| 301 | typedef struct | ||
| 302 | { | ||
| 303 | psw_compat_t psw; | ||
| 304 | __u32 gprs[NUM_GPRS]; | ||
| 305 | __u32 acrs[NUM_ACRS]; | ||
| 306 | __u32 orig_gpr2; | ||
| 307 | } s390_compat_regs; | ||
| 308 | |||
| 309 | |||
| 295 | #ifdef __KERNEL__ | 310 | #ifdef __KERNEL__ |
| 296 | #include <asm/setup.h> | 311 | #include <asm/setup.h> |
| 297 | #include <asm/page.h> | 312 | #include <asm/page.h> |
diff --git a/drivers/s390/cio/schid.h b/include/asm-s390/schid.h index 54328fec5ade..5017ffa78e04 100644 --- a/drivers/s390/cio/schid.h +++ b/include/asm-s390/schid.h | |||
| @@ -1,12 +1,14 @@ | |||
| 1 | #ifndef S390_SCHID_H | 1 | #ifndef ASM_SCHID_H |
| 2 | #define S390_SCHID_H | 2 | #define ASM_SCHID_H |
| 3 | 3 | ||
| 4 | struct subchannel_id { | 4 | struct subchannel_id { |
| 5 | __u32 reserved:13; | 5 | __u32 cssid : 8; |
| 6 | __u32 ssid:2; | 6 | __u32 : 4; |
| 7 | __u32 one:1; | 7 | __u32 m : 1; |
| 8 | __u32 sch_no:16; | 8 | __u32 ssid : 2; |
| 9 | } __attribute__ ((packed,aligned(4))); | 9 | __u32 one : 1; |
| 10 | __u32 sch_no : 16; | ||
| 11 | } __attribute__ ((packed, aligned(4))); | ||
| 10 | 12 | ||
| 11 | 13 | ||
| 12 | /* Helper function for sane state of pre-allocated subchannel_id. */ | 14 | /* Helper function for sane state of pre-allocated subchannel_id. */ |
| @@ -23,4 +25,4 @@ schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2) | |||
| 23 | return !memcmp(schid1, schid2, sizeof(struct subchannel_id)); | 25 | return !memcmp(schid1, schid2, sizeof(struct subchannel_id)); |
| 24 | } | 26 | } |
| 25 | 27 | ||
| 26 | #endif /* S390_SCHID_H */ | 28 | #endif /* ASM_SCHID_H */ |
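The widened subchannel_id now carries the channel-subsystem id and the m bit in addition to ssid and subchannel number. A small sketch of formatting one in the usual cssid.ssid.schno notation (the notation is a convention assumed here, not mandated by this header):

#include <linux/kernel.h>
#include <asm/schid.h>

static void print_schid(struct subchannel_id *schid)
{
	/* e.g. "0.0.0815" for cssid 0, ssid 0, subchannel number 0x0815 */
	printk(KERN_INFO "subchannel %x.%x.%04x\n",
	       schid->cssid, schid->ssid, schid->sch_no);
}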
diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h index b5f2843013a3..fed7bee650a0 100644 --- a/include/asm-s390/sclp.h +++ b/include/asm-s390/sclp.h | |||
| @@ -45,9 +45,9 @@ struct sclp_cpu_info { | |||
| 45 | int sclp_get_cpu_info(struct sclp_cpu_info *info); | 45 | int sclp_get_cpu_info(struct sclp_cpu_info *info); |
| 46 | int sclp_cpu_configure(u8 cpu); | 46 | int sclp_cpu_configure(u8 cpu); |
| 47 | int sclp_cpu_deconfigure(u8 cpu); | 47 | int sclp_cpu_deconfigure(u8 cpu); |
| 48 | void sclp_read_info_early(void); | ||
| 49 | void sclp_facilities_detect(void); | 48 | void sclp_facilities_detect(void); |
| 50 | unsigned long long sclp_memory_detect(void); | 49 | unsigned long long sclp_get_rnmax(void); |
| 50 | unsigned long long sclp_get_rzm(void); | ||
| 51 | int sclp_sdias_blk_count(void); | 51 | int sclp_sdias_blk_count(void); |
| 52 | int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); | 52 | int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); |
| 53 | int sclp_chp_configure(struct chp_id chpid); | 53 | int sclp_chp_configure(struct chp_id chpid); |
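sclp_memory_detect() is replaced by two finer-grained accessors. A sketch of how a caller would presumably recombine them, on the assumption that rnmax is the maximum number of storage increments and rzm the increment size in bytes:

#include <asm/sclp.h>

/* Illustrative only: upper bound of installed storage in bytes. */
static unsigned long long detect_memory_size(void)
{
	return sclp_get_rnmax() * sclp_get_rzm();
}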
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h index ba69674012a7..f09ee3f72977 100644 --- a/include/asm-s390/setup.h +++ b/include/asm-s390/setup.h | |||
| @@ -8,14 +8,16 @@ | |||
| 8 | #ifndef _ASM_S390_SETUP_H | 8 | #ifndef _ASM_S390_SETUP_H |
| 9 | #define _ASM_S390_SETUP_H | 9 | #define _ASM_S390_SETUP_H |
| 10 | 10 | ||
| 11 | #define COMMAND_LINE_SIZE 896 | 11 | #define COMMAND_LINE_SIZE 1024 |
| 12 | |||
| 13 | #define ARCH_COMMAND_LINE_SIZE 896 | ||
| 12 | 14 | ||
| 13 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
| 14 | 16 | ||
| 15 | #include <asm/types.h> | 17 | #include <asm/types.h> |
| 16 | 18 | ||
| 17 | #define PARMAREA 0x10400 | 19 | #define PARMAREA 0x10400 |
| 18 | #define MEMORY_CHUNKS 16 /* max 0x7fff */ | 20 | #define MEMORY_CHUNKS 256 |
| 19 | 21 | ||
| 20 | #ifndef __ASSEMBLY__ | 22 | #ifndef __ASSEMBLY__ |
| 21 | 23 | ||
| @@ -36,12 +38,14 @@ | |||
| 36 | struct mem_chunk { | 38 | struct mem_chunk { |
| 37 | unsigned long addr; | 39 | unsigned long addr; |
| 38 | unsigned long size; | 40 | unsigned long size; |
| 39 | unsigned long type; | 41 | int type; |
| 40 | }; | 42 | }; |
| 41 | 43 | ||
| 42 | extern struct mem_chunk memory_chunk[]; | 44 | extern struct mem_chunk memory_chunk[]; |
| 43 | extern unsigned long real_memory_size; | 45 | extern unsigned long real_memory_size; |
| 44 | 46 | ||
| 47 | void detect_memory_layout(struct mem_chunk chunk[]); | ||
| 48 | |||
| 45 | #ifdef CONFIG_S390_SWITCH_AMODE | 49 | #ifdef CONFIG_S390_SWITCH_AMODE |
| 46 | extern unsigned int switch_amode; | 50 | extern unsigned int switch_amode; |
| 47 | #else | 51 | #else |
| @@ -61,7 +65,6 @@ extern unsigned long machine_flags; | |||
| 61 | 65 | ||
| 62 | #define MACHINE_FLAG_VM (1UL << 0) | 66 | #define MACHINE_FLAG_VM (1UL << 0) |
| 63 | #define MACHINE_FLAG_IEEE (1UL << 1) | 67 | #define MACHINE_FLAG_IEEE (1UL << 1) |
| 64 | #define MACHINE_FLAG_P390 (1UL << 2) | ||
| 65 | #define MACHINE_FLAG_CSP (1UL << 3) | 68 | #define MACHINE_FLAG_CSP (1UL << 3) |
| 66 | #define MACHINE_FLAG_MVPG (1UL << 4) | 69 | #define MACHINE_FLAG_MVPG (1UL << 4) |
| 67 | #define MACHINE_FLAG_DIAG44 (1UL << 5) | 70 | #define MACHINE_FLAG_DIAG44 (1UL << 5) |
| @@ -97,7 +100,6 @@ extern unsigned long machine_flags; | |||
| 97 | #define MACHINE_HAS_PFMF (machine_flags & MACHINE_FLAG_PFMF) | 100 | #define MACHINE_HAS_PFMF (machine_flags & MACHINE_FLAG_PFMF) |
| 98 | #endif /* __s390x__ */ | 101 | #endif /* __s390x__ */ |
| 99 | 102 | ||
| 100 | #define MACHINE_HAS_SCLP (!MACHINE_IS_P390) | ||
| 101 | #define ZFCPDUMP_HSA_SIZE (32UL<<20) | 103 | #define ZFCPDUMP_HSA_SIZE (32UL<<20) |
| 102 | 104 | ||
| 103 | /* | 105 | /* |
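The new detect_memory_layout() fills a caller-provided array of MEMORY_CHUNKS entries. A sketch of walking the result; the assumption that unused trailing entries have a zero size is mine:

#include <linux/kernel.h>
#include <asm/setup.h>

static unsigned long scan_memory_chunks(void)
{
	/* static: 256 entries are too large for the kernel stack */
	static struct mem_chunk chunk[MEMORY_CHUNKS];
	unsigned long total = 0;
	int i;

	detect_memory_layout(chunk);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!chunk[i].size)
			continue;
		total += chunk[i].size;
	}
	return total;
}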
diff --git a/include/asm-s390/sparsemem.h b/include/asm-s390/sparsemem.h index 06dfdab6c0e8..545d219e6a2d 100644 --- a/include/asm-s390/sparsemem.h +++ b/include/asm-s390/sparsemem.h | |||
| @@ -1,15 +1,15 @@ | |||
| 1 | #ifndef _ASM_S390_SPARSEMEM_H | 1 | #ifndef _ASM_S390_SPARSEMEM_H |
| 2 | #define _ASM_S390_SPARSEMEM_H | 2 | #define _ASM_S390_SPARSEMEM_H |
| 3 | 3 | ||
| 4 | #define SECTION_SIZE_BITS 25 | ||
| 5 | |||
| 6 | #ifdef CONFIG_64BIT | 4 | #ifdef CONFIG_64BIT |
| 7 | 5 | ||
| 6 | #define SECTION_SIZE_BITS 28 | ||
| 8 | #define MAX_PHYSADDR_BITS 42 | 7 | #define MAX_PHYSADDR_BITS 42 |
| 9 | #define MAX_PHYSMEM_BITS 42 | 8 | #define MAX_PHYSMEM_BITS 42 |
| 10 | 9 | ||
| 11 | #else | 10 | #else |
| 12 | 11 | ||
| 12 | #define SECTION_SIZE_BITS 25 | ||
| 13 | #define MAX_PHYSADDR_BITS 31 | 13 | #define MAX_PHYSADDR_BITS 31 |
| 14 | #define MAX_PHYSMEM_BITS 31 | 14 | #define MAX_PHYSMEM_BITS 31 |
| 15 | 15 | ||
diff --git a/include/asm-s390/timer.h b/include/asm-s390/timer.h index adb34860a543..d98d79e35cd6 100644 --- a/include/asm-s390/timer.h +++ b/include/asm-s390/timer.h | |||
| @@ -48,6 +48,18 @@ extern int del_virt_timer(struct vtimer_list *timer); | |||
| 48 | extern void init_cpu_vtimer(void); | 48 | extern void init_cpu_vtimer(void); |
| 49 | extern void vtime_init(void); | 49 | extern void vtime_init(void); |
| 50 | 50 | ||
| 51 | #ifdef CONFIG_VIRT_TIMER | ||
| 52 | |||
| 53 | extern void vtime_start_cpu_timer(void); | ||
| 54 | extern void vtime_stop_cpu_timer(void); | ||
| 55 | |||
| 56 | #else | ||
| 57 | |||
| 58 | static inline void vtime_start_cpu_timer(void) { } | ||
| 59 | static inline void vtime_stop_cpu_timer(void) { } | ||
| 60 | |||
| 61 | #endif /* CONFIG_VIRT_TIMER */ | ||
| 62 | |||
| 51 | #endif /* __KERNEL__ */ | 63 | #endif /* __KERNEL__ */ |
| 52 | 64 | ||
| 53 | #endif /* _ASM_S390_TIMER_H */ | 65 | #endif /* _ASM_S390_TIMER_H */ |
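The new vtime_start_cpu_timer()/vtime_stop_cpu_timer() pair (with empty stubs when CONFIG_VIRT_TIMER is off) takes over from the idle notifier chain removed from processor.h above. A sketch of the presumed call pattern around an idle wait; the actual call sites are not part of this header:

#include <asm/timer.h>

static void example_idle_wait(void)
{
	vtime_stop_cpu_timer();		/* suspend virtual CPU timer accounting */
	/* ... enter an enabled wait state ... */
	vtime_start_cpu_timer();	/* resume accounting after wakeup */
}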
diff --git a/include/asm-s390/zcrypt.h b/include/asm-s390/zcrypt.h index f228f1b86877..00d3bbd44117 100644 --- a/include/asm-s390/zcrypt.h +++ b/include/asm-s390/zcrypt.h | |||
| @@ -29,7 +29,7 @@ | |||
| 29 | 29 | ||
| 30 | #define ZCRYPT_VERSION 2 | 30 | #define ZCRYPT_VERSION 2 |
| 31 | #define ZCRYPT_RELEASE 1 | 31 | #define ZCRYPT_RELEASE 1 |
| 32 | #define ZCRYPT_VARIANT 0 | 32 | #define ZCRYPT_VARIANT 1 |
| 33 | 33 | ||
| 34 | #include <linux/ioctl.h> | 34 | #include <linux/ioctl.h> |
| 35 | #include <linux/compiler.h> | 35 | #include <linux/compiler.h> |
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 69b2342d5ebb..c4db5827963d 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
| @@ -159,6 +159,15 @@ struct ap_device_id { | |||
| 159 | 159 | ||
| 160 | #define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01 | 160 | #define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01 |
| 161 | 161 | ||
| 162 | /* s390 css bus devices (subchannels) */ | ||
| 163 | struct css_device_id { | ||
| 164 | __u8 match_flags; | ||
| 165 | __u8 type; /* subchannel type */ | ||
| 166 | __u16 pad2; | ||
| 167 | __u32 pad3; | ||
| 168 | kernel_ulong_t driver_data; | ||
| 169 | }; | ||
| 170 | |||
| 162 | #define ACPI_ID_LEN 16 /* only 9 bytes needed here, 16 bytes are used */ | 171 | #define ACPI_ID_LEN 16 /* only 9 bytes needed here, 16 bytes are used */ |
| 163 | /* to workaround crosscompile issues */ | 172 | /* to workaround crosscompile issues */ |
| 164 | 173 | ||
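css_device_id lets subchannel drivers publish the subchannel types they handle, which modpost turns into "css:tN" aliases (see the scripts/mod/file2alias.c hunk further down). A sketch of such a table; the 0x1 match-on-type flag value and the use of type 0 for I/O subchannels are assumptions used for illustration:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct css_device_id example_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = 0 },	/* assumed: match subchannel type 0 */
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, example_subchannel_ids);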
diff --git a/mm/Kconfig b/mm/Kconfig index 3aa819d628c1..4242743b981b 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
| @@ -129,7 +129,7 @@ config MEMORY_HOTPLUG | |||
| 129 | bool "Allow for memory hot-add" | 129 | bool "Allow for memory hot-add" |
| 130 | depends on SPARSEMEM || X86_64_ACPI_NUMA | 130 | depends on SPARSEMEM || X86_64_ACPI_NUMA |
| 131 | depends on HOTPLUG && !HIBERNATION && ARCH_ENABLE_MEMORY_HOTPLUG | 131 | depends on HOTPLUG && !HIBERNATION && ARCH_ENABLE_MEMORY_HOTPLUG |
| 132 | depends on (IA64 || X86 || PPC64 || SUPERH) | 132 | depends on (IA64 || X86 || PPC64 || SUPERH || S390) |
| 133 | 133 | ||
| 134 | comment "Memory hotplug is currently incompatible with Software Suspend" | 134 | comment "Memory hotplug is currently incompatible with Software Suspend" |
| 135 | depends on SPARSEMEM && HOTPLUG && HIBERNATION | 135 | depends on SPARSEMEM && HOTPLUG && HIBERNATION |
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 7b0038f45b16..bda71015885c 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
| @@ -1135,8 +1135,7 @@ static void iucv_callback_txdone(struct iucv_path *path, | |||
| 1135 | if (this) | 1135 | if (this) |
| 1136 | kfree_skb(this); | 1136 | kfree_skb(this); |
| 1137 | } | 1137 | } |
| 1138 | if (!this) | 1138 | BUG_ON(!this); |
| 1139 | printk(KERN_ERR "AF_IUCV msg tag %u not found\n", msg->tag); | ||
| 1140 | 1139 | ||
| 1141 | if (sk->sk_state == IUCV_CLOSING) { | 1140 | if (sk->sk_state == IUCV_CLOSING) { |
| 1142 | if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { | 1141 | if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { |
| @@ -1196,7 +1195,7 @@ static int __init afiucv_init(void) | |||
| 1196 | } | 1195 | } |
| 1197 | cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err); | 1196 | cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err); |
| 1198 | if (unlikely(err)) { | 1197 | if (unlikely(err)) { |
| 1199 | printk(KERN_ERR "AF_IUCV needs the VM userid\n"); | 1198 | WARN_ON(err); |
| 1200 | err = -EPROTONOSUPPORT; | 1199 | err = -EPROTONOSUPPORT; |
| 1201 | goto out; | 1200 | goto out; |
| 1202 | } | 1201 | } |
| @@ -1210,7 +1209,6 @@ static int __init afiucv_init(void) | |||
| 1210 | err = sock_register(&iucv_sock_family_ops); | 1209 | err = sock_register(&iucv_sock_family_ops); |
| 1211 | if (err) | 1210 | if (err) |
| 1212 | goto out_proto; | 1211 | goto out_proto; |
| 1213 | printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n"); | ||
| 1214 | return 0; | 1212 | return 0; |
| 1215 | 1213 | ||
| 1216 | out_proto: | 1214 | out_proto: |
| @@ -1226,8 +1224,6 @@ static void __exit afiucv_exit(void) | |||
| 1226 | sock_unregister(PF_IUCV); | 1224 | sock_unregister(PF_IUCV); |
| 1227 | proto_unregister(&iucv_proto); | 1225 | proto_unregister(&iucv_proto); |
| 1228 | iucv_unregister(&af_iucv_handler, 0); | 1226 | iucv_unregister(&af_iucv_handler, 0); |
| 1229 | |||
| 1230 | printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n"); | ||
| 1231 | } | 1227 | } |
| 1232 | 1228 | ||
| 1233 | module_init(afiucv_init); | 1229 | module_init(afiucv_init); |
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 918970762131..7f82b7616212 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
| @@ -1559,16 +1559,11 @@ static void iucv_external_interrupt(u16 code) | |||
| 1559 | 1559 | ||
| 1560 | p = iucv_irq_data[smp_processor_id()]; | 1560 | p = iucv_irq_data[smp_processor_id()]; |
| 1561 | if (p->ippathid >= iucv_max_pathid) { | 1561 | if (p->ippathid >= iucv_max_pathid) { |
| 1562 | printk(KERN_WARNING "iucv_do_int: Got interrupt with " | 1562 | WARN_ON(p->ippathid >= iucv_max_pathid); |
| 1563 | "pathid %d > max_connections (%ld)\n", | ||
| 1564 | p->ippathid, iucv_max_pathid - 1); | ||
| 1565 | iucv_sever_pathid(p->ippathid, iucv_error_no_listener); | 1563 | iucv_sever_pathid(p->ippathid, iucv_error_no_listener); |
| 1566 | return; | 1564 | return; |
| 1567 | } | 1565 | } |
| 1568 | if (p->iptype < 0x01 || p->iptype > 0x09) { | 1566 | BUG_ON(p->iptype < 0x01 || p->iptype > 0x09); |
| 1569 | printk(KERN_ERR "iucv_do_int: unknown iucv interrupt\n"); | ||
| 1570 | return; | ||
| 1571 | } | ||
| 1572 | work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC); | 1567 | work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC); |
| 1573 | if (!work) { | 1568 | if (!work) { |
| 1574 | printk(KERN_WARNING "iucv_external_interrupt: out of memory\n"); | 1569 | printk(KERN_WARNING "iucv_external_interrupt: out of memory\n"); |
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index cea4a790e1e9..37d5c363fbcd 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c | |||
| @@ -304,6 +304,14 @@ static int do_ap_entry(const char *filename, | |||
| 304 | return 1; | 304 | return 1; |
| 305 | } | 305 | } |
| 306 | 306 | ||
| 307 | /* looks like: "css:tN" */ | ||
| 308 | static int do_css_entry(const char *filename, | ||
| 309 | struct css_device_id *id, char *alias) | ||
| 310 | { | ||
| 311 | sprintf(alias, "css:t%01X", id->type); | ||
| 312 | return 1; | ||
| 313 | } | ||
| 314 | |||
| 307 | /* Looks like: "serio:tyNprNidNexN" */ | 315 | /* Looks like: "serio:tyNprNidNexN" */ |
| 308 | static int do_serio_entry(const char *filename, | 316 | static int do_serio_entry(const char *filename, |
| 309 | struct serio_device_id *id, char *alias) | 317 | struct serio_device_id *id, char *alias) |
| @@ -680,6 +688,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info, | |||
| 680 | do_table(symval, sym->st_size, | 688 | do_table(symval, sym->st_size, |
| 681 | sizeof(struct ap_device_id), "ap", | 689 | sizeof(struct ap_device_id), "ap", |
| 682 | do_ap_entry, mod); | 690 | do_ap_entry, mod); |
| 691 | else if (sym_is(symname, "__mod_css_device_table")) | ||
| 692 | do_table(symval, sym->st_size, | ||
| 693 | sizeof(struct css_device_id), "css", | ||
| 694 | do_css_entry, mod); | ||
| 683 | else if (sym_is(symname, "__mod_serio_device_table")) | 695 | else if (sym_is(symname, "__mod_serio_device_table")) |
| 684 | do_table(symval, sym->st_size, | 696 | do_table(symval, sym->st_size, |
| 685 | sizeof(struct serio_device_id), "serio", | 697 | sizeof(struct serio_device_id), "serio", |
